A Discrete-Event Network Simulator
API
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Groups Pages
cairo-wideint.c
1 /* cairo - a vector graphics library with display and print output
2  *
3  * Copyright © 2004 Keith Packard
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation;
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  *
18  * The original code as contributed to the cairo library under
19  * the dual license MPL+LGPL. We used the LGPL relicensing clause to
20  * get a GPL version of this code which now lives here. This header is
21  * unmodified other than the licensing clause.
22  *
23  * The Original Code is the cairo graphics library.
24  *
25  * The Initial Developer of the Original Code is Keith Packard
26  *
27  * Contributor(s):
28  * Keith R. Packard <keithp@keithp.com>
29  */
30 
31 #include "cairo-wideint-private.h"
32 
33 #if HAVE_UINT64_T
34 
35 #define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))
36 
38 _cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
39 {
41 
42  qr.quo = num / den;
43  qr.rem = num % den;
44  return qr;
45 }
46 
47 #else
48 
49 cairo_uint64_t
50 _cairo_uint32_to_uint64 (uint32_t i)
51 {
52  cairo_uint64_t q;
53 
54  q.lo = i;
55  q.hi = 0;
56  return q;
57 }
58 
59 cairo_int64_t
60 _cairo_int32_to_int64 (int32_t i)
61 {
62  cairo_uint64_t q;
63 
64  q.lo = i;
65  q.hi = i < 0 ? -1 : 0;
66  return q;
67 }
68 
69 static cairo_uint64_t
70 _cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
71 {
72  cairo_uint64_t q;
73 
74  q.lo = l;
75  q.hi = h;
76  return q;
77 }
78 
79 cairo_uint64_t
80 _cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
81 {
82  cairo_uint64_t s;
83 
84  s.hi = a.hi + b.hi;
85  s.lo = a.lo + b.lo;
86  if (s.lo < a.lo)
87  s.hi++;
88  return s;
89 }
90 
91 cairo_uint64_t
92 _cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
93 {
94  cairo_uint64_t s;
95 
96  s.hi = a.hi - b.hi;
97  s.lo = a.lo - b.lo;
98  if (s.lo > a.lo)
99  s.hi--;
100  return s;
101 }
102 
103 #define uint32_lo(i) ((i) & 0xffff)
104 #define uint32_hi(i) ((i) >> 16)
105 #define uint32_carry16 ((1) << 16)
106 
/*
 * Full 32x32 -> 64 bit unsigned multiply, built schoolbook-style from
 * four 16x16 -> 32 bit partial products.
 */
cairo_uint64_t
_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
{
    cairo_uint64_t s;

    uint16_t ah, al, bh, bl;
    uint32_t r0, r1, r2, r3;

    al = uint32_lo (a);
    ah = uint32_hi (a);
    bl = uint32_lo (b);
    bh = uint32_hi (b);

    /* Four partial products; each fits in 32 bits. */
    r0 = (uint32_t) al * bl;
    r1 = (uint32_t) al * bh;
    r2 = (uint32_t) ah * bl;
    r3 = (uint32_t) ah * bh;

    r1 += uint32_hi (r0);   /* no carry possible */
    r1 += r2;               /* but this can carry */
    if (r1 < r2)            /* check */
        r3 += uint32_carry16;  /* carry lands 16 bits up in the high word */

    s.hi = r3 + uint32_hi (r1);
    s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
    return s;
}
134 
/*
 * Signed 32x32 -> 64 bit multiply: do the unsigned multiply, then
 * correct the high word for each negative operand (two's complement:
 * a negative operand's unsigned value is off by 2^32, so its partner
 * must be subtracted from the high word).
 */
cairo_int64_t
_cairo_int32x32_64_mul (int32_t a, int32_t b)
{
    cairo_int64_t s;
    s = _cairo_uint32x32_64_mul ((uint32_t) a, (uint32_t) b);
    if (a < 0)
        s.hi -= b;
    if (b < 0)
        s.hi -= a;
    return s;
}
146 
/*
 * 64x64 -> 64 bit multiply (truncating): the low words' full product
 * plus the cross terms that land in the high word; the hi*hi term
 * overflows entirely and is dropped.
 */
cairo_uint64_t
_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s = _cairo_uint32x32_64_mul (a.lo, b.lo);
    s.hi += a.lo * b.hi + a.hi * b.lo;
    return s;
}
156 
/* Logical shift left; shift is expected in [0, 63]. */
cairo_uint64_t
_cairo_uint64_lsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        /* Whole-word move; remaining shift is then < 32. */
        a.hi = a.lo;
        a.lo = 0;
        shift -= 32;
    }
    if (shift)
    {
        /* Guarded by shift != 0, so (32 - shift) is a valid shift count. */
        a.hi = a.hi << shift | a.lo >> (32 - shift);
        a.lo = a.lo << shift;
    }
    return a;
}
173 
/* Logical shift right; shift is expected in [0, 63]. */
cairo_uint64_t
_cairo_uint64_rsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        /* Whole-word move; remaining shift is then < 32. */
        a.lo = a.hi;
        a.hi = 0;
        shift -= 32;
    }
    if (shift)
    {
        /* Guarded by shift != 0, so (32 - shift) is a valid shift count. */
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = a.hi >> shift;
    }
    return a;
}
190 
191 #define _cairo_uint32_rsa(a,n) ((uint32_t) (((int32_t) (a)) >> (n)))
192 
/* Arithmetic (sign-preserving) shift right; shift is expected in [0, 63]. */
cairo_int64_t
_cairo_uint64_rsa (cairo_int64_t a, int shift)
{
    if (shift >= 32)
    {
        a.lo = a.hi;
        /* Replicate the sign bit across the whole high word. */
        a.hi = _cairo_uint32_rsa (a.hi, 31);
        shift -= 32;
    }
    if (shift)
    {
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = _cairo_uint32_rsa (a.hi, shift);
    }
    return a;
}
209 
210 int
211 _cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
212 {
213  return (a.hi < b.hi ||
214  (a.hi == b.hi && a.lo < b.lo));
215 }
216 
217 int
218 _cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
219 {
220  return a.hi == b.hi && a.lo == b.lo;
221 }
222 
/*
 * Signed less-than on the emulated 64-bit type: mixed signs decide
 * immediately; operands of equal sign order the same way as their
 * unsigned bit patterns, so delegate to the unsigned compare.
 */
int
_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
{
    if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
        return 1;
    if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
        return 0;
    return _cairo_uint64_lt (a, b);
}
232 
233 cairo_uint64_t
234 _cairo_uint64_not (cairo_uint64_t a)
235 {
236  a.lo = ~a.lo;
237  a.hi = ~a.hi;
238  return a;
239 }
240 
/* Two's-complement negation: complement both words, then add one. */
cairo_uint64_t
_cairo_uint64_negate (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    if (++a.lo == 0)
        ++a.hi;   /* the +1 carried out of the low word */
    return a;
}
250 
251 /*
252  * Simple bit-at-a-time divide.
253  */
255 _cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
256 {
258  cairo_uint64_t bit;
259  cairo_uint64_t quo;
260 
261  bit = _cairo_uint32_to_uint64 (1);
262 
263  /* normalize to make den >= num, but not overflow */
264  while (_cairo_uint64_lt (den, num) && (den.hi & 0x80000000) == 0)
265  {
266  bit = _cairo_uint64_lsl (bit, 1);
267  den = _cairo_uint64_lsl (den, 1);
268  }
269  quo = _cairo_uint32_to_uint64 (0);
270 
271  /* generate quotient, one bit at a time */
272  while (bit.hi | bit.lo)
273  {
274  if (_cairo_uint64_le (den, num))
275  {
276  num = _cairo_uint64_sub (num, den);
277  quo = _cairo_uint64_add (quo, bit);
278  }
279  bit = _cairo_uint64_rsl (bit, 1);
280  den = _cairo_uint64_rsl (den, 1);
281  }
282  qr.quo = quo;
283  qr.rem = num;
284  return qr;
285 }
286 
287 #endif /* !HAVE_UINT64_T */
288 
290 _cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
291 {
292  int num_neg = _cairo_int64_negative (num);
293  int den_neg = _cairo_int64_negative (den);
294  cairo_uquorem64_t uqr;
295  cairo_quorem64_t qr;
296 
297  if (num_neg)
298  num = _cairo_int64_negate (num);
299  if (den_neg)
300  den = _cairo_int64_negate (den);
301  uqr = _cairo_uint64_divrem (num, den);
302  if (num_neg)
303  qr.rem = _cairo_int64_negate (uqr.rem);
304  else
305  qr.rem = uqr.rem;
306  if (num_neg != den_neg)
307  qr.quo = (cairo_int64_t) _cairo_int64_negate (uqr.quo);
308  else
309  qr.quo = (cairo_int64_t) uqr.quo;
310  return qr;
311 }
312 
313 #if HAVE_UINT128_T
314 
316 _cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
317 {
319 
320  qr.quo = num / den;
321  qr.rem = num % den;
322  return qr;
323 }
324 
325 #else
326 
327 cairo_uint128_t
328 _cairo_uint32_to_uint128 (uint32_t i)
329 {
330  cairo_uint128_t q;
331 
332  q.lo = _cairo_uint32_to_uint64 (i);
333  q.hi = _cairo_uint32_to_uint64 (0);
334  return q;
335 }
336 
338 _cairo_int32_to_int128 (int32_t i)
339 {
340  cairo_int128_t q;
341 
342  q.lo = _cairo_int32_to_int64 (i);
343  q.hi = _cairo_int32_to_int64 (i < 0 ? -1 : 0);
344  return q;
345 }
346 
347 cairo_uint128_t
348 _cairo_uint64_to_uint128 (cairo_uint64_t i)
349 {
350  cairo_uint128_t q;
351 
352  q.lo = i;
353  q.hi = _cairo_uint32_to_uint64 (0);
354  return q;
355 }
356 
358 _cairo_int64_to_int128 (cairo_int64_t i)
359 {
360  cairo_int128_t q;
361 
362  q.lo = i;
363  q.hi = _cairo_int32_to_int64 (_cairo_int64_negative(i) ? -1 : 0);
364  return q;
365 }
366 
/* 128-bit add from two 64-bit halves, propagating the carry. */
cairo_uint128_t
_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s.hi = _cairo_uint64_add (a.hi, b.hi);
    s.lo = _cairo_uint64_add (a.lo, b.lo);
    /* A wrapped low half means the add carried into the high half. */
    if (_cairo_uint64_lt (s.lo, a.lo))
        s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
    return s;
}
378 
/* 128-bit subtract from two 64-bit halves, propagating the borrow. */
cairo_uint128_t
_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s.hi = _cairo_uint64_sub (a.hi, b.hi);
    s.lo = _cairo_uint64_sub (a.lo, b.lo);
    /* A low half that grew means the subtract borrowed from the high half. */
    if (_cairo_uint64_gt (s.lo, a.lo))
        s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64 (1));
    return s;
}
390 
391 #if HAVE_UINT64_T
392 
393 #define uint64_lo32(i) ((i) & 0xffffffff)
394 #define uint64_hi32(i) ((i) >> 32)
395 #define uint64_lo(i) ((i) & 0xffffffff)
396 #define uint64_hi(i) ((i) >> 32)
397 #define uint64_shift32(i) ((i) << 32)
398 #define uint64_carry32 (((uint64_t) 1) << 32)
399 
400 #else
401 
402 #define uint64_lo32(i) ((i).lo)
403 #define uint64_hi32(i) ((i).hi)
404 
405 static cairo_uint64_t
406 uint64_lo (cairo_uint64_t i)
407 {
408  cairo_uint64_t s;
409 
410  s.lo = i.lo;
411  s.hi = 0;
412  return s;
413 }
414 
415 static cairo_uint64_t
416 uint64_hi (cairo_uint64_t i)
417 {
418  cairo_uint64_t s;
419 
420  s.lo = i.hi;
421  s.hi = 0;
422  return s;
423 }
424 
425 static cairo_uint64_t
426 uint64_shift32 (cairo_uint64_t i)
427 {
428  cairo_uint64_t s;
429 
430  s.lo = 0;
431  s.hi = i.lo;
432  return s;
433 }
434 
435 static const cairo_uint64_t uint64_carry32 = { 0, 1 };
436 
437 #endif
438 
/*
 * Full 64x64 -> 128 bit unsigned multiply, schoolbook-style from four
 * 32x32 -> 64 bit partial products (mirrors _cairo_uint32x32_64_mul).
 */
cairo_uint128_t
_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint128_t s;
    uint32_t ah, al, bh, bl;
    cairo_uint64_t r0, r1, r2, r3;

    al = uint64_lo32 (a);
    ah = uint64_hi32 (a);
    bl = uint64_lo32 (b);
    bh = uint64_hi32 (b);

    r0 = _cairo_uint32x32_64_mul (al, bl);
    r1 = _cairo_uint32x32_64_mul (al, bh);
    r2 = _cairo_uint32x32_64_mul (ah, bl);
    r3 = _cairo_uint32x32_64_mul (ah, bh);

    r1 = _cairo_uint64_add (r1, uint64_hi (r0)); /* no carry possible */
    r1 = _cairo_uint64_add (r1, r2);             /* but this can carry */
    if (_cairo_uint64_lt (r1, r2))               /* check */
        r3 = _cairo_uint64_add (r3, uint64_carry32); /* carry lands 32 bits up */

    s.hi = _cairo_uint64_add (r3, uint64_hi (r1));
    s.lo = _cairo_uint64_add (uint64_shift32 (r1),
                              uint64_lo (r0));
    return s;
}
466 
468 _cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b)
469 {
470  cairo_int128_t s;
471  s = _cairo_uint64x64_128_mul (_cairo_int64_to_uint64(a),
472  _cairo_int64_to_uint64(b));
473  if (_cairo_int64_negative (a))
474  s.hi = _cairo_uint64_sub (s.hi,
475  _cairo_int64_to_uint64 (b));
476  if (_cairo_int64_negative (b))
477  s.hi = _cairo_uint64_sub (s.hi,
478  _cairo_int64_to_uint64 (a));
479  return s;
480 }
481 
/*
 * 128x128 -> 128 bit multiply (truncating): full product of the low
 * halves plus the cross terms that land in the high half; the hi*hi
 * term overflows entirely and is dropped.
 */
cairo_uint128_t
_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s = _cairo_uint64x64_128_mul (a.lo, b.lo);
    s.hi = _cairo_uint64_add (s.hi,
                              _cairo_uint64_mul (a.lo, b.hi));
    s.hi = _cairo_uint64_add (s.hi,
                              _cairo_uint64_mul (a.hi, b.lo));
    return s;
}
494 
/* Logical shift left on the emulated 128-bit type; shift in [0, 127]. */
cairo_uint128_t
_cairo_uint128_lsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        /* Whole-half move; remaining shift is then < 64. */
        a.hi = a.lo;
        a.lo = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        /* Guarded by shift != 0, so (64 - shift) is in [1, 63]. */
        a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
                                  _cairo_uint64_rsl (a.lo, (64 - shift)));
        a.lo = _cairo_uint64_lsl (a.lo, shift);
    }
    return a;
}
512 
/* Logical shift right on the emulated 128-bit type; shift in [0, 127]. */
cairo_uint128_t
_cairo_uint128_rsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        /* Whole-half move; remaining shift is then < 64. */
        a.lo = a.hi;
        a.hi = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        /* Guarded by shift != 0, so (64 - shift) is in [1, 63]. */
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                  _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsl (a.hi, shift);
    }
    return a;
}
530 
/* Arithmetic (sign-preserving) shift right; shift in [0, 127]. */
cairo_uint128_t
_cairo_uint128_rsa (cairo_int128_t a, int shift)
{
    if (shift >= 64)
    {
        a.lo = a.hi;
        /* Replicate the sign bit across the whole high half. */
        a.hi = _cairo_uint64_rsa (a.hi, 64-1);
        shift -= 64;
    }
    if (shift)
    {
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                  _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsa (a.hi, shift);
    }
    return a;
}
548 
549 int
550 _cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
551 {
552  return (_cairo_uint64_lt (a.hi, b.hi) ||
553  (_cairo_uint64_eq (a.hi, b.hi) &&
554  _cairo_uint64_lt (a.lo, b.lo)));
555 }
556 
/*
 * Signed less-than on the emulated 128-bit type: mixed signs decide
 * immediately; same-sign operands order the same way as their
 * unsigned bit patterns, so delegate to the unsigned compare.
 */
int
_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
{
    if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
        return 1;
    if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
        return 0;
    return _cairo_uint128_lt (a, b);
}
566 
567 int
568 _cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
569 {
570  return (_cairo_uint64_eq (a.hi, b.hi) &&
571  _cairo_uint64_eq (a.lo, b.lo));
572 }
573 
574 #if HAVE_UINT64_T
575 #define _cairo_msbset64(q) (q & ((uint64_t) 1 << 63))
576 #else
577 #define _cairo_msbset64(q) (q.hi & ((uint32_t) 1 << 31))
578 #endif
579 
581 _cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
582 {
584  cairo_uint128_t bit;
585  cairo_uint128_t quo;
586 
587  bit = _cairo_uint32_to_uint128 (1);
588 
589  /* normalize to make den >= num, but not overflow */
590  while (_cairo_uint128_lt (den, num) && !_cairo_msbset64(den.hi))
591  {
592  bit = _cairo_uint128_lsl (bit, 1);
593  den = _cairo_uint128_lsl (den, 1);
594  }
595  quo = _cairo_uint32_to_uint128 (0);
596 
597  /* generate quotient, one bit at a time */
598  while (_cairo_uint128_ne (bit, _cairo_uint32_to_uint128(0)))
599  {
600  if (_cairo_uint128_le (den, num))
601  {
602  num = _cairo_uint128_sub (num, den);
603  quo = _cairo_uint128_add (quo, bit);
604  }
605  bit = _cairo_uint128_rsl (bit, 1);
606  den = _cairo_uint128_rsl (den, 1);
607  }
608  qr.quo = quo;
609  qr.rem = num;
610  return qr;
611 }
612 
614 _cairo_int128_negate (cairo_int128_t a)
615 {
616  a.lo = _cairo_uint64_not (a.lo);
617  a.hi = _cairo_uint64_not (a.hi);
618  return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
619 }
620 
622 _cairo_int128_not (cairo_int128_t a)
623 {
624  a.lo = _cairo_uint64_not (a.lo);
625  a.hi = _cairo_uint64_not (a.hi);
626  return a;
627 }
628 
629 #endif /* !HAVE_UINT128_T */
630 
632 _cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
633 {
634  int num_neg = _cairo_int128_negative (num);
635  int den_neg = _cairo_int128_negative (den);
636  cairo_uquorem128_t uqr;
638 
639  if (num_neg)
640  num = _cairo_int128_negate (num);
641  if (den_neg)
642  den = _cairo_int128_negate (den);
643  uqr = _cairo_uint128_divrem (num, den);
644  if (num_neg)
645  qr.rem = _cairo_int128_negate (uqr.rem);
646  else
647  qr.rem = uqr.rem;
648  if (num_neg != den_neg)
649  qr.quo = _cairo_int128_negate (uqr.quo);
650  else
651  qr.quo = uqr.quo;
652  return qr;
653 }
654 
665 _cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
666  cairo_uint64_t den)
667 {
668  cairo_uquorem64_t result;
669  cairo_uint64_t B = _cairo_uint32s_to_uint64 (1, 0);
670 
671  /* These are the high 64 bits of the *96* bit numerator. We're
672  * going to represent the numerator as xB + y, where x is a 64,
673  * and y is a 32 bit number. */
674  cairo_uint64_t x = _cairo_uint128_to_uint64 (_cairo_uint128_rsl(num, 32));
675 
676  /* Initialise the result to indicate overflow. */
677  result.quo = _cairo_uint32s_to_uint64 (-1U, -1U);
678  result.rem = den;
679 
680  /* Don't bother if the quotient is going to overflow. */
681  if (_cairo_uint64_ge (x, den)) {
682  return /* overflow */ result;
683  }
684 
685  if (_cairo_uint64_lt (x, B)) {
686  /* When the final quotient is known to fit in 32 bits, then
687  * num < 2^64 if and only if den < 2^32. */
688  return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
689  }
690  else {
691  /* Denominator is >= 2^32. the numerator is >= 2^64, and the
692  * division won't overflow: need two divrems. Write the
693  * numerator and denominator as
694  *
695  * num = xB + y x : 64 bits, y : 32 bits
696  * den = uB + v u, v : 32 bits
697  */
698  uint32_t y = _cairo_uint128_to_uint32 (num);
699  uint32_t u = uint64_hi32 (den);
700  uint32_t v = _cairo_uint64_to_uint32 (den);
701 
702  /* Compute a lower bound approximate quotient of num/den
703  * from x/(u+1). Then we have
704  *
705  * x = q(u+1) + r ; q : 32 bits, r <= u : 32 bits.
706  *
707  * xB + y = q(u+1)B + (rB+y)
708  * = q(uB + B + v - v) + (rB+y)
709  * = q(uB + v) + qB - qv + (rB+y)
710  * = q(uB + v) + q(B-v) + (rB+y)
711  *
712  * The true quotient of num/den then is q plus the
713  * contribution of q(B-v) + (rB+y). The main contribution
714  * comes from the term q(B-v), with the term (rB+y) only
715  * contributing at most one part.
716  *
717  * The term q(B-v) must fit into 64 bits, since q fits into 32
718  * bits on account of being a lower bound to the true
719  * quotient, and as B-v <= 2^32, we may safely use a single
720  * 64/64 bit division to find its contribution. */
721 
722  cairo_uquorem64_t quorem;
723  cairo_uint64_t remainder; /* will contain final remainder */
724  uint32_t quotient; /* will contain final quotient. */
725  uint32_t q;
726  uint32_t r;
727 
728  /* Approximate quotient by dividing the high 64 bits of num by
729  * u+1. Watch out for overflow of u+1. */
730  if (u+1) {
731  quorem = _cairo_uint64_divrem (x, _cairo_uint32_to_uint64 (u+1));
732  q = _cairo_uint64_to_uint32 (quorem.quo);
733  r = _cairo_uint64_to_uint32 (quorem.rem);
734  }
735  else {
736  q = uint64_hi32 (x);
737  r = _cairo_uint64_to_uint32 (x);
738  }
739  quotient = q;
740 
741  /* Add the main term's contribution to quotient. Note B-v =
742  * -v as an uint32 (unless v = 0) */
743  if (v)
744  quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -v), den);
745  else
746  quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
747  quotient += _cairo_uint64_to_uint32 (quorem.quo);
748 
749  /* Add the contribution of the subterm and start computing the
750  * true remainder. */
751  remainder = _cairo_uint32s_to_uint64 (r, y);
752  if (_cairo_uint64_ge (remainder, den)) {
753  remainder = _cairo_uint64_sub (remainder, den);
754  quotient++;
755  }
756 
757  /* Add the contribution of the main term's remainder. The
758  * funky test here checks that remainder + main_rem >= den,
759  * taking into account overflow of the addition. */
760  remainder = _cairo_uint64_add (remainder, quorem.rem);
761  if (_cairo_uint64_ge (remainder, den) ||
762  _cairo_uint64_lt (remainder, quorem.rem))
763  {
764  remainder = _cairo_uint64_sub (remainder, den);
765  quotient++;
766  }
767 
768  result.quo = _cairo_uint32_to_uint64 (quotient);
769  result.rem = remainder;
770  }
771  return result;
772 }
773 
775 _cairo_int_96by64_32x64_divrem (cairo_int128_t num, cairo_int64_t den)
776 {
777  int num_neg = _cairo_int128_negative (num);
778  int den_neg = _cairo_int64_negative (den);
779  cairo_uint64_t nonneg_den;
780  cairo_uquorem64_t uqr;
781  cairo_quorem64_t qr;
782 
783  if (num_neg)
784  num = _cairo_int128_negate (num);
785  if (den_neg)
786  nonneg_den = _cairo_int64_negate (den);
787  else
788  nonneg_den = den;
789 
790  uqr = _cairo_uint_96by64_32x64_divrem (num, nonneg_den);
791  if (_cairo_uint64_eq (uqr.rem, _cairo_int64_to_uint64 (nonneg_den))) {
792  /* bail on overflow. */
793  qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, -1U);;
794  qr.rem = den;
795  return qr;
796  }
797 
798  if (num_neg)
799  qr.rem = _cairo_int64_negate (uqr.rem);
800  else
801  qr.rem = uqr.rem;
802  if (num_neg != den_neg)
803  qr.quo = _cairo_int64_negate (uqr.quo);
804  else
805  qr.quo = uqr.quo;
806  return qr;
807 }