Lines matching "+full:- +full:l" in arch/m68k/math-emu/fp_util.S (m68k FPU emulation)

23  * the restrictions contained in a BSD-style copyright.)
63 tst.l (TASK_MM-8,%a2)
65 tst.l (TASK_MM-4,%a2)
67 tst.l (TASK_MM,%a2)
69 1: printf ,"oops:%p,%p,%p\n",3,%a2@(TASK_MM-8),%a2@(TASK_MM-4),%a2@(TASK_MM)
70 2: clr.l %d0
94 | args: %d0 = source (32-bit long)
98 printf PCONV,"l2e: %p -> %p(",2,%d0,%a0
99 clr.l %d1 | sign defaults to zero
100 tst.l %d0
104 neg.l %d0
107 move.l %d1,(%a0)+ | set sign / exp
108 move.l %d0,(%a0)+ | set mantissa
109 clr.l (%a0)
110 subq.l #8,%a0 | restore %a0
116 clr.l (%a0)+
117 clr.l (%a0)+
118 clr.l (%a0)
119 subq.l #8,%a0
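
In C terms, the fp_l2e path above does roughly the following. The struct and
all names below are illustrative stand-ins, not the kernel's actual
struct fp_ext layout:

    #include <stdint.h>

    /* Illustrative extended format: sign, 15-bit exponent biased by
     * 0x3fff, 64-bit mantissa as two lwords with the explicit integer
     * bit at bit 31 of mant[0]. */
    struct fp_ext {
        unsigned sign;        /* 0 or 1 */
        unsigned exp;         /* biased exponent */
        uint32_t mant[2];     /* mant[0] = high lword, mant[1] = low */
    };

    /* l2e: store |src| as the high mantissa lword with exponent
     * 0x3fff+31, then normalize (the job of fp_normalize_ext). */
    static void l2e(int32_t src, struct fp_ext *dst)
    {
        uint32_t mag = (uint32_t)src;

        dst->sign = 0;
        dst->mant[1] = 0;
        if (src == 0) {                  /* true zero: clear all fields */
            dst->exp = 0;
            dst->mant[0] = 0;
            return;
        }
        if (src < 0) {
            dst->sign = 1;
            mag = 0u - mag;              /* safe even for INT32_MIN */
        }
        dst->exp = 0x3fff + 31;          /* value = mag * 2^(exp-0x3fff-31) */
        dst->mant[0] = mag;
        while (!(dst->mant[0] & 0x80000000u)) {
            dst->mant[0] <<= 1;          /* shift until the integer bit is set */
            dst->exp--;
        }
    }
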
125 | args: %d0 = source (single-precision fp value)
129 printf PCONV,"s2e: %p -> %p(",2,%d0,%a0
130 move.l %d0,%d1
131 lsl.l #8,%d0 | shift mantissa
132 lsr.l #8,%d1 | exponent / sign
133 lsr.l #7,%d1
139 add.w #0x3fff-0x7f,%d1 | re-bias the exponent.
140 9: move.l %d1,(%a0)+ | fp_ext.sign, fp_ext.exp
141 move.l %d0,(%a0)+ | high lword of fp_ext.mant
142 clr.l (%a0) | low lword = 0
143 subq.l #8,%a0
150 tst.l %d0
152 move.w #0x4000-0x7f,%d1
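
The single-to-extended unpacking above, normal case only, in the same
illustrative C (zero, denormals and NaN/Inf take the special paths whose
fragments appear in the listing):

    /* s2e: IEEE single is 1 sign, 8 exponent, 23 mantissa bits; the
     * exponent is re-biased from 0x7f to 0x3fff, the mantissa moves up
     * 8 bits and gets an explicit integer bit on top. */
    static void s2e(uint32_t bits, struct fp_ext *dst)
    {
        dst->sign = bits >> 31;
        dst->exp  = ((bits >> 23) & 0xff) + (0x3fff - 0x7f);
        dst->mant[0] = (bits << 8) | 0x80000000u;
        dst->mant[1] = 0;
    }
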
162 getuser.l %a1@(0),%d0,fp_err_ua2,%a1
163 getuser.l %a1@(4),%d1,fp_err_ua2,%a1
164 printf PCONV,"d2e: %p%p -> %p(",3,%d0,%d1,%a0
166 getuser.l (%a1)+,%d0,fp_err_ua2,%a1
167 move.l %d0,%d1
168 lsl.l #8,%d0 | shift high mantissa
169 lsl.l #3,%d0
170 lsr.l #8,%d1 | exponent / sign
171 lsr.l #7,%d1
177 add.w #0x3fff-0x3ff,%d1 | re-bias the exponent.
178 9: move.l %d1,(%a0)+ | fp_ext.sign, fp_ext.exp
179 move.l %d0,(%a0)+
180 getuser.l (%a1)+,%d0,fp_err_ua2,%a1
181 move.l %d0,%d1
182 lsl.l #8,%d0
183 lsl.l #3,%d0
184 move.l %d0,(%a0)
186 lsr.l %d0,%d1
187 or.l %d1,-(%a0)
188 subq.l #4,%a0
195 tst.l %d0
197 move.w #0x4000-0x3ff,%d1
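
Likewise for double to extended: the lsl #8 / lsl #3 pair is a shift by 11,
and the lsr merges the mantissa bits that cross the lword boundary. A
normal-case sketch with the same illustrative struct:

    /* d2e: 1 sign, 11 exponent, 52 mantissa bits; re-bias from 0x3ff
     * to 0x3fff and shift the mantissa up 11 bits across both lwords. */
    static void d2e(uint32_t hi, uint32_t lo, struct fp_ext *dst)
    {
        dst->sign = hi >> 31;
        dst->exp  = ((hi >> 20) & 0x7ff) + (0x3fff - 0x3ff);
        dst->mant[0] = 0x80000000u | (hi << 11) | (lo >> 21);
        dst->mant[1] = lo << 11;
    }
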
216 move.l (%a0)+,%d0
219 move.l (%a0),%d0
223 subq.l #4,%a0
246 addq.l #1,(8,%a0)
248 addq.l #1,(4,%a0)
260 tst.b (1,%a0) | to -inf
271 move.l (4,%a0),%d0
274 clr.l %d0
275 move.b (-4,%a0),%d0
279 clr.w -(%a0)
280 subq.l #2,%a0
289 move.w -(%a0),%d2
298 | fancy 64-bit double-shift begins here
299 lsl.l %d2,%d0
300 move.l %d0,(%a0)+
301 move.l (%a0),%d0
302 move.l %d0,%d1
303 lsl.l %d2,%d0
304 move.l %d0,(%a0)
307 lsr.l %d2,%d1
308 or.l %d1,-(%a0)
311 clr.l %d0
312 move.b (-4,%a0),%d0
316 clr.b (-4,%a0)
317 lsl.l %d2,%d0
318 or.l %d0,(4,%a0)
321 lsl.l %d2,%d0
322 move.b %d0,(-4,%a0)
323 lsr.l #8,%d0
324 or.l %d0,(4,%a0)
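
The "fancy 64-bit double-shift" above, for a shift count 0 < n < 32, reduces
to the classic two-word pattern below; the byte loads around it handle the
emulator's extra low-mantissa byte, which this sketch leaves out. The
function name is illustrative:

    /* Shift a 64-bit mantissa (two lwords) left by n: the bits the low
     * lword pushes across the boundary are recovered with the
     * complementary right shift and ORed into the high lword. */
    static void shl64(uint32_t mant[2], unsigned n)
    {
        mant[0] = (mant[0] << n) | (mant[1] >> (32 - n));
        mant[1] <<= n;
    }
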
331 move.w -(%a0),%d2
338 ext.l %d1
340 clr.l (4,%a0)
343 lsl.l %d1,%d0 | lower lword needs only to be shifted
344 move.l %d0,(%a0) | into the higher lword
346 clr.l %d0
347 move.b (-4,%a0),%d0
348 clr.b (-4,%a0)
366 move.w -(%a0),%d2
373 ext.l %d1
377 1: clr.b (-4,%a0)
381 lsl.l %d1,%d0
382 move.l %d0,(%a0)
387 2: lsl.l %d1,%d0
388 move.b %d0,(-4,%a0)
389 lsr.l #8,%d0
393 1: move.l %d0,%d1 | lower lword is split between
394 lsl.l %d2,%d0 | the higher and lower lwords
395 move.l %d0,(%a0)
396 move.l %d1,%d0
399 lsr.l %d2,%d0
400 move.l %d0,-(%a0)
404 move.l (%a0)+,%d0
406 1: tst.l (%a0)
409 2: subq.l #8,%a0
415 3: lsl.l #1,%d0
441 move.l (%a0)+,%d0
444 move.l (%a0),%d0
448 subq.l #4,%a0
470 addq.l #1,(8,%a0)
472 addq.l #1,(4,%a0)
484 tst.b (1,%a0) | to -inf
496 move.l (4,%a0),%d0
499 clr.l %d0
500 move.b (-4,%a0),%d0
504 clr.w -(%a0)
505 subq.l #2,%a0
513 move.w -(%a0),%d2
523 | This is exactly the same 64-bit double shift as seen above.
524 lsl.l %d2,%d0
525 move.l %d0,(%a0)+
526 move.l (%a0),%d0
527 move.l %d0,%d1
528 lsl.l %d2,%d0
529 move.l %d0,(%a0)
532 lsr.l %d2,%d1
533 or.l %d1,-(%a0)
536 clr.l %d0
537 move.b (-4,%a0),%d0
541 clr.b (-4,%a0)
542 lsl.l %d2,%d0
543 or.l %d0,(4,%a0)
546 lsl.l %d2,%d0
547 move.b %d0,(-4,%a0)
548 lsr.l #8,%d0
549 or.l %d0,(4,%a0)
556 move.w -(%a0),%d2
564 ext.l %d1
566 clr.l (4,%a0)
569 lsl.l %d1,%d0 | lower lword needs only to be shifted
570 move.l %d0,(%a0) | into the higher lword
572 clr.l %d0
573 move.b (-4,%a0),%d0
574 clr.b (-4,%a0)
592 move.w -(%a0),%d2
599 ext.l %d1
603 1: clr.b (-4,%a0)
607 lsl.l %d1,%d0
608 move.l %d0,(%a0)
613 2: lsl.l %d1,%d0
614 move.b %d0,(-4,%a0)
615 lsr.l #8,%d0
621 move.l (%a0)+,%d0
623 1: tst.l (%a0)
625 2: subq.l #8,%a0
631 3: move.l %d0,%d1
632 lsl.l #1,%d1
634 clr.l (-4,%a0)
640 move.l %d0,(-4,%a0)
647 | 2: Round to -Infinity
656 | normalize an extended with double (52-bit) precision
663 move.l (%a0)+,%d2
668 sub.w #0x4000-0x3ff,%d2 | will the exponent fit?
672 addq.l #4,%a0
673 move.l (%a0),%d0 | low lword of mantissa
677 lsl.l %d1,%d0 | keep 11 low bits.
678 jne fp_nd_checkround | Are they non-zero?
680 9: subq.l #8,%a0
689 and.w #0xf800,(2,%a0) | clear bits 0-10
692 tst.l %d0 | test guard bit
697 | IEEE754-specified "round to even" behaviour. If the guard
699 | in grade-school arithmetic (i.e. 1.5 rounds to 2.0)
704 lsl.l #1,%d0 | check low bits
708 add.l #0x800,(%a0)
710 addq.l #1,-(%a0) | extend to high lword
716 addq.w #1,-(%a0)
719 1: subq.l #4,%a0
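
The guard-bit test and the add of 0x800 above implement IEEE 754
round-to-nearest-even at double precision: bit 11 of the low lword is the
last kept mantissa bit, bit 10 the guard bit, bits 9-0 the sticky part. A C
sketch of the same decision, with the carry chain spelled out:

    /* Round the 64-bit mantissa to 53 bits, nearest-even. */
    static void round_double(struct fp_ext *x)
    {
        uint32_t guard  = x->mant[1] & 0x400;
        uint32_t sticky = x->mant[1] & 0x3ff;

        /* round up if past the half-way point, or exactly half-way
         * and the last kept bit is odd (1.5 rounds to 2.0) */
        if (guard && (sticky || (x->mant[1] & 0x800))) {
            uint32_t old = x->mant[1];
            x->mant[1] += 0x800;
            if (x->mant[1] < old && ++x->mant[0] == 0) {
                x->mant[0] = 0x80000000u;  /* mantissa overflowed: */
                x->exp++;                  /* renormalize, bump exponent */
            }
        }
        x->mant[1] &= 0xfffff800u;         /* clear bits 0-10 */
    }
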
727 | Round to +Inf or -Inf. High word of %d2 contains the
729 swap %d2 | to -inf
741 move.w #0x3c01,(-2,%a0) | 2**-1022
745 | Again, another 64-bit double shift.
746 move.l (%a0),%d0
747 move.l %d0,%d1
748 lsr.l %d2,%d0
749 move.l %d0,(%a0)+
750 move.l (%a0),%d0
751 lsr.l %d2,%d0
754 lsl.l %d2,%d1
755 or.l %d1,%d0
756 move.l (%a0),%d1
757 move.l %d0,(%a0)
759 lsl.l %d2,%d1
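
The denormalizing shifts above are the same double-shift pattern in the
other direction, with one twist: bits falling off the low end are folded
back in as a sticky bit so the later rounding still sees that the value is
inexact. Sketch for 0 < n < 32:

    /* Shift the mantissa right by n, keeping a sticky bit. */
    static void shr64_sticky(uint32_t mant[2], unsigned n)
    {
        uint32_t lost = mant[1] << (32 - n);   /* bits shifted out */

        mant[1] = (mant[1] >> n) | (mant[0] << (32 - n));
        mant[0] >>= n;
        if (lost)
            mant[1] |= 1;                      /* sticky */
    }
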
763 | Another 64-bit single shift and store
767 move.l (%a0),%d0
768 clr.l (%a0)+
769 move.l %d0,%d1
770 lsr.l %d2,%d0
774 tst.l (%a0)
777 1: move.l %d0,(%a0)
778 lsl.l %d2,%d1
783 2: clr.l (%a0)+
784 clr.l (%a0)
789 tst.l (%a0)+
791 tst.l (%a0)
793 subq.l #8,%a0
799 | i.e. pathologically small numbers (values near 2**-16383).
804 clr.l (%a0)
805 clr.l -(%a0)
806 move.w #0x3c01,-(%a0) | i.e. 2**-1022
807 addq.l #6,%a0
819 1: move.w #0x7fff,(-2,%a0)
820 clr.l (%a0)+
821 clr.l (%a0)
822 2: subq.l #8,%a0
830 tst.b (-3,%a0) | to -inf
833 4: tst.b (-3,%a0) | to +inf
835 5: move.w #0x43fe,(-2,%a0)
836 moveq #-1,%d0
837 move.l %d0,(%a0)+
839 move.l %d0,(%a0)
843 subq.l #4,%a0
850 | normalize an extended with single (23-bit) precision
857 addq.l #2,%a0
862 sub.w #0x4000-0x7f,%d2 | will the exponent fit?
866 move.l (%a0)+,%d0 | get high lword of mantissa
868 tst.l (%a0) | check the low lword
870 | Set a sticky bit if it is non-zero. This should only
871 | affect the rounding in what would otherwise be equal-
874 1: clr.l (%a0) | zap it from memory.
877 jne fp_ns_checkround | Are they non-zero?
879 subq.l #8,%a0
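
The tst.l/clr.l pair above is the single-precision sticky-bit fold: only the
high mantissa lword survives, so a non-zero low lword is collapsed into one
bit before being cleared, which matters only in otherwise equal-distance
rounding decisions. In the illustrative C:

    /* Fold the low mantissa lword into a sticky bit and zap it. */
    static void fold_sticky(struct fp_ext *x)
    {
        if (x->mant[1])
            x->mant[0] |= 1;   /* anything non-zero below the guard bit */
        x->mant[1] = 0;
    }
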
886 clr.b -(%a0) | clear low byte of high lword
887 subq.l #3,%a0
899 add.l #0x100,(%a0)
905 addq.w #1,-(%a0)
908 9: subq.l #4,%a0
917 tst.b (-3,%a0) | to -inf
920 3: tst.b (-3,%a0) | to +inf
927 move.w #0x3f81,(-2,%a0) | 2**-126
931 | a 32-bit shift.
932 move.l (%a0),%d0
933 move.l %d0,%d1
934 lsr.l %d2,%d0
935 move.l %d0,(%a0)+
939 lsl.l %d2,%d1
943 1: tst.l (%a0)
949 2: clr.l (%a0)+
950 clr.l (%a0)
961 1: move.w #0x7fff,(-2,%a0)
962 clr.l (%a0)+
963 clr.l (%a0)
964 2: subq.l #8,%a0
972 tst.b (-3,%a0) | to -inf
975 4: tst.b (-3,%a0) | to +inf
977 5: move.w #0x407e,(-2,%a0)
978 move.l #0xffffff00,(%a0)+
979 clr.l (%a0)
983 tst.l (%a0)+
985 tst.l (%a0)
987 subq.l #8,%a0
993 | i.e. pathologically small numbers (values near 2**-16383).
998 clr.l (%a0)
999 clr.l -(%a0)
1000 move.w #0x3f81,-(%a0) | i.e. 2**-126
1001 addq.l #6,%a0
1006 subq.l #4,%a0
1013 | normalize an extended with single (23-bit) precision
1022 addq.l #2,%a0
1026 move.l (%a0)+,%d0 | get high lword of mantissa
1028 tst.l (%a0) | check the low lword
1030 | Set a sticky bit if it is non-zero. This should only
1031 | affect the rounding in what would otherwise be equal-
1034 1: clr.l (%a0) | zap it from memory.
1037 jne fp_nsf_checkround | Are they non-zero?
1039 subq.l #8,%a0
1046 clr.b -(%a0) | clear low byte of high lword
1047 subq.l #3,%a0
1059 add.l #0x100,(%a0)
1065 addq.w #1,-(%a0)
1068 9: subq.l #4,%a0
1077 tst.b (-3,%a0) | to -inf
1080 3: tst.b (-3,%a0) | to +inf
1091 1: move.w #0x7fff,(-2,%a0)
1092 clr.l (%a0)+
1093 clr.l (%a0)
1094 2: subq.l #8,%a0
1102 tst.b (-3,%a0) | to -inf
1105 4: tst.b (-3,%a0) | to +inf
1107 5: move.w #0x407e,(-2,%a0)
1108 move.l #0xffffff00,(%a0)+
1109 clr.l (%a0)
1113 subq.l #4,%a0
1134 .set inf,(1<<(\b-1))-1 | i.e. MAXINT
1138 addq.l #2,%a0
1147 move.l (%a0),%d0
1148 move.l %d0,%d1
1149 lsl.l %d2,%d1
1151 tst.l (4,%a0)
1155 lsr.l %d2,%d0
1156 9: tst.w (-4,%a0)
1160 printf PCONV,"-> %p\n",1,%d0
1165 1: printf PCONV,"-> %p\n",1,%d0
1174 lsr.l %d2,%d0
1177 tst.l %d1 | test guard bit
1181 lsl.l #1,%d1 | check low bits
1183 tst.l (4,%a0)
1186 addq.l #1,%d0
1192 tst.w (-4,%a0) | to -inf
1195 3: tst.w (-4,%a0) | to +inf
1198 | we only want -2**127 to be rounded correctly here,
1204 move.l (4,%a0),%d1 | test guard bit
1206 lsl.l #1,%d1 | check low bits
1211 clr.l %d0
1212 tst.l (%a0)+
1214 tst.l (%a0)
1216 1: subq.l #4,%a0
1220 clr.l %d0
1225 tst.w (-4,%a0) | to -inf
1229 2: tst.w (-4,%a0) | to +inf
1232 3: printf PCONV,"-> %p\n",1,%d0
1237 tst.w (-4,%a0)
1240 1: printf PCONV,"-> %p\n",1,%d0
1244 tst.l (%a0)
1246 tst.l (%a0)
1253 1: printf PCONV,"-> %p\n",1,%d0
1258 conv_ext2int l,32
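
The conv_ext2int macro saturates to (1<<(b-1))-1, the most positive b-bit
integer, when the exponent is too large. A rough normal-case sketch for
b = 32, with the rounding of the shifted-out bits (the guard-bit logic
above) and the most-negative-value special case elided:

    #include <limits.h>

    /* Convert the illustrative fp_ext to a saturating int32. */
    static int32_t ext2int32(const struct fp_ext *x)
    {
        int e = (int)x->exp - 0x3fff;     /* unbiased exponent */
        uint32_t v;

        if (e < 0)
            return 0;                     /* |x| < 1; rounding elided */
        if (e > 30)                       /* too big: saturate (the real
                                           * code still admits -2**31) */
            return x->sign ? INT32_MIN : INT32_MAX;
        v = x->mant[0] >> (31 - e);       /* keep e+1 integer bits */
        return x->sign ? -(int32_t)v : (int32_t)v;
    }
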
1271 move.l (%a0)+,%d2
1275 move.l (%a0)+,%d0
1277 1: sub.w #0x3fff-0x3ff,%d2
1278 move.l (%a0)+,%d0
1282 lsl.l #7,%d2
1283 lsl.l #8,%d2
1284 move.l %d0,%d1
1285 lsl.l #1,%d0
1286 lsr.l #4,%d0
1287 lsr.l #8,%d0
1288 or.l %d2,%d0
1289 putuser.l %d0,(%a1)+,fp_err_ua2,%a1
1291 lsl.l %d0,%d1
1292 move.l (%a0),%d0
1293 lsr.l #4,%d0
1294 lsr.l #7,%d0
1295 or.l %d1,%d0
1296 putuser.l %d0,(%a1),fp_err_ua2,%a1
1298 getuser.l %a1@(-4),%d0,fp_err_ua2,%a1
1299 getuser.l %a1@(0),%d1,fp_err_ua2,%a1
1309 move.l (%a0)+,%d1
1313 move.l (%a0)+,%d0
1315 1: sub.w #0x3fff-0x7f,%d1
1316 move.l (%a0)+,%d0
1320 lsl.l #7,%d1
1321 lsl.l #8,%d1
1323 lsr.l #8,%d0
1324 or.l %d1,%d0
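
These last two blocks pack an extended value back into IEEE double and
single bits: re-bias the exponent, drop the explicit integer bit (the
lsl #1), and split the fraction across the output lwords with lsl/lsr
pairs. The double case, normal path only, in the illustrative C:

    /* e2d: inverse of d2e for normalized values. */
    static void e2d(const struct fp_ext *x, uint32_t out[2])
    {
        uint32_t e = x->exp - (0x3fff - 0x3ff);           /* 11 bits */

        out[0] = ((uint32_t)x->sign << 31) | (e << 20)
               | ((x->mant[0] << 1) >> 12);               /* f51..f32 */
        out[1] = (x->mant[0] << 21) | (x->mant[1] >> 11); /* f31..f0 */
    }
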
1333 addq.l #8,%sp
1339 addq.l #8,%sp
1345 addq.l #8,%sp
1355 addq.l #8,%sp
1371 clr.l %d0
1372 addq.l #1,%a0
1375 bset #FPSR_CC_NEG-24,%d0 | N bit
1379 moveq #FPSR_CC_Z-24,%d1
1380 tst.l (%a0)+
1382 tst.l (%a0)
1386 2: moveq #FPSR_CC_NAN-24,%d1
1387 move.l (%a0)+,%d2
1388 lsl.l #1,%d2 | ignore high bit
1390 tst.l (%a0)
1392 moveq #FPSR_CC_INF-24,%d1
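
The FPSR_CC_* - 24 constants above rebase the FPSR condition-code bits
(31-24) into a scratch low byte: N from the sign, Z for an all-zero
mantissa, and for exp == 0x7fff either INF or, when any mantissa bit below
the explicit one survives the lsl #1, NAN. As a sketch (bit positions
follow from the -24 offsets):

    enum { CC_NAN = 0, CC_INF = 1, CC_Z = 2, CC_NEG = 3 };

    /* Derive the condition-code byte for an illustrative fp_ext. */
    static unsigned fp_cc_byte(const struct fp_ext *x)
    {
        unsigned cc = 0;

        if (x->sign)
            cc |= 1u << CC_NEG;
        if (x->exp == 0x7fff)              /* Inf or NaN */
            cc |= ((x->mant[0] << 1) | x->mant[1])
                      ? 1u << CC_NAN : 1u << CC_INF;
        else if (!(x->mant[0] | x->mant[1]))
            cc |= 1u << CC_Z;
        return cc;
    }
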
1401 move.l (FPD_FPSR,FPDATA),%d0
1426 2: move.l %d0,(FPD_FPSR,FPDATA)
1429 move.l %d0,%d2
1430 lsr.l #5,%d0
1431 move.l %d0,%d1
1432 lsr.l #4,%d1
1433 or.l %d0,%d1
1435 move.l %d2,%d0
1436 lsr.l #6,%d0
1437 or.l %d1,%d0
1438 move.l %d2,%d1
1439 lsr.l #4,%d1
1442 move.l %d2,%d1
1443 lsr.l #7,%d1
1448 move.l %d2,(FPD_FPSR,FPDATA)
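
The closing shift/or cascade folds the FPSR exception byte (bits 15-8) into
the accrued-exception byte (bits 7-3). A sketch of the 68881-documented
mapping it implements; the kernel realizes the same combining with shifts
and masks:

    /* Update the accrued-exception byte from the exception byte. */
    static uint32_t fpsr_accrue(uint32_t fpsr)
    {
        unsigned bsun  = (fpsr >> 15) & 1, snan  = (fpsr >> 14) & 1;
        unsigned operr = (fpsr >> 13) & 1, ovfl  = (fpsr >> 12) & 1;
        unsigned unfl  = (fpsr >> 11) & 1, dz    = (fpsr >> 10) & 1;
        unsigned inex2 = (fpsr >>  9) & 1, inex1 = (fpsr >>  8) & 1;

        fpsr |= (bsun | snan | operr)  << 7;   /* accrued IOP  */
        fpsr |= ovfl                   << 6;   /* accrued OVFL */
        fpsr |= (unfl & inex2)         << 5;   /* accrued UNFL */
        fpsr |= dz                     << 4;   /* accrued DZ   */
        fpsr |= (inex1 | inex2 | ovfl) << 3;   /* accrued INEX */
        return fpsr;
    }
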