42 set _off_bsun, 0x00
43 set _off_snan, 0x04
44 set _off_operr, 0x08
45 set _off_ovfl, 0x0c
46 set _off_unfl, 0x10
47 set _off_dz, 0x14
48 set _off_inex, 0x18
49 set _off_fline, 0x1c
50 set _off_fpu_dis, 0x20
51 set _off_trap, 0x24
52 set _off_trace, 0x28
53 set _off_access, 0x2c
54 set _off_done, 0x30
56 set _off_imr, 0x40
57 set _off_dmr, 0x44
58 set _off_dmw, 0x48
59 set _off_irw, 0x4c
60 set _off_irl, 0x50
61 set _off_drb, 0x54
62 set _off_drw, 0x58
63 set _off_drl, 0x5c
64 set _off_dwb, 0x60
65 set _off_dww, 0x64
66 set _off_dwl, 0x68
74 short 0x0000
76 short 0x0000
78 short 0x0000
80 short 0x0000
82 short 0x0000
84 short 0x0000
86 short 0x0000
88 short 0x0000
90 short 0x0000
98 mov.l (_060FPSP_TABLE-0x80+_off_done,%pc),%d0
99 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
100 mov.l 0x4(%sp),%d0
101 rtd &0x4
106 mov.l (_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
107 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
108 mov.l 0x4(%sp),%d0
109 rtd &0x4
114 mov.l (_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
115 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
116 mov.l 0x4(%sp),%d0
117 rtd &0x4
122 mov.l (_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
123 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
124 mov.l 0x4(%sp),%d0
125 rtd &0x4
130 mov.l (_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
131 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
132 mov.l 0x4(%sp),%d0
133 rtd &0x4
138 mov.l (_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
139 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
140 mov.l 0x4(%sp),%d0
141 rtd &0x4
146 mov.l (_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
147 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
148 mov.l 0x4(%sp),%d0
149 rtd &0x4
154 mov.l (_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
155 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
156 mov.l 0x4(%sp),%d0
157 rtd &0x4
162 mov.l (_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
163 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
164 mov.l 0x4(%sp),%d0
165 rtd &0x4
170 mov.l (_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
171 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
172 mov.l 0x4(%sp),%d0
173 rtd &0x4
178 mov.l (_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
179 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
180 mov.l 0x4(%sp),%d0
181 rtd &0x4
186 mov.l (_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
187 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
188 mov.l 0x4(%sp),%d0
189 rtd &0x4
194 mov.l (_060FPSP_TABLE-0x80+_off_access,%pc),%d0
195 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
196 mov.l 0x4(%sp),%d0
197 rtd &0x4
204 mov.l (_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
205 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
206 mov.l 0x4(%sp),%d0
207 rtd &0x4
212 mov.l (_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
213 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
214 mov.l 0x4(%sp),%d0
215 rtd &0x4
220 mov.l (_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
221 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
222 mov.l 0x4(%sp),%d0
223 rtd &0x4
228 mov.l (_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
229 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
230 mov.l 0x4(%sp),%d0
231 rtd &0x4
236 mov.l (_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
237 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
238 mov.l 0x4(%sp),%d0
239 rtd &0x4
244 mov.l (_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
245 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
246 mov.l 0x4(%sp),%d0
247 rtd &0x4
252 mov.l (_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
253 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
254 mov.l 0x4(%sp),%d0
255 rtd &0x4
260 mov.l (_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
261 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
262 mov.l 0x4(%sp),%d0
263 rtd &0x4
268 mov.l (_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
269 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
270 mov.l 0x4(%sp),%d0
271 rtd &0x4
276 mov.l (_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
277 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
278 mov.l 0x4(%sp),%d0
279 rtd &0x4
284 mov.l (_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
285 pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
286 mov.l 0x4(%sp),%d0
287 rtd &0x4
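Each stub above follows the same four-instruction pattern: load a 32-bit offset from the 128-byte callout table that sits 0x80 bytes before _060FPSP_TABLE, push the table base plus that offset as the new return address, reload %d0 from 0x4(%sp) (it is presumably saved there by a preceding line the search did not match), and use rtd &0x4 to jump through the pushed address while releasing the 4-byte save slot. A minimal C sketch of the address calculation only, assuming the table entries are offsets relative to the table base (helper name hypothetical):

    /* Minimal sketch (names hypothetical): resolve a callout address from the
     * offset table located 0x80 bytes before _060FPSP_TABLE.  Each stub above
     * does the equivalent with pea.l (_060FPSP_TABLE-0x80,%pc,%d0). */
    typedef void (*callout_fn)(void);

    static callout_fn resolve_callout(const unsigned char *table_base,
                                      unsigned long entry_offset)
    {
        /* the table entry holds a 32-bit offset relative to the table base */
        unsigned long rel = *(const unsigned long *)(table_base + entry_offset);
        return (callout_fn)(table_base + rel);
    }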
297 set EXC_SR, 0x4 # stack status register
298 set EXC_PC, 0x6 # stack pc
299 set EXC_VOFF, 0xa # stacked vector offset
300 set EXC_EA, 0xc # stacked <ea>
302 set EXC_FP, 0x0 # frame pointer
316 set EXC_A0, EXC_AREGS+(0*4)
324 set EXC_D0, EXC_DREGS+(0*4)
326 set EXC_FP0, EXC_FPREGS+(0*12) # offset of saved fp0
331 set FP_SCR1_EX, FP_SCR1+0
336 set FP_SCR0, LV+68 # fp scratch 0
337 set FP_SCR0_EX, FP_SCR0+0
343 set FP_DST_EX, FP_DST+0
349 set FP_SRC_EX, FP_SRC+0
357 set FPSR_CC, USER_FPSR+0 # FPSR condition codes
384 set EXC_OPWORD, LV+0 # saved operation word
390 set FTEMP, 0 # offsets within an
391 set FTEMP_EX, 0 # extended precision
397 set LOCAL, 0 # offsets within an
398 set LOCAL_EX, 0 # extended precision
404 set DST, 0 # offsets within an
405 set DST_EX, 0 # extended precision
409 set SRC, 0 # offsets within an
410 set SRC_EX, 0 # extended precision
414 set SGL_LO, 0x3f81 # min sgl prec exponent
415 set SGL_HI, 0x407e # max sgl prec exponent
416 set DBL_LO, 0x3c01 # min dbl prec exponent
417 set DBL_HI, 0x43fe # max dbl prec exponent
418 set EXT_LO, 0x0 # min ext prec exponent
419 set EXT_HI, 0x7ffe # max ext prec exponent
421 set EXT_BIAS, 0x3fff # extended precision bias
422 set SGL_BIAS, 0x007f # single precision bias
423 set DBL_BIAS, 0x03ff # double precision bias
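The limits above are the IEEE single- and double-precision exponent ranges re-expressed with the extended-precision bias of 0x3fff. A compile-time check (illustration only, C11) makes the relationship explicit:

    /* The *_LO/*_HI limits are the sgl/dbl exponent ranges rebased to the
     * extended-precision bias of 0x3fff. */
    _Static_assert(0x3fff - 126  == 0x3f81, "SGL_LO = bias - 126");
    _Static_assert(0x3fff + 127  == 0x407e, "SGL_HI = bias + 127");
    _Static_assert(0x3fff - 1022 == 0x3c01, "DBL_LO = bias - 1022");
    _Static_assert(0x3fff + 1023 == 0x43fe, "DBL_HI = bias + 1023");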
425 set NORM, 0x00 # operand type for STAG/DTAG
426 set ZERO, 0x01 # operand type for STAG/DTAG
427 set INF, 0x02 # operand type for STAG/DTAG
428 set QNAN, 0x03 # operand type for STAG/DTAG
429 set DENORM, 0x04 # operand type for STAG/DTAG
430 set SNAN, 0x05 # operand type for STAG/DTAG
431 set UNNORM, 0x06 # operand type for STAG/DTAG
436 set neg_bit, 0x3 # negative result
437 set z_bit, 0x2 # zero result
438 set inf_bit, 0x1 # infinite result
439 set nan_bit, 0x0 # NAN result
441 set q_sn_bit, 0x7 # sign bit of quotient byte
450 set inex1_bit, 0 # inexact result 1
461 set neg_mask, 0x08000000 # negative bit mask (lw)
462 set inf_mask, 0x02000000 # infinity bit mask (lw)
463 set z_mask, 0x04000000 # zero bit mask (lw)
464 set nan_mask, 0x01000000 # nan bit mask (lw)
466 set neg_bmask, 0x08 # negative bit mask (byte)
467 set inf_bmask, 0x02 # infinity bit mask (byte)
468 set z_bmask, 0x04 # zero bit mask (byte)
469 set nan_bmask, 0x01 # nan bit mask (byte)
471 set bsun_mask, 0x00008000 # bsun exception mask
472 set snan_mask, 0x00004000 # snan exception mask
473 set operr_mask, 0x00002000 # operr exception mask
474 set ovfl_mask, 0x00001000 # overflow exception mask
475 set unfl_mask, 0x00000800 # underflow exception mask
476 set dz_mask, 0x00000400 # dz exception mask
477 set inex2_mask, 0x00000200 # inex2 exception mask
478 set inex1_mask, 0x00000100 # inex1 exception mask
480 set aiop_mask, 0x00000080 # accrued illegal operation
481 set aovfl_mask, 0x00000040 # accrued overflow
482 set aunfl_mask, 0x00000020 # accrued underflow
483 set adz_mask, 0x00000010 # accrued divide by zero
484 set ainex_mask, 0x00000008 # accrued inexact
491 set nzi_mask, 0x01ffffff #clears N, Z, and I
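Taken together these masks cover the live bytes of the FPSR: condition codes in bits 27-24, the exception-status byte in bits 15-8, and the accrued-exception byte in bits 7-3. A small sketch of how a saved USER_FPSR image could be probed with them (the FPSR value and helper are hypothetical; mask names mirror the listing):

    /* Illustration only: probing a saved FPSR long word with the masks above. */
    #define z_mask     0x04000000UL   /* condition-code byte     */
    #define ovfl_mask  0x00001000UL   /* exception-status byte   */
    #define aovfl_mask 0x00000040UL   /* accrued-exception byte  */

    static void probe_fpsr(unsigned long fpsr)
    {
        int zero_result  = (fpsr & z_mask)     != 0;
        int ovfl_pending = (fpsr & ovfl_mask)  != 0;
        int ovfl_accrued = (fpsr & aovfl_mask) != 0;
        (void)zero_result; (void)ovfl_pending; (void)ovfl_accrued;
    }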
512 set sign_bit, 0x7 # sign bit
513 set signan_bit, 0x6 # signalling nan bit
515 set sgl_thresh, 0x3f81 # minimum sgl exponent
516 set dbl_thresh, 0x3c01 # minimum dbl exponent
518 set x_mode, 0x0 # extended precision
519 set s_mode, 0x4 # single precision
520 set d_mode, 0x8 # double precision
522 set rn_mode, 0x0 # round-to-nearest
523 set rz_mode, 0x1 # round-to-zero
524 set rm_mode, 0x2 # round-to-minus-infinity
525 set rp_mode, 0x3 # round-to-plus-infinity
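The precision and rounding-mode constants are laid out so that (prec | mode) forms a 4-bit value; multiplying by 0x10, as the later ori.b &s_mode*0x10,%d0 and fmov.l &rz_mode*0x10,%fpcr lines do, shifts it into FPCR bits 7-4 (precision in bits 7-6, mode in bits 5-4). A sketch of that packing, assuming the same encoding (names mirror the listing):

    /* Sketch of how the handlers appear to build the FPCR MODE byte:
     * (prec | mode) * 0x10 puts precision in bits 7-6 and rounding mode in
     * bits 5-4, matching ori.b &s_mode*0x10 and fmov.l &rz_mode*0x10 below. */
    enum { x_mode = 0x0, s_mode = 0x4, d_mode = 0x8 };                   /* precision */
    enum { rn_mode = 0x0, rz_mode = 0x1, rm_mode = 0x2, rp_mode = 0x3 }; /* rnd mode  */

    static unsigned char fpcr_mode_byte(int prec, int rnd)
    {
        return (unsigned char)((prec | rnd) * 0x10);  /* e.g. single + RZ -> 0x50 */
    }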
533 set BSUN_VEC, 0xc0 # bsun vector offset
534 set INEX_VEC, 0xc4 # inexact vector offset
535 set DZ_VEC, 0xc8 # dz vector offset
536 set UNFL_VEC, 0xcc # unfl vector offset
537 set OPERR_VEC, 0xd0 # operr vector offset
538 set OVFL_VEC, 0xd4 # ovfl vector offset
539 set SNAN_VEC, 0xd8 # snan vector offset
544 set ftrapcc_flg, 0x01 # flag bit: ftrapcc exception
545 set fbsun_flg, 0x02 # flag bit: bsun exception
546 set mia7_flg, 0x04 # flag bit: (a7)+ <ea>
547 set mda7_flg, 0x08 # flag bit: -(a7) <ea>
548 set fmovm_flg, 0x40 # flag bit: fmovm instruction
549 set immed_flg, 0x80 # flag bit: &<data> <ea>
551 set ftrapcc_bit, 0x0
552 set fbsun_bit, 0x1
553 set mia7_bit, 0x2
554 set mda7_bit, 0x3
555 set immed_bit, 0x7
560 set FMUL_OP, 0x0 # fmul instr performed last
561 set FDIV_OP, 0x1 # fdiv performed last
562 set FADD_OP, 0x2 # fadd performed last
563 set FMOV_OP, 0x3 # fmov performed last
568 T1: long 0x40C62D38,0xD3D64634 # 16381 LOG2 LEAD
569 T2: long 0x3D6F90AE,0xB1E75CC7 # 16381 LOG2 TRAIL
571 PI: long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
572 PIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
575 long 0x3FE45F30,0x6DC9C883
588 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
592 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
605 # - The fsave frame contains the adjusted src op for opclass 0,2 #
644 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
646 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
651 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
657 btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
673 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
700 andi.w &0x007f,%d1 # extract extension
702 andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
704 fmov.l &0x0,%fpcr # zero current control regs
705 fmov.l &0x0,%fpsr
730 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
732 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
741 fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
743 mov.w &0xe005,2+FP_SRC(%a6) # save exc status
745 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
747 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
759 fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
761 mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
762 mov.w &0xe001,2+FP_SRC(%a6) # save exc status
764 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
766 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
788 and.l &0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
790 fmov.l &0x0,%fpcr # zero current control regs
791 fmov.l &0x0,%fpsr
803 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
805 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
810 btst &0x7,(%sp) # is trace on?
813 fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
814 mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
828 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
832 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
845 # - The fsave frame contains the adjusted src op for opclass 0,2 #
884 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
886 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
891 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
897 btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
911 btst &0x5,1+EXC_CMDREG(%a6) # is op monadic or dyadic?
916 btst &0x4,1+EXC_CMDREG(%a6) # is op an fsincos?
943 andi.w &0x007f,%d1 # extract extension
945 andi.l &0x00ff01ff,USER_FPSR(%a6)
947 fmov.l &0x0,%fpcr # zero current control regs
948 fmov.l &0x0,%fpsr
962 # (0x00000000_80000000_00000000), then the machine will take an
980 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
982 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
995 # (0x00000000_80000000_00000000), then the machine will take an
1003 fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
1005 mov.w &0xe003,2+FP_SRC(%a6) # save exc status
1007 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
1009 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1023 # (0x00000000_80000000_00000000), then the machine will take an
1033 fmovm.x &0x40,FP_SRC(%a6) # save EXOP to stack
1035 mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
1036 mov.w &0xe001,2+FP_SRC(%a6) # save exc status
1038 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
1040 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1062 and.l &0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
1064 fmov.l &0x0,%fpcr # zero current control regs
1065 fmov.l &0x0,%fpsr
1077 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
1079 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1084 btst &0x7,(%sp) # is trace on?
1087 fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
1088 mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
1103 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
1108 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
1141 # unimplemented data types. These can be either opclass 0,2 or 3 #
1143 # also of opclasses 0,2, or 3. #
1144 # For UNNORM/DENORM opclass 0 and 2, the handler fetches the src #
1154 # PACKED opclass 0 and 2 is similar in how the instruction is #
1181 # * 0x0 * 0x0dc * * 0x3 * 0x0dc *
1194 # * 0x2 * 0x0dc *
1209 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
1211 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
1213 btst &0x5,EXC_SR(%a6) # user or supervisor mode?
1223 lea 0x4+EXC_EA(%a6),%a0 # load old a7'
1233 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
1243 btst &0x5,EXC_CMDREG(%a6) # is it an fmove out?
1247 bfextu EXC_CMDREG(%a6){&0:&6},%d0
1248 cmpi.b %d0,&0x13
1254 andi.l &0x00ff00ff,USER_FPSR(%a6) # zero exception field
1256 fmov.l &0x0,%fpcr # zero current control regs
1257 fmov.l &0x0,%fpsr
1281 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
1283 cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
1328 andi.b &0x38,%d0 # extract bits 3-5
1329 cmpi.b %d0,&0x38 # is instr fcmp or ftst?
1337 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1339 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1384 subi.l &24,%d0 # fix offset to be 0-8
1385 cmpi.b %d0,&0x6 # is exception INEX? (6)
1404 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1406 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1415 short 0xe000,0xe006,0xe004,0xe005
1416 short 0xe003,0xe002,0xe001,0xe001
1419 mov.w &0x4,%d0
1422 mov.w &0x03,%d0
1431 bfextu EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
1432 cmpi.b %d0,&0x11 # is class = 2 & fmt = sgl?
1434 cmpi.b %d0,&0x15 # is class = 2 & fmt = dbl?
1440 andi.w &0x7fff,%d0 # strip sign
1441 cmpi.w %d0,&0x3f80 # is |exp| == $3f80?
1443 cmpi.w %d0,&0x407f # no; is |exp| == $407f?
1448 andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
1454 addi.w &0x3f81,%d0 # adjust new exponent
1455 andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
1460 andi.w &0x8000,LOCAL_EX(%a0) # clear bogus exponent
1464 andi.b &0x7f,LOCAL_HI(%a0) # clear j-bit
1465 ori.w &0x7fff,LOCAL_EX(%a0) # make exponent = $7fff
1470 andi.w &0x7fff,%d0 # strip sign
1471 cmpi.w %d0,&0x3c00 # is |exp| == $3c00?
1473 cmpi.w %d0,&0x43ff # no; is |exp| == $43ff?
1478 andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
1486 addi.w &0x3c01,%d0 # adjust new exponent
1487 andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
1500 cmpi.b %d0,&0x3
1502 cmpi.b %d0,&0x7
1509 and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
1511 fmov.l &0x0,%fpcr # zero current control regs
1512 fmov.l &0x0,%fpsr
1517 andi.w &0x7fff,%d0 # strip sign
1562 btst &0x5,EXC_SR(%a6)
1569 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1571 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1575 btst &0x7,(%sp) # is trace on?
1591 fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
1593 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1595 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1599 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
1600 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
1603 mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
1604 mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
1605 mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
1607 add.l &LOCAL_SIZE-0x8,%sp
1609 btst &0x7,(%sp)
1640 # * 0x3 * 0x0dc * * 0x2 * 0x024 *
1649 mov.w &0x2024,0x6(%sp)
1650 fmov.l %fpiar,0x8(%sp)
1655 subi.l &24,%d0 # fix offset to be 0-8
1662 swbeg &0x8
1676 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1678 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1680 mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
1681 mov.w &0xe006,2+FP_SRC(%a6)
1691 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1693 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1695 mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
1696 mov.w &0xe004,2+FP_SRC(%a6)
1706 fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
1708 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1710 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1712 mov.w &0x30d4,EXC_VOFF(%a6) # vector offset = 0xd4
1713 mov.w &0xe005,2+FP_SRC(%a6)
1728 btst &0x5,EXC_SR(%a6)
1735 fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
1737 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1739 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1741 mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
1742 mov.w &0xe003,2+FP_SRC(%a6)
1758 fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
1759 fmovm.x &0x40,FP_DST(%a6) # put EXOP on stack
1761 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1763 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1765 mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
1766 mov.w &0xe003,2+FP_DST(%a6)
1772 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
1773 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
1774 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
1777 mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
1778 mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
1779 mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
1781 add.l &LOCAL_SIZE-0x8,%sp
1787 fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
1789 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1791 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1793 mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
1794 mov.w &0xe001,2+FP_SRC(%a6)
1810 andi.l &0x0ff00ff,USER_FPSR(%a6) # zero exception field
1812 fmov.l &0x0,%fpcr # zero current control regs
1813 fmov.l &0x0,%fpsr
1826 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
1828 cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
1873 andi.b &0x38,%d0 # extract bits 3-5
1874 cmpi.b %d0,&0x38 # is instr fcmp or ftst?
1882 btst &0x5,EXC_SR(%a6) # user or supervisor?
1889 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1891 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1895 btst &0x7,(%sp) # is trace on?
1907 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1909 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1914 mov.l 0x4(%sp),0x10(%sp)
1915 mov.l 0x0(%sp),0xc(%sp)
1916 add.l &0xc,%sp
1918 btst &0x7,(%sp) # is trace on?
1962 subi.l &24,%d0 # fix offset to be 0-8
1963 cmpi.b %d0,&0x6 # is exception INEX? (6 or 7)
1979 btst &0x5,EXC_SR(%a6) # user or supervisor?
1988 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
1990 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
1996 btst &0x7,(%sp) # is trace enabled?
2002 short 0xe000,0xe006,0xe004,0xe005
2003 short 0xe003,0xe002,0xe001,0xe001
2006 mov.w &0x3,%d0
2010 mov.w &0x4,%d0
2019 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2021 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2028 mov.l 0x4(%sp),0x10(%sp)
2029 mov.l 0x0(%sp),0xc(%sp)
2030 add.l &0xc,%sp
2032 btst &0x7,(%sp) # is trace on?
2047 # * 0x2 * 0x0dc * * 0x2 * 0x024 *
2055 mov.w &0x2024,0x6(%sp)
2056 fmov.l %fpiar,0x8(%sp)
2068 and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
2070 fmov.l &0x0,%fpcr # zero current control regs
2071 fmov.l &0x0,%fpsr
2113 btst &0x5,EXC_SR(%a6) # user or supervisor?
2120 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2122 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2126 btst &0x7,(%sp) # is trace on?
2138 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2140 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2144 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2145 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2148 mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
2149 mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
2150 mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
2152 add.l &LOCAL_SIZE-0x8,%sp
2154 btst &0x7,(%sp)
2169 cmpi.b %d0,&0x1a
2174 btst &0x5,EXC_SR(%a6)
2188 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2190 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2192 mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
2193 mov.w &0xe006,2+FP_SRC(%a6) # set fsave status
2199 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2200 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2201 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
2204 mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
2205 mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
2206 mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
2208 add.l &LOCAL_SIZE-0x8,%sp
2214 btst &0x5,EXC_SR(%a6)
2228 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2230 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2232 mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
2233 mov.w &0xe004,2+FP_SRC(%a6) # set fsave status
2239 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2240 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2241 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
2244 mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
2245 mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
2246 mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
2248 add.l &LOCAL_SIZE-0x8,%sp
2254 btst &0x5,EXC_SR(%a6)
2268 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
2270 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2272 mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
2273 mov.w &0xe001,2+FP_SRC(%a6) # set fsave status
2279 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
2280 mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
2281 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
2284 mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
2285 mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
2286 mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
2288 add.l &LOCAL_SIZE-0x8,%sp
2303 cmpi.b %d0,&0x1 # was src sgl?
2305 cmpi.b %d0,&0x5 # was src dbl?
2311 andi.w &0x7fff,%d0 # strip sign
2313 cmpi.w %d0,&0x3f80
2316 addi.w &0x3f81,%d0 # find amt to shift
2321 andi.w &0x8000,FP_SRC_EX(%a6) # clear old exponent
2322 ori.w &0x3f80,FP_SRC_EX(%a6) # insert new "skewed" exponent
2328 andi.w &0x7fff,%d0 # strip sign
2330 cmpi.w %d0,&0x3c00
2334 smi.b 0x2+FP_SRC(%a6)
2338 mov.w &0x3c01,%d1 # pass denorm threshold
2340 mov.w &0x3c00,%d0 # new exponent
2341 tst.b 0x2+FP_SRC(%a6) # is sign set?
2345 bset &0x7,FP_SRC_HI(%a6) # set j-bit
2353 btst &0x5,EXC_SR(%a6)
2355 mov.l 0x0(%a0),FP_DST_EX(%a6)
2356 mov.l 0x4(%a0),FP_DST_HI(%a6)
2357 mov.l 0x8(%a0),FP_DST_LO(%a6)
2374 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
2377 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
2459 btst &0x1,%d0 # is FPU disabled?
2465 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2467 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
2473 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
2502 andi.l &0x00ff00ff,USER_FPSR(%a6)
2504 btst &0xa,%d0 # is src fmt x or p?
2510 mov.l &0xc,%d0 # pass: 12 bytes
2522 mov.l &0xc,%d0 # pass: 12 bytes
2530 cmpi.w %d0,&0x7fff # INF or NAN?
2536 andi.b &0x0f,%d0 # clear all but last nybble
2545 fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
2548 addi.l &0xc,EXC_EXTWPTR(%a6) # update extension word pointer
2561 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
2563 btst &0x4,1+EXC_CMDREG(%a6) # is operation fsincos,ftst,fcmp?
2581 btst &0x3,1+EXC_CMDREG(%a6) # is operation fsincos?
2586 btst &0x1,1+EXC_CMDREG(%a6) # is operation fcmp?
2594 andi.w &0x007f,%d1 # extract extension
2596 fmov.l &0x0,%fpcr
2597 fmov.l &0x0,%fpsr
2636 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
2638 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2642 btst &0x7,(%sp) # is trace on?
2665 subi.l &24,%d0 # fix offset to be 0-8
2666 cmpi.b %d0,&0x6 # is exception INEX?
2682 short 0xe002, 0xe006, 0xe004, 0xe005
2683 short 0xe003, 0xe002, 0xe001, 0xe001
2686 mov.w &0xe005,2+FP_SRC(%a6)
2690 mov.w &0xe003,2+FP_SRC(%a6)
2696 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
2698 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2704 btst &0x7,(%sp) # is trace on?
2716 # * 0x0 * 0x0f0 * * Current *
2719 # * PC * * 0x2 * 0x024 *
2728 mov.w 0x8(%sp),0x4(%sp)
2729 mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
2730 fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
2741 btst &0x5,EXC_SR(%a6) # user or supervisor mode
2754 lea 0x2+EXC_VOFF(%a6),%a0
2768 btst &0x7,EXC_SR(%a6)
2773 mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
2778 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
2780 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2787 mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
2788 mov.l EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
2789 mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
2790 mov.l EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
2792 lea (EXC_SR-0x4,%a6,%d0),%a0
2795 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
2797 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2806 mov.b %d0,0x1+EXC_VOFF(%a6) # store size
2808 fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
2810 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2818 mov.b 0x1+EXC_VOFF(%a6),%d0 # fetch size
2821 btst &0x7,EXC_SR(%a6) # is trace enabled?
2824 mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
2825 mov.l EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
2826 mov.l (%sp)+,(EXC_PC-0x4,%a6,%d0)
2827 mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
2835 mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
2837 pea (0x4,%a6,%d0) # create final sp
2845 fmovm.x &0x80,(0x4+0x8,%a6,%d0)
2846 addi.l &0xc,%d0
2848 lsl.b &0x1,%d1
2850 fmovm.x &0x40,(0x4+0x8,%a6,%d0)
2851 addi.l &0xc,%d0
2853 lsl.b &0x1,%d1
2855 fmovm.x &0x20,(0x4+0x8,%a6,%d0)
2856 addi.l &0xc,%d0
2858 lsl.b &0x1,%d1
2860 fmovm.x &0x10,(0x4+0x8,%a6,%d0)
2861 addi.l &0xc,%d0
2863 lsl.b &0x1,%d1
2865 fmovm.x &0x08,(0x4+0x8,%a6,%d0)
2866 addi.l &0xc,%d0
2868 lsl.b &0x1,%d1
2870 fmovm.x &0x04,(0x4+0x8,%a6,%d0)
2871 addi.l &0xc,%d0
2873 lsl.b &0x1,%d1
2875 fmovm.x &0x02,(0x4+0x8,%a6,%d0)
2876 addi.l &0xc,%d0
2878 lsl.b &0x1,%d1
2880 fmovm.x &0x01,(0x4+0x8,%a6,%d0)
2882 mov.l 0x4(%sp),%d1
2883 mov.l 0x8(%sp),%d0
2884 mov.l 0xc(%sp),%a6
2887 btst &0x7,(%sp) # is trace enabled?
2897 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
2899 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2901 btst &0x7,EXC_SR(%a6) # is trace on?
2918 # * 0x0 * 0x0f0 * * Current *
2921 # * PC * * 0x2 * 0x024 *
2934 mov.w EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
2935 mov.l EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
2936 mov.l EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
2937 mov.w &0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
2954 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
2959 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
2968 mov.l &0x10,%d0 # 16 bytes of instruction
2971 btst &0xe,%d0 # is instr fmovm ctrl
2975 mov.l &0xc,%d0
2976 cmpi.b %d1,&0x7 # move all regs?
2978 addq.l &0x4,%d0
2992 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
2999 subq.l &0x8,%sp # make room for new stack
3001 mov.w 0xc(%sp),0x4(%sp) # move SR
3002 mov.l 0xe(%sp),0x6(%sp) # move Current PC
3004 mov.w 0x12(%sp),%d0
3005 mov.l 0x6(%sp),0x10(%sp) # move Current PC
3006 add.l %d0,0x6(%sp) # make Next PC
3007 mov.w &0x402c,0xa(%sp) # insert offset,frame format
3016 btst &0x1,%d0
3019 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
3021 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3025 subq.w &0x8,%sp # make stack frame bigger
3026 mov.l 0x8(%sp),(%sp) # store SR,hi(PC)
3027 mov.w 0xc(%sp),0x4(%sp) # store lo(PC)
3028 mov.w &0x4008,0x6(%sp) # store voff
3029 mov.l 0x2(%sp),0x8(%sp) # store ea
3030 mov.l &0x09428001,0xc(%sp) # store fslw
3033 btst &0x5,(%sp) # user or supervisor mode?
3035 bset &0x2,0xd(%sp) # set supervisor TM bit
3044 btst &0x1,%d1
3046 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
3051 mov.l 0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
3052 mov.w 0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
3053 mov.w &0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
3054 mov.l %a0,-0x8+0xc+LOCAL_SIZE(%sp)
3055 mov.w %d0,-0x8+0x10+LOCAL_SIZE(%sp)
3056 mov.w &0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
3058 movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
3059 add.w &LOCAL_SIZE-0x4,%sp
3085 # - The fsave frame contains the adjusted src op for opclass 0,2 #
3112 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3114 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3120 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3138 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3140 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3167 andi.w &0x7fff,%d1
3168 cmpi.w %d1,&0x7fff
3174 andi.l &0x7fffffff,%d1
3181 mov.l &0x7fffffff,%d1
3184 addq.l &0x1,%d1
3206 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3216 andi.w &0x0007,%d1
3222 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3232 andi.w &0x0007,%d1
3238 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3248 andi.w &0x0007,%d1
3275 # - The fsave frame contains the adjusted src op for opclass 0,2 #
3308 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3310 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3316 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3334 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3336 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3377 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3387 andi.w &0x0007,%d1
3394 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3404 andi.w &0x0007,%d1
3411 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3421 andi.w &0x0007,%d1
3426 cmpi.b %d1,&0x7 # is <ea> mode a data reg?
3429 andi.l &0x80000000,%d0 # keep sign
3430 ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
3432 lsr.l &0x8,%d1 # shift mantissa for sgl
3443 andi.l &0x80000000,%d0 # keep sign
3444 ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
3447 lsr.l &0x8,%d1 # shift mantissa for sgl
3450 andi.w &0x0007,%d1
3456 andi.l &0x80000000,%d0 # keep sign
3457 ori.l &0x7ff80000,%d0 # insert new exponent,SNAN bit
3464 andi.l &0x000007ff,%d1
3472 movq.l &0x8,%d0 # pass: size of 8 bytes
3493 btst &0x5,EXC_SR(%a6) # supervisor mode exception?
3510 movq.l &0xc,%d0 # pass: size of extended
3531 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3533 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3539 mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
3540 mov.l LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
3541 mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
3544 mov.l LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
3547 add.l &LOCAL_SIZE-0x8,%sp
3562 # store_fpreg() - store opclass 0 or 2 result to FP regfile #
3567 # tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
3576 # - The fsave frame contains the adjusted src op for opclass 0,2 #
3600 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3602 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3608 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3620 # w/ an exponent value of 0x401e. we convert this to extended precision here.
3623 cmpi.w FP_SRC_EX(%a6),&0x401e # is exponent 0x401e?
3625 fmov.l &0x0,%fpcr
3628 mov.w &0xe001,0x2+FP_SRC(%a6)
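The 0x401e test above matches the comment: a long-word integer operand arrives with a biased exponent of 0x3fff + 31 = 0x401e. Assuming the 32-bit integer N is left-justified in the 64-bit mantissa (so its bit 31 lands on the explicit integer bit), the extended-precision encoding reads back as N exactly; this is only a worked check of the constant, not a statement of the hardware's exact frame layout:

    \[
    N \cdot 2^{-31} \cdot 2^{\,e-16383} \;=\; N
    \quad\Longleftrightarrow\quad
    e \;=\; 16383 + 31 \;=\; 16414 \;=\; \mathtt{0x401e}
    \]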
3639 andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
3641 fmov.l &0x0,%fpcr # zero current control regs
3642 fmov.l &0x0,%fpsr
3644 bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
3645 cmpi.b %d1,&0x17 # is op an fmovecr?
3655 btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
3658 btst &0x4,1+EXC_CMDREG(%a6) # is operation an fsincos?
3677 andi.w &0x007f,%d1 # extract extension
3691 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3693 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3704 andi.l &0x0000007f,%d1 # pass rom offset
3729 andi.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
3774 movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
3776 fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
3782 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
3796 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
3798 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
3841 cmpi.w 0x6(%sp),&0x402c
3882 andi.w &0x38, %d0 # extract mode field
3883 andi.l &0x7, %d1 # extract reg field
3885 cmpi.b %d0,&0x18 # is mode (An)+ ?
3888 cmpi.b %d0,&0x20 # is mode -(An) ?
3892 cmpi.b %d0,&0x3c # is mode #<data>?
3903 lea ([USER_FPIAR,%a6],0x4),%a0 # no; return <ea>
3925 cmpi.b %d0,&0xc # is opsize ext or packed?
3929 sub.l &0x8,%a0 # correct <ea>
3965 andi.w &0x38,%d0 # extract mode field
3966 andi.l &0x7,%d1 # extract reg field
3968 cmpi.b %d0,&0x18 # is mode (An)+ ?
3971 cmpi.b %d0,&0x20 # is mode -(An) ?
3985 swbeg &0x8
3997 addi.l &0xc,EXC_DREGS+0x8(%a6)
4000 addi.l &0xc,EXC_DREGS+0xc(%a6)
4003 add.l &0xc,%a2
4006 add.l &0xc,%a3
4009 add.l &0xc,%a4
4012 add.l &0xc,%a5
4015 addi.l &0xc,EXC_A6(%a6)
4019 addi.l &0xc,EXC_A7(%a6)
4028 sub.l &0x8,%a0
4029 sub.l &0x8,EXC_EA(%a6)
4032 swbeg &0x8
4044 mov.l %a0,EXC_DREGS+0x8(%a6)
4047 mov.l %a0,EXC_DREGS+0xc(%a6)
4090 long tbl_unsupp - tbl_unsupp # 0a: fatan
4092 long tbl_unsupp - tbl_unsupp # 0c: fasin
4093 long tbl_unsupp - tbl_unsupp # 0d: fatanh
4094 long tbl_unsupp - tbl_unsupp # 0e: fsin
4095 long tbl_unsupp - tbl_unsupp # 0f: ftan
4250 # 1111 0010 00 |<ea>| 11@& 1000 0$$$ 0000 #
4252 # & = (0): predecrement addressing mode #
4254 # @ = (0): move listed regs from memory to the FPU #
4269 andi.w &0x70,%d1 # extract reg bits
4270 lsr.b &0x4,%d1 # shift into lo bits
4275 andi.l &0x000000ff,%d0 # keep only lo byte
4289 btst &0x5,EXC_EXTWORD(%a6) # is it a move in or out?
4296 btst &0x4,EXC_EXTWORD(%a6) # control or predecrement?
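For the dynamic fmovm encoding sketched in the comment above (11@& 1000 0$$$ 0000), the @ and & flags sit in bits 13 and 12 of the extension word and the register-list Dn sits in bits 6-4, which is what the btst &0x5 / btst &0x4 on the high byte and the andi.w &0x70 / lsr.b &0x4 sequence extract. A decode sketch under that reading (struct and field names hypothetical):

    /* Sketch of the dynamic-fmovm extension word fields as inferred from the
     * btst/andi lines above: bit 13 = @ (0: memory -> FPU), bit 12 = & (0:
     * predecrement), bits 6-4 = $$$ (data register holding the list). */
    struct fmovm_ext {
        int to_fpu;      /* @ == 0 : move listed regs from memory to the FPU */
        int predec;      /* & == 0 : predecrement addressing mode            */
        int list_dreg;   /* Dn number that supplies the register list        */
    };

    static struct fmovm_ext decode_fmovm_ext(unsigned short ext)
    {
        struct fmovm_ext f;
        f.to_fpu    = ((ext >> 13) & 1) == 0;
        f.predec    = ((ext >> 12) & 1) == 0;
        f.list_dreg = (ext >> 4) & 0x7;
        return f;
    }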
4306 btst &0x5,EXC_SR(%a6) # user or supervisor mode?
4328 mov.l 0x0+EXC_FP0(%a6),(%a0)+ # yes
4329 mov.l 0x4+EXC_FP0(%a6),(%a0)+
4330 mov.l 0x8+EXC_FP0(%a6),(%a0)+
4333 lsl.b &0x1,%d1 # should FP1 be moved?
4336 mov.l 0x0+EXC_FP1(%a6),(%a0)+ # yes
4337 mov.l 0x4+EXC_FP1(%a6),(%a0)+
4338 mov.l 0x8+EXC_FP1(%a6),(%a0)+
4341 lsl.b &0x1,%d1 # should FP2 be moved?
4344 fmovm.x &0x20,(%a0) # yes
4345 add.l &0xc,%a0
4348 lsl.b &0x1,%d1 # should FP3 be moved?
4351 fmovm.x &0x10,(%a0) # yes
4352 add.l &0xc,%a0
4355 lsl.b &0x1,%d1 # should FP4 be moved?
4358 fmovm.x &0x08,(%a0) # yes
4359 add.l &0xc,%a0
4362 lsl.b &0x1,%d1 # should FP5 be moved?
4365 fmovm.x &0x04,(%a0) # yes
4366 add.l &0xc,%a0
4369 lsl.b &0x1,%d1 # should FP6 be moved?
4372 fmovm.x &0x02,(%a0) # yes
4373 add.l &0xc,%a0
4376 lsl.b &0x1,%d1 # should FP7 be moved?
4379 fmovm.x &0x01,(%a0) # yes
4380 add.l &0xc,%a0
4423 mov.l (%a0)+,0x0+EXC_FP0(%a6) # yes
4424 mov.l (%a0)+,0x4+EXC_FP0(%a6)
4425 mov.l (%a0)+,0x8+EXC_FP0(%a6)
4428 lsl.b &0x1,%d1 # should FP1 be moved?
4431 mov.l (%a0)+,0x0+EXC_FP1(%a6) # yes
4432 mov.l (%a0)+,0x4+EXC_FP1(%a6)
4433 mov.l (%a0)+,0x8+EXC_FP1(%a6)
4436 lsl.b &0x1,%d1 # should FP2 be moved?
4439 fmovm.x (%a0)+,&0x20 # yes
4442 lsl.b &0x1,%d1 # should FP3 be moved?
4445 fmovm.x (%a0)+,&0x10 # yes
4448 lsl.b &0x1,%d1 # should FP4 be moved?
4451 fmovm.x (%a0)+,&0x08 # yes
4454 lsl.b &0x1,%d1 # should FP5 be moved?
4457 fmovm.x (%a0)+,&0x04 # yes
4460 lsl.b &0x1,%d1 # should FP6 be moved?
4463 fmovm.x (%a0)+,&0x02 # yes
4466 lsl.b &0x1,%d1 # should FP7 be moved?
4469 fmovm.x (%a0)+,&0x01 # yes
4489 byte 0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
4490 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
4491 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
4492 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4493 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
4494 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4495 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4496 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4497 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
4498 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4499 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4500 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4501 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4502 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4503 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4504 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
4505 byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
4506 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4507 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4508 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4509 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4510 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4511 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4512 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
4513 byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
4514 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4515 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4516 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
4517 byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
4518 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
4519 byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
4520 byte 0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
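The 256-entry byte table above appears to be, entry for entry, 12 times the population count of its index: each bit in the fmovm register mask selects one 12-byte extended-precision register, so the table gives the total transfer size in bytes. It can be regenerated with a few lines of C (GCC builtin used for brevity):

    /* Regenerates the size table above: entry[i] = 12 * popcount(i),
     * i.e. 0x0c bytes per selected extended-precision register. */
    #include <stdio.h>

    int main(void)
    {
        for (int mask = 0; mask < 256; mask++)
            printf("0x%02x%c", 12 * __builtin_popcount(mask),
                   (mask % 8 == 7) ? '\n' : ',');
        return 0;
    }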
4525 # ex: 0x00 ==> 0x00
4526 # 0x01 ==> 0x80
4527 # 0x02 ==> 0x40
4530 # 0xfd ==> 0xbf
4531 # 0xfe ==> 0x7f
4532 # 0xff ==> 0xff
4535 byte 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
4536 byte 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
4537 byte 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
4538 byte 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
4539 byte 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
4540 byte 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
4541 byte 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
4542 byte 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
4543 byte 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
4544 byte 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
4545 byte 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
4546 byte 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
4547 byte 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
4548 byte 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
4549 byte 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
4550 byte 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
4551 byte 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
4552 byte 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
4553 byte 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
4554 byte 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
4555 byte 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
4556 byte 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
4557 byte 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
4558 byte 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
4559 byte 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
4560 byte 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
4561 byte 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
4562 byte 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
4563 byte 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
4564 byte 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
4565 byte 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
4566 byte 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
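The second 256-entry table maps each byte to its bit-reversed value, as the 0x01 ==> 0x80 examples above indicate; in context it is presumably used to flip the fmovm register list between postincrement and predecrement ordering. It can be regenerated the same way:

    /* Regenerates the bit-reverse table above: entry[i] = i with its 8 bits
     * mirrored (0x01 -> 0x80, 0x02 -> 0x40, ...). */
    #include <stdio.h>

    static unsigned bitrev8(unsigned x)
    {
        unsigned r = 0;
        for (int b = 0; b < 8; b++)
            if (x & (1u << b))
                r |= 0x80u >> b;
        return r;
    }

    int main(void)
    {
        for (int i = 0; i < 256; i++)
            printf("0x%02x%c", bitrev8((unsigned)i), (i % 8 == 7) ? '\n' : ',');
        return 0;
    }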
4580 andi.w &0x3f,%d0 # extract mode field
4581 andi.l &0x7,%d1 # extract reg field
4665 mov.l EXC_DREGS+0x8(%a6),%a0 # Get current a0
4669 mov.l EXC_DREGS+0xc(%a6),%a0 # Get current a1
4700 mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
4703 mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value
4708 mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
4711 mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value
4769 mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
4771 mov.l %d0,EXC_DREGS+0x8(%a6) # Save decr value
4776 mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
4778 mov.l %d0,EXC_DREGS+0xc(%a6) # Save decr value
4831 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4839 add.l EXC_DREGS+0x8(%a6),%a0 # a0 + d16
4844 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4852 add.l EXC_DREGS+0xc(%a6),%a0 # a1 + d16
4857 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4870 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4883 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4896 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4909 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4922 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4940 addq.l &0x8,%d1
4945 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
4953 btst &0x8,%d0
4959 rol.w &0x4,%d1
4960 andi.w &0xf,%d1 # extract index regno
4968 btst &0xb,%d2 # is it word or long?
4973 rol.w &0x7,%d1
4974 andi.l &0x3,%d1 # extract scale value
4990 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
5004 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5018 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
5029 subq.l &0x2,%a0 # adjust <ea>
5040 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
5047 subq.l &0x2,%a0 # adjust base
5049 btst &0x8,%d0 # is disp only 8 bits?
5055 rol.w &0x4,%d1 # rotate reg num into place
5056 andi.w &0xf,%d1 # extract register number
5064 btst &0xb,%d2 # is index word or long?
5069 rol.w &0x7,%d1 # rotate scale value into place
5070 andi.l &0x3,%d1 # extract scale value
5086 btst &0x6,%d0 # is the index suppressed?
5089 movm.l &0x3c00,-(%sp) # save d2-d5
5094 clr.l %d2 # yes, so index = 0
5103 movm.l &0x3c00,-(%sp) # save d2-d5
5108 btst &0xb,%d5 # is index word or long?
5119 btst &0x7,%d5 # is the bd suppressed?
5126 # beq.l fmovm_error # if (size == 0) it's reserved
5128 cmpi.b %d0,&0x2
5133 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5143 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
5159 cmpi.b %d0,&0x2
5164 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5174 addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
5189 btst &0x2,%d5 # pre or post indexing?
5219 movm.l (%sp)+,&0x003c # restore d2-d5
5226 movm.l (%sp)+,&0x003c # restore d2-d5
5227 mov.w &0x0101,%d0
5231 movm.l (%sp)+,&0x003c # restore d2-d5
5236 mov.w &0x00e1,%d0
5241 mov.w &0x0161,%d0
5287 cmpi.b %d0,&0x9c # fpcr & fpsr & fpiar ?
5289 cmpi.b %d0,&0x98 # fpcr & fpsr ?
5291 cmpi.b %d0,&0x94 # fpcr & fpiar ?
5297 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5305 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5317 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5325 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5337 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5345 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5357 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5365 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5373 addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
5403 # equal to 0x3fff and scale the SRC exponent by the value that the #
5423 andi.w &0x7fff,%d0
5424 andi.w &0x7fff,%d1
5431 # dst exp is > src exp; scale dst to exp = 0x3fff
5452 add.w 0x2(%sp),%d0 # scale src exponent by scale factor
5454 and.w &0x8000,%d1
5462 andi.w &0x8000,FP_SCR0_EX(%a6) # zero src exponent
5463 bset &0x0,1+FP_SCR0_EX(%a6) # set exp = 1
5468 # src exp is >= dst exp; scale src to exp = 0x3fff
5488 add.w 0x2(%sp),%d0 # scale dst exponent by scale factor
5490 andi.w &0x8000,%d1
5498 andi.w &0x8000,FP_SCR1_EX(%a6) # zero dst exponent
5499 bset &0x0,1+FP_SCR1_EX(%a6) # set exp = 1
5522 # Set the exponent of the input operand to 0x3fff. Save the value #
5534 andi.l &0x7fff,%d1 # extract operand's exponent
5536 andi.w &0x8000,%d0 # extract operand's sgn
5537 or.w &0x3fff,%d0 # insert new operand's exponent(=0)
5545 mov.l &0x3fff,%d0
5577 # to 0x3ffe and return a scale factor of "(exp-0x3ffe)/2". If the #
5579 # return a scale factor of "(exp-0x3fff)/2". #
5589 andi.l &0x7fff,%d1 # extract operand's exponent
5591 andi.w &0x8000,FP_SCR0_EX(%a6) # extract operand's sgn
5593 btst &0x0,%d1 # is exp even or odd?
5596 ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
5598 mov.l &0x3fff,%d0
5600 asr.l &0x1,%d0 # divide scale factor by 2
5604 ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
5606 mov.l &0x3ffe,%d0
5608 asr.l &0x1,%d0 # divide scale factor by 2
5615 btst &0x0,%d0 # is exp even or odd?
5618 ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
5620 add.l &0x3fff,%d0
5621 asr.l &0x1,%d0 # divide scale factor by 2
5625 ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
5627 add.l &0x3ffe,%d0
5628 asr.l &0x1,%d0 # divide scale factor by 2
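The even/odd test above exists because fsqrt halves the exponent: the operand's exponent is forced to 0x3fff when it was odd and to 0x3ffe when it was even, so the distance being halved is always even and the scale factor stays an integer. A sketch of just that target-selection rule (the sign convention of the returned factor follows the surrounding comments and is not asserted here):

    /* Sketch of the fsqrt scaling rule: pick the forced exponent so that the
     * distance from the original exponent is even, since sqrt halves it. */
    static unsigned scale_sqrt_target(unsigned biased_exp)
    {
        return (biased_exp & 1) ? 0x3fff   /* odd exponent  -> odd target  */
                                : 0x3ffe;  /* even exponent -> even target */
    }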
5649 # Set the exponent of the input operand to 0x3fff. Save the value #
5661 andi.l &0x7fff,%d1 # extract operand's exponent
5663 andi.w &0x8000,%d0 # extract operand's sgn
5664 or.w &0x3fff,%d0 # insert new operand's exponent(=0)
5672 mov.l &0x3fff,%d0
5732 bset &0x6, FP_SRC_HI(%a6) # set SNAN bit
5744 bset &0x6, FP_DST_HI(%a6) # set SNAN bit
5755 btst &0x7, FTEMP_EX(%a0) # is NAN neg?
5759 fmovm.x (%a0), &0x80
5787 fmovm.x nan_return(%pc), &0x80
5791 long 0x7fff0000, 0xffffffff, 0xffffffff
5828 short 0x0
5840 lsr.b &0x2, %d0 # shift prec to lo bits
5861 mov.l &0x20000000, %d0 # set sticky bit in return value
5863 clr.l FTEMP_HI(%a0) # set d1 = 0 (ms mantissa)
5864 clr.l FTEMP_LO(%a0) # set d2 = 0 (ls mantissa)
5873 # %d1{15:0} : denormalization threshold #
5899 ble.b dnrm_no_lp # d1 <= 0
5900 cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ?
5902 cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ?
5914 # case (0<d1<32)
5933 # |0.....0| NEW_HI | NEW_FTEMP_LO |grs |
5949 bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
5961 and.l &0xe0000000, %d0 # clear all but G,R,S
5985 # |0...............0|0....0| NEW_LO |grs |
5992 subi.w &0x20, %d1 # %d1 now between 0 and 32
5993 mov.l &0x20, %d0
6002 bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
6018 clr.l FTEMP_HI(%a0) # store FTEMP_HI = 0
6020 and.l &0xe0000000, %d0 # clear all but G,R,S
6046 mov.l &0x20000000, %d0 # set sticky bit
6066 # |0...............0|0................0|grs |
6072 and.l &0xc0000000, %d0 # extract G,R
6073 and.l &0x3fffffff, %d1 # extract other bits
6094 # |0...............0|0................0|0rs |
6099 and.l &0x80000000, %d0 # extract R bit
6100 lsr.l &0x1, %d0 # shift high bit into R bit
6101 and.l &0x7fffffff, %d1 # extract other bits
6170 # If (G,R,S == 0) then result is exact and round is done, else set
6182 # All of the following assumes grs != 0.
6196 # If sign of fp number = 0 (positive), then add 1 to l. #
6202 mov.l &0xffffffff, %d0 # force g,r,s to be all f's
6219 mov.l &0xffffffff, %d0 # force g,r,s to be all f's
6230 # If (g=1), then add 1 to l and if (r=s=0), then clear l #
6234 asl.l &0x1, %d0 # shift g-bit to c-bit
6245 set ad_1_sgl, 0x00000100 # constant to add 1 to l-bit in sgl prec
6246 set ad_1_dbl, 0x00000800 # constant to add 1 to l-bit in dbl prec
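The constants ad_1_sgl/ad_1_dbl add one to the least significant kept bit of a single- or double-precision mantissa, implementing the rule quoted above: for round-to-nearest, a set guard bit adds one to l, and if round and sticky are both clear the result is forced even. A stripped-down sketch of that rule (mantissa carry-out and the exponent bump are not modeled):

    /* Round-to-nearest-even on an integer mantissa, given guard/round/sticky:
     * if g is set, add 1 to the l-bit; on a tie (r == s == 0) clear l. */
    static unsigned long rnd_nearest(unsigned long mant, int g, int r, int s)
    {
        if (g) {
            mant += 1;            /* "add 1 to l" */
            if (!r && !s)
                mant &= ~1UL;     /* tie: force the result even */
        }
        return mant;
    }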
6256 add.w &0x1, FTEMP_EX(%a0) # and incr exponent
6258 tst.l %d0 # test for rs = 0
6260 and.w &0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
6262 and.l &0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
6274 roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
6275 roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
6278 add.w &0x1,FTEMP_EX(%a0) # and inc exp
6280 tst.l %d0 # test rs = 0
6282 and.b &0xfe,FTEMP_LO+3(%a0) # clear the l bit
6292 addq.l &0x1, FTEMP_HI(%a0) # propagate carry
6295 roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
6296 roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
6299 addq.w &0x1, FTEMP_EX(%a0) # incr exponent
6301 tst.l %d0 # test for rs = 0
6303 and.w &0xf000, FTEMP_LO+2(%a0) # clear the l-bit
6306 and.l &0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
6356 movm.l &0x3000, -(%sp) # make some temp registers {d2/d3}
6363 # 96 64 40 32 0
6378 and.l &0x0000003f, %d2 # s bit is the or of all other
6388 # 96 64 32 11 0
6403 and.l &0x000001ff, %d2 # s bit is the or-ing of all
6414 movm.l (%sp)+, &0xc # restore scratch registers {d2/d3}
6446 bfffo %d0{&0:&32}, %d2 # how many places to shift?
6451 bfextu %d1{&0:%d2}, %d3 # extract lo bits
6467 bfffo %d1{&0:&32}, %d2 # how many places to shift?
6503 bfffo FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
6510 bfffo FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
6521 and.w &0x7fff, %d1 # strip off sgn
6523 cmp.w %d0, %d1 # will denorm push exp < 0?
6524 bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0
6527 # exponent would not go < 0. Therefore, number stays normalized
6531 and.w &0x8000, %d0 # save old sign
6541 # exponent would go < 0, so only denormalize until exp = 0
6554 and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
6569 clr.l FTEMP_LO(%a0) # lo(man) = 0
6571 and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
6580 and.w &0x8000, FTEMP_EX(%a0) # force exponent to zero
6610 andi.w &0x7fff, %d0 # strip off sign
6611 cmpi.w %d0, &0x7fff # is (EXP == MAX)?
6614 btst &0x7,FTEMP_HI(%a0)
6620 tst.w %d0 # is exponent = 0?
6641 andi.w &0x8000,FTEMP_EX(%a0) # clear exponent
6651 and.l &0x7fffffff, %d0 # msb is a don't care!
6657 btst &0x6, FTEMP_HI(%a0)
6690 andi.l &0x7ff00000, %d0
6693 cmpi.l %d0, &0x7ff00000
6700 and.l &0x000fffff, %d1
6711 and.l &0x000fffff, %d1
6753 andi.l &0x7f800000, %d0
6756 cmpi.l %d0, &0x7f800000
6763 and.l &0x007fffff, %d1
6772 and.l &0x007fffff, %d1
6825 btst &0x7, FTEMP_EX(%a0) # make "internal" format
6829 and.w &0x7fff, %d1
6835 mov.l 0x4(%sp),%d0 # pass rnd prec.
6836 andi.w &0x00c0,%d0
6837 lsr.w &0x4,%d0
6841 mov.w 0x6(%sp),%d1 # load prec:mode into %d1
6842 andi.w &0xc0,%d1 # extract rnd prec
6843 lsr.w &0x4,%d1
6845 mov.w 0x6(%sp),%d1
6846 andi.w &0x30,%d1
6847 lsr.w &0x4,%d1
6853 bclr &0x7, FTEMP_EX(%a0) # clear sgn first; may have residue
6856 bset &0x7, FTEMP_EX(%a0) # set result sgn
6881 add.l &0x4, %sp # clear stack
6889 btst &0x7,FTEMP_EX(%a0) # make "internal" format
6893 and.w &0x7fff,%d1
6905 mov.w 0x6(%sp),%d1 # load rnd mode
6906 andi.w &0x30,%d1 # extract rnd prec
6907 lsr.w &0x4,%d1
6913 bclr &0x7,FTEMP_EX(%a0) # clear sgn first; may have residue
6916 bset &0x7,FTEMP_EX(%a0) # set result sgn
6941 add.l &0x4,%sp # clear stack
6955 # d1.b = '-1' => (-); '0' => (+) #
6978 andi.w &0x10,%d1 # keep result sign
6979 lsr.b &0x4,%d0 # shift prec/mode
6982 lsl.b &0x1,%d1 # multiply d1 by 2
6987 and.w &0x10, %d1 # keep result sign
6992 lsl.b &0x1, %d1 # shift left by 1
7005 byte 0x2, 0x0, 0x0, 0x2
7006 byte 0x2, 0x0, 0x0, 0x2
7007 byte 0x2, 0x0, 0x0, 0x2
7008 byte 0x0, 0x0, 0x0, 0x0
7009 byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
7010 byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
7011 byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
7014 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
7015 long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
7016 long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
7017 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
7019 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
7020 long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
7021 long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
7022 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
7024 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
7025 long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
7026 long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
7027 long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
7029 long 0x00000000,0x00000000,0x00000000,0x00000000
7030 long 0x00000000,0x00000000,0x00000000,0x00000000
7031 long 0x00000000,0x00000000,0x00000000,0x00000000
7032 long 0x00000000,0x00000000,0x00000000,0x00000000
7034 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
7035 long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
7036 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
7037 long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
7039 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
7040 long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
7041 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
7042 long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
7044 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
7045 long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
7046 long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
7047 long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
7106 swbeg &0x8
7127 fmovm.x SRC(%a0),&0x80 # load value
7134 fmov.l &0x0,%fpcr # clear FPCR
7139 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
7152 andi.w &0x7,%d1
7158 andi.l &0x80000000,%d1 # keep DENORM sign
7159 ori.l &0x00800000,%d1 # make smallest sgl
7173 fmovm.x SRC(%a0),&0x80 # load value
7180 fmov.l &0x0,%fpcr # clear FPCR
7185 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
7198 andi.w &0x7,%d1
7204 andi.l &0x80000000,%d1 # keep DENORM sign
7205 ori.l &0x00800000,%d1 # make smallest sgl
7219 fmovm.x SRC(%a0),&0x80 # load value
7226 fmov.l &0x0,%fpcr # clear FPCR
7232 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
7245 andi.w &0x7,%d1
7251 andi.l &0x80000000,%d1 # keep DENORM sign
7252 ori.l &0x00800000,%d1 # make smallest sgl
7273 fmovm.x SRC(%a0),&0x80 # return result
7279 mov.l &0xc,%d0 # pass: opsize is 12 bytes
7301 andi.b &0x0a,%d0 # is UNFL or INEX enabled?
7321 andi.w &0x7fff,%d0
7322 andi.w &0x8000,FP_SCR0_EX(%a6) # keep only old sign
7324 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
7335 andi.b &0x30,%d0 # clear rnd prec
7336 ori.b &s_mode*0x10,%d0 # insert sgl prec
7346 andi.w &0x7fff,%d0 # strip sign
7359 fmovm.x SRC(%a0),&0x80 # fetch fop from stack
7362 fmov.l &0x0,%fpsr # clear FPSR
7366 fmov.l &0x0,%fpcr # clear FPCR
7373 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
7386 andi.w &0x7,%d1
7403 clr.l %d0 # pass: S.F. = 0
7420 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
7433 andi.w &0x7,%d1
7438 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
7440 addq.l &0x4,%sp
7466 fmovm.x (%a0),&0x80 # load default overflow result
7470 andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
7483 andi.w &0x7,%d1
7488 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
7490 addq.l &0x4,%sp
7495 # (1) force the exp to 0x3fff
7503 andi.w &0x8000,%d1 # keep it,clear exp
7504 ori.w &0x3fff,%d1 # insert exp = 0
7512 fmov.l &0x0,%fpcr # clear FPCR
7515 fcmp.b %fp0,&0x2 # did exponent increase?
7534 andi.w &0x7fff,%d0
7547 bclr &0x7,FP_SCR0_EX(%a6) # clear sign bit
7552 lsr.b &0x4,%d1
7553 andi.w &0x0c,%d1
7556 lsr.b &0x4,%d1
7557 andi.w &0x03,%d1
7563 bset &0x7,FP_SCR0_EX(%a6) # yes
7566 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
7573 andi.b &0x30,%d0 # clear rnd prec
7574 ori.b &d_mode*0x10,%d0 # insert dbl prec
7584 andi.w &0x7fff,%d0 # strip sign
7597 fmovm.x SRC(%a0),&0x80 # fetch fop from stack
7600 fmov.l &0x0,%fpsr # clear FPSR
7604 fmov.l &0x0,%fpcr # clear FPCR
7611 movq.l &0x8,%d0 # pass: opsize is 8 bytes
7632 clr.l %d0 # pass: S.F. = 0
7652 movq.l &0x8,%d0 # pass: opsize is 8 bytes
7659 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
7661 addq.l &0x4,%sp
7669 andi.w &0x7ff,%d0
7687 fmovm.x (%a0),&0x80 # load default overflow result
7692 movq.l &0x8,%d0 # pass: opsize is 8 bytes
7699 andi.b &0x0a,%d1 # is UNFL or INEX enabled?
7701 addq.l &0x4,%sp
7706 # (1) force the exp to 0x3fff
7714 andi.w &0x8000,%d1 # keep it,clear exp
7715 ori.w &0x3fff,%d1 # insert exp = 0
7723 fmov.l &0x0,%fpcr # clear FPCR
7726 fcmp.b %fp0,&0x2 # did exponent increase?
7756 # 95 64 63 62 32 31 11 0 #
7764 # 63 51 32 31 0 #
7775 subq.w &0x1,%d0 # yes; denorm bias = DBL_BIAS - 1
7778 lsl.l &0x4,%d0 # d0 in proper place for dbl prec exp
7781 bset &0x1f,%d0 # if negative, set sign
7792 bfextu %d1{&0:&21},%d0 # get ls 21 bits of double
7821 # 95 64 63 62 40 32 31 12 0 #
7829 # 31 22 0 #
7840 subq.w &0x1,%d0 # yes; denorm bias = SGL_BIAS - 1
7843 lsl.l &0x7,%d0 # shift it into single exp bits
7846 bset &0x1f,%d0 # if negative, put in sign first
7849 andi.l &0x7fffff00,%d1 # get upper 23 bits of ms
7850 lsr.l &0x8,%d1 # and put them flush right
7863 btst &0x4,EXC_CMDREG(%a6) # static or dynamic?
7868 lsr.b &0x4,%d1
7869 andi.w &0x7,%d1
7887 # andi.l &0xcfff000f,FP_SCR0(%a6) # clear unused fields
7888 andi.l &0xcffff00f,FP_SCR0(%a6) # clear unused fields
7908 andi.w &0xf000,FP_SCR0(%a6)
7916 mov.l &0xc,%d0 # pass: opsize is 12 bytes
7949 bset &0x6,FP_SRC_HI(%a6) # set snan bit
7987 align 0x10
7989 long 0x3fff - 0x7ffe # ext_max
7990 long 0x3fff - 0x407e # sgl_max
7991 long 0x3fff - 0x43fe # dbl_max
7993 long 0x3fff + 0x0001 # ext_unfl
7994 long 0x3fff - 0x3f80 # sgl_unfl
7995 long 0x3fff - 0x3c00 # dbl_unfl
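These words are the comparison thresholds used by lines such as cmpi.l %d0,&0x3fff-0x3f80 further down: once an operand has been rescaled so its exponent reads 0x3fff, the scale factor in %d0 is effectively 0x3fff minus the original exponent, so comparing it against 0x3fff minus a precision limit asks whether the unscaled result would fall outside that precision's range. A sketch of the single-precision case under that reading (the exact <= versus < boundary depends on the branches that follow, which this excerpt does not show):

    /* Sketch: with scale = 0x3fff - original_exponent, range checks against
     * the single-precision limits reduce to simple comparisons. */
    static int sgl_would_underflow(long scale)
    {
        return scale >= 0x3fff - 0x3f80;   /* original exp at or below 0x3f80 */
    }

    static int sgl_would_overflow(long scale)
    {
        return scale <= 0x3fff - 0x407e;   /* original exp at or above 0x407e */
    }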
7999 andi.b &0x30,%d0 # clear rnd prec
8000 ori.b &s_mode*0x10,%d0 # insert sgl prec
8005 andi.b &0x30,%d0
8006 ori.b &d_mode*0x10,%d0 # insert dbl prec
8014 lsl.b &0x3,%d1
8035 lsr.b &0x6,%d1 # shift to lo bits
8054 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
8057 fmov.l &0x0,%fpsr # clear FPSR
8062 fmov.l &0x0,%fpcr # clear FPCR
8067 fmovm.x &0x80,FP_SCR0(%a6) # store out result
8071 andi.l &0x7fff,%d1 # strip sign
8072 andi.w &0x8000,%d2 # keep old sign
8077 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
8090 # of this operation then has its exponent scaled by -0x6000 to create the
8094 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
8097 fmov.l &0x0,%fpsr # clear FPSR
8102 fmov.l &0x0,%fpcr # clear FPCR
8111 andi.b &0x13,%d1 # is OVFL or INEX enabled?
8121 fmovm.x (%a0),&0x80 # return default result in fp0
8127 # with an extra -0x6000. if the precision is single or double, we need to
8132 andi.b &0xc0,%d1 # test the rnd prec
8136 fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
8141 andi.l &0x7fff,%d1 # strip sign
8143 subi.l &0x6000,%d1 # subtract bias
8144 andi.w &0x7fff,%d1 # clear sign bit
8145 andi.w &0x8000,%d2 # keep old sign
8149 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
8153 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
8156 andi.b &0x30,%d1 # keep rnd mode only
8161 fmov.l &0x0,%fpcr # clear FPCR
8172 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
8175 fmov.l &0x0,%fpsr # clear FPSR
8180 fmov.l &0x0,%fpcr # clear FPCR
8185 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
8201 # of this operation then has its exponent scaled by -0x6000 to create the
8210 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
8212 fmov.l &rz_mode*0x10,%fpcr # set FPCR
8213 fmov.l &0x0,%fpsr # clear FPSR
8218 fmov.l &0x0,%fpcr # clear FPCR
8223 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
8227 fmovm.x &0x80,FP_SCR0(%a6) # store out result
8233 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
8240 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
8243 andi.b &0xc0,%d1 # is precision extended?
8251 fmov.l &0x0,%fpsr # clear FPSR
8255 fmov.l &0x0,%fpcr # clear FPCR
8257 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
8261 andi.l &0x7fff,%d1 # strip sign
8262 andi.w &0x8000,%d2 # keep old sign
8264 addi.l &0x6000,%d1 # add bias
8265 andi.w &0x7fff,%d1
8269 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
8274 andi.b &0x30,%d1 # use only rnd mode
8283 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
8286 fmov.l &0x0,%fpsr # clear FPSR
8291 fmov.l &0x0,%fpcr # clear FPCR
8296 fcmp.b %fp1,&0x2 # is |result| > 2.b?
8307 fmovm.x FP_SCR1(%a6),&0x40 # load dst operand
8310 andi.b &0xc0,%d1 # keep rnd prec
8311 ori.b &rz_mode*0x10,%d1 # insert RZ
8314 fmov.l &0x0,%fpsr # clear FPSR
8318 fmov.l &0x0,%fpcr # clear FPCR
8320 fcmp.b %fp1,&0x2 # is |result| < 2.b?
8406 fmov.s &0x80000000,%fp0 # load -ZERO
8410 fmov.s &0x00000000,%fp0 # load +ZERO
8424 fmovm.x DST(%a1),&0x80 # return INF result in fp0
8441 fmovm.x SRC(%a0),&0x80 # return INF result in fp0
8484 andi.b &0x30,%d0 # clear rnd prec
8485 ori.b &s_mode*0x10,%d0 # insert sgl precision
8490 andi.b &0x30,%d0 # clear rnd prec
8491 ori.b &d_mode*0x10,%d0 # insert dbl precision
8504 andi.b &0xc0,%d0 # is precision extended?
8516 fmovm.x SRC(%a0),&0x80 # return result in fp0
8524 andi.b &0xc0,%d0 # is precision extended?
8532 fmovm.x SRC(%a0),&0x80 # return result in fp0
8539 # normalize the mantissa and add the bias of 0x6000 to the resulting negative
8549 addi.w &0x6000,%d0 # add new bias to exponent
8551 andi.w &0x8000,%d1 # keep old sign
8552 andi.w &0x7fff,%d0 # clear sign position
8555 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
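Several exception paths in this listing build the EXOP (exceptional operand) by sliding the exponent back into extended range with a fixed 0x6000 adjustment while keeping the original sign, exactly the addi/andi pattern just above. A minimal C sketch of that adjustment; the helper name is made up, and whether 0x6000 is added or subtracted depends on whether the path handles underflow or overflow:

    #include <stdint.h>

    /* Sketch only: adjust the 15-bit biased exponent of an extended operand
     * by 0x6000 (added on underflow paths, subtracted on overflow paths) and
     * preserve the sign bit, mirroring the addi/subi &0x6000 plus
     * andi &0x8000 / andi &0x7fff sequences in this listing. */
    static uint16_t exop_sign_exp(uint16_t sign_exp, int is_underflow)
    {
        uint16_t sign = sign_exp & 0x8000u;            /* keep old sign         */
        int32_t  exp  = sign_exp & 0x7fff;             /* strip sign            */
        exp += is_underflow ? 0x6000 : -0x6000;        /* slide back into range */
        return (uint16_t)(sign | (exp & 0x7fff));      /* clear sign position   */
    }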
8562 cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
8574 cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
8576 cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
8584 fmov.l &0x0,%fpsr # clear FPSR
8590 fmov.l &0x0,%fpcr # clear FPCR
8596 fmovm.x &0x80,FP_SCR0(%a6) # store out result
8599 andi.l &0x7fff,%d1 # strip sign
8601 andi.w &0x8000,%d2 # keep old sign
8605 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
8617 cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
8619 cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
8637 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
8645 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
8659 andi.l &0x7fff,%d1 # strip sign
8661 andi.w &0x8000,%d2 # extract old sign
8662 addi.l &0x6000,%d1 # add new bias
8663 andi.w &0x7fff,%d1
8666 fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
8674 fmov.l &0x0,%fpsr # clear FPSR
8679 fmov.l &0x0,%fpcr # clear FPCR
8688 andi.b &0x13,%d1 # is OVFL or INEX enabled?
8701 fmovm.x (%a0),&0x80 # return default result in fp0
8713 andi.l &0x7fff,%d1 # strip sign
8714 andi.w &0x8000,%d2 # keep old sign
8716 sub.l &0x6000,%d1 # subtract bias
8717 andi.w &0x7fff,%d1
8721 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
8728 fmov.l &0x0,%fpsr # clear FPSR
8734 fmov.l &0x0,%fpcr # clear FPCR
8739 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
8765 rol.l &0x8,%d0 # put ccodes in lo byte
8804 align 0x10
8806 long 0x3fff - 0x0000 # ext_unfl
8807 long 0x3fff - 0x3f81 # sgl_unfl
8808 long 0x3fff - 0x3c01 # dbl_unfl
8811 long 0x3fff - 0x7ffe # ext overflow exponent
8812 long 0x3fff - 0x407e # sgl overflow exponent
8813 long 0x3fff - 0x43fe # dbl overflow exponent
8817 andi.b &0x30,%d0 # clear rnd prec
8818 ori.b &s_mode*0x10,%d0 # insert sgl prec
8823 andi.b &0x30,%d0 # clear rnd prec
8824 ori.b &d_mode*0x10,%d0 # insert dbl prec
8832 lsl.b &0x3,%d1
8858 lsr.b &0x6,%d1 # shift to lo bits
8868 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
8871 fmov.l &0x0,%fpsr # clear FPSR
8876 fmov.l &0x0,%fpcr # clear FPCR
8881 fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
8885 andi.l &0x7fff,%d1 # strip sign
8886 andi.w &0x8000,%d2 # keep old sign
8891 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
8895 long 0x7fff
8896 long 0x407f
8897 long 0x43ff
8906 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
8909 fmov.l &0x0,%fpsr # clear FPSR
8914 fmov.l &0x0,%fpcr # clear FPCR
8918 fmovm.x &0x01,-(%sp) # save result to stack
8920 add.l &0xc,%sp # clear result from stack
8921 andi.l &0x7fff,%d0 # strip sign
8931 andi.b &0x13,%d1 # is OVFL or INEX enabled?
8940 fmovm.x (%a0),&0x80 # return default result in fp0
8945 andi.b &0xc0,%d1 # is precision extended?
8949 fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
8954 andi.l &0x7fff,%d1 # strip sign
8956 subi.l &0x6000,%d1 # subtract bias
8957 andi.w &0x7fff,%d1 # clear sign bit
8958 andi.w &0x8000,%d2 # keep old sign
8962 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
8966 fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
8969 andi.b &0x30,%d1 # keep rnd mode
8974 fmov.l &0x0,%fpcr # clear FPCR
8980 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
8982 fmov.l &rz_mode*0x10,%fpcr # set FPCR
8983 fmov.l &0x0,%fpsr # clear FPSR
8988 fmov.l &0x0,%fpcr # clear FPCR
8993 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
8997 fmovm.x &0x80,FP_SCR0(%a6) # store out result
9003 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
9010 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
9013 andi.b &0xc0,%d1 # is precision extended?
9019 fmov.l &0x0,%fpsr # clear FPSR
9023 fmov.l &0x0,%fpcr # clear FPCR
9025 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
9029 andi.l &0x7fff,%d1 # strip sign
9030 andi.w &0x8000,%d2 # keep old sign
9032 addi.l &0x6000,%d1 # add bias
9033 andi.w &0x7fff,%d1
9037 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
9042 andi.b &0x30,%d1 # use only rnd mode
9051 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
9054 fmov.l &0x0,%fpsr # clear FPSR
9059 fmov.l &0x0,%fpcr # clear FPCR
9064 fcmp.b %fp1,&0x1 # is |result| > 1.b?
9075 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
9078 andi.b &0xc0,%d1 # keep rnd prec
9079 ori.b &rz_mode*0x10,%d1 # insert RZ
9082 fmov.l &0x0,%fpsr # clear FPSR
9086 fmov.l &0x0,%fpcr # clear FPCR
9088 fcmp.b %fp1,&0x1 # is |result| < 1.b?
9170 fmov.s &0x80000000,%fp0 # load a -ZERO
9174 fmov.s &0x00000000,%fp0 # load a +ZERO
9190 fmov.s &0xff800000,%fp0 # make result -INF
9194 fmov.s &0x7f800000,%fp0 # make result +INF
9211 fmovm.x DST(%a1),&0x80 # return result in fp0
9218 fmovm.x DST(%a1),&0x80 # return result in fp0
9258 andi.b &0x30,%d0 # clear rnd prec
9259 ori.b &s_mode*0x10,%d0 # insert sgl precision
9264 andi.b &0x30,%d0 # clear rnd prec
9265 ori.b &d_mode*0x10,%d0 # insert dbl prec
9277 andi.b &0xc0,%d0 # is precision extended?
9288 eori.w &0x8000,%d0 # negate sign
9293 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
9301 andi.b &0xc0,%d0 # is precision extended?
9309 eori.w &0x8000,%d0 # negate sign
9314 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
9322 # normalize the mantissa and add the bias of 0x6000 to the resulting negative
9329 addi.w &0x6000,%d0 # add new bias to exponent
9331 andi.w &0x8000,%d1 # keep old sign
9332 andi.w &0x7fff,%d0 # clear sign position
9335 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
9342 cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
9354 cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
9356 cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
9364 fmov.l &0x0,%fpsr # clear FPSR
9370 fmov.l &0x0,%fpcr # clear FPCR
9376 fmovm.x &0x80,FP_SCR0(%a6) # store out result
9379 andi.l &0x7fff,%d1 # strip sign
9381 andi.w &0x8000,%d2 # keep old sign
9385 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
9397 cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
9399 cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
9410 eori.b &0x80,FP_SCR0_EX(%a6) # negate sign
9417 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
9425 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
9439 andi.l &0x7fff,%d1 # strip sign
9440 andi.w &0x8000,%d2 # keep old sign
9442 addi.l &0x6000,%d1 # add new bias
9443 andi.w &0x7fff,%d1
9446 fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
9454 fmov.l &0x0,%fpsr # clear FPSR
9459 fmov.l &0x0,%fpcr # clear FPCR
9468 andi.b &0x13,%d1 # is OVFL or INEX enabled?
9481 fmovm.x (%a0),&0x80 # return default result in fp0
9493 andi.l &0x7fff,%d1 # strip sign
9494 andi.w &0x8000,%d2 # keep old sign
9496 subi.l &0x6000,%d1 # subtract bias
9497 andi.w &0x7fff,%d1
9500 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
9508 fmov.l &0x0,%fpsr # clear FPSR
9514 fmov.l &0x0,%fpcr # clear FPCR
9519 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
9545 rol.l &0x8,%d0 # put ccodes in lo byte
9669 andi.b &0x30,%d0 # set prec = ext
9672 fmov.l &0x0,%fpsr # clear FPSR
9676 fmov.l &0x0,%fpcr # clear FPCR
9707 mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
9718 fmov.s &0x00000000,%fp0 # return +ZERO in fp0
9722 fmov.s &0x80000000,%fp0 # return -ZERO in fp0
9730 fmovm.x SRC(%a0),&0x80 # return result in fp0
9775 fmov.l &0x0,%fpsr # clear FPSR
9809 mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
9820 fmov.s &0x00000000,%fp0 # return +ZERO in fp0
9824 fmov.s &0x80000000,%fp0 # return -ZERO in fp0
9832 fmovm.x SRC(%a0),&0x80 # return result in fp0
9850 # scale_to_zero_src() - make exponent = 0; get scale factor #
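The comment above names the central trick these emulation paths use: force the operand's biased exponent to 0x3fff (true exponent 0) and remember how far it was moved. A hedged C sketch, assuming the scale factor is the count added to reach 0x3fff, which is the sign convention the threshold comparisons elsewhere in the listing (0x3fff - 0x407e, 0x3fff - 0x3f80, and so on) imply:

    #include <stdint.h>

    /* Sketch only: replace the biased exponent with 0x3fff and return how
     * many counts were added to get there, so the caller can judge whether
     * the eventual result would overflow or underflow its destination
     * precision.  sign_exp is the sign+exponent word of an extended operand. */
    static int32_t scale_to_zero(uint16_t *sign_exp)
    {
        int32_t old_exp = *sign_exp & 0x7fff;               /* biased exponent */
        *sign_exp = (uint16_t)((*sign_exp & 0x8000u) | 0x3fffu);
        return 0x3fff - old_exp;                            /* scale factor    */
    }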
9881 andi.b &0x30,%d0 # clear rnd prec
9882 ori.b &s_mode*0x10,%d0 # insert sgl precision
9887 andi.b &0x30,%d0 # clear rnd prec
9888 ori.b &d_mode*0x10,%d0 # insert dbl precision
9900 andi.b &0xc0,%d0 # is precision extended?
9913 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
9921 andi.b &0xc0,%d0 # is precision extended?
9932 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
9940 # normalize the mantissa and add the bias of 0x6000 to the resulting negative
9947 addi.w &0x6000,%d0 # add new bias to exponent
9949 andi.w &0x8000,%d1 # keep old sign
9950 andi.w &0x7fff,%d0 # clear sign position
9953 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
9960 cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
9972 cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
9974 cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
9982 fmov.l &0x0,%fpsr # clear FPSR
9988 fmov.l &0x0,%fpcr # clear FPCR
9994 fmovm.x &0x80,FP_SCR0(%a6) # store out result
9997 andi.l &0x7fff,%d1 # strip sign
9999 andi.w &0x8000,%d2 # keep old sign
10003 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
10015 cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
10017 cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
10028 bclr &0x7,FP_SCR0_EX(%a6) # force absolute value
10032 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
10040 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
10054 andi.l &0x7fff,%d1 # strip sign
10055 andi.w &0x8000,%d2 # keep old sign
10057 addi.l &0x6000,%d1 # add new bias
10058 andi.w &0x7fff,%d1
10061 fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
10069 fmov.l &0x0,%fpsr # clear FPSR
10074 fmov.l &0x0,%fpcr # clear FPCR
10083 andi.b &0x13,%d1 # is OVFL or INEX enabled?
10096 fmovm.x (%a0),&0x80 # return default result in fp0
10108 andi.l &0x7fff,%d1 # strip sign
10109 andi.w &0x8000,%d2 # keep old sign
10111 subi.l &0x6000,%d1 # subtract bias
10112 andi.w &0x7fff,%d1
10115 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
10123 fmov.l &0x0,%fpsr # clear FPSR
10129 fmov.l &0x0,%fpcr # clear FPCR
10134 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
10191 lsl.b &0x3,%d1
10199 fmovm.x DST(%a1),&0x80 # load dst op
10204 rol.l &0x8,%d0 # extract ccode bits
10276 andi.b &0xf7,FPSR_CC(%a6) # clear N (negative) ccode bit
10280 andi.b &0xf7,FPSR_CC(%a6) # clear N (negative) ccode bit
10395 lsl.b &0x3,%d1
10416 cmpi.l %d0,&0x3fff-0x7ffe # would result ovfl?
10420 cmpi.l %d0,&0x3fff+0x0001 # would result unfl?
10425 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
10428 fmov.l &0x0,%fpsr # clear FPSR
10433 fmov.l &0x0,%fpcr # clear FPCR
10438 fmovm.x &0x80,FP_SCR0(%a6) # store out result
10442 andi.l &0x7fff,%d1 # strip sign
10443 andi.w &0x8000,%d2 # keep old sign
10448 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
10452 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
10455 fmov.l &0x0,%fpsr # clear FPSR
10460 fmov.l &0x0,%fpcr # clear FPCR
10470 andi.b &0x13,%d1 # is OVFL or INEX enabled?
10477 andi.b &0x30,%d0 # force prec = ext
10480 fmovm.x (%a0),&0x80 # return default result in fp0
10484 fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
10489 andi.l &0x7fff,%d1 # strip sign
10491 subi.l &0x6000,%d1 # subtract bias
10492 andi.w &0x7fff,%d1
10493 andi.w &0x8000,%d2 # keep old sign
10497 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
10501 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
10504 fmov.l &0x0,%fpsr # clear FPSR
10509 fmov.l &0x0,%fpcr # clear FPCR
10514 fcmp.b %fp1,&0x2 # is |result| >= 2.b?
10523 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
10525 fmov.l &rz_mode*0x10,%fpcr # set FPCR
10526 fmov.l &0x0,%fpsr # clear FPSR
10531 fmov.l &0x0,%fpcr # clear FPCR
10536 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
10540 fmovm.x &0x80,FP_SCR0(%a6) # store out result
10546 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
10553 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
10556 fmov.l &0x0,%fpsr # clear FPSR
10560 fmov.l &0x0,%fpcr # clear FPCR
10562 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
10566 andi.l &0x7fff,%d1 # strip sign
10567 andi.w &0x8000,%d2 # keep old sign
10569 addi.l &0x6000,%d1 # add bias
10570 andi.w &0x7fff,%d1
10574 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
10578 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
10581 fmov.l &0x0,%fpsr # clear FPSR
10586 fmov.l &0x0,%fpcr # clear FPCR
10591 fcmp.b %fp1,&0x2 # is |result| > 2.b?
10602 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
10605 andi.b &0xc0,%d1 # keep rnd prec
10606 ori.b &rz_mode*0x10,%d1 # insert RZ
10609 fmov.l &0x0,%fpsr # clear FPSR
10613 fmov.l &0x0,%fpcr # clear FPCR
10615 fcmp.b %fp1,&0x2 # is |result| < 2.b?
10736 lsl.b &0x3,%d1
10762 lsr.b &0x6,%d1
10764 cmpi.l %d0,&0x3fff-0x7ffe
10767 cmpi.l %d0,&0x3fff-0x0000 # will result underflow?
10772 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
10775 fmov.l &0x0,%fpsr # clear FPSR
10780 fmov.l &0x0,%fpcr # clear FPCR
10785 fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
10789 andi.l &0x7fff,%d1 # strip sign
10790 andi.w &0x8000,%d2 # keep old sign
10795 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
10799 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
10802 fmov.l &0x0,%fpsr # clear FPSR
10807 fmov.l &0x0,%fpcr # clear FPCR
10811 fmovm.x &0x01,-(%sp) # save result to stack
10813 add.l &0xc,%sp # clear result
10814 andi.l &0x7fff,%d1 # strip sign
10816 cmp.l %d1,&0x7fff # did divide overflow?
10823 andi.b &0x13,%d1 # is OVFL or INEX enabled?
10830 andi.b &0x30,%d0 # kill precision
10833 fmovm.x (%a0),&0x80 # return default result in fp0
10837 fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
10842 andi.l &0x7fff,%d1 # strip sign
10843 andi.w &0x8000,%d2 # keep old sign
10845 subi.l &0x6000,%d1 # subtract new bias
10846 andi.w &0x7fff,%d1 # clear ms bit
10850 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
10856 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
10858 fmov.l &rz_mode*0x10,%fpcr # set FPCR
10859 fmov.l &0x0,%fpsr # clear FPSR
10864 fmov.l &0x0,%fpcr # clear FPCR
10869 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
10873 fmovm.x &0x80,FP_SCR0(%a6) # store out result
10879 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
10886 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
10889 fmov.l &0x0,%fpsr # clear FPSR
10893 fmov.l &0x0,%fpcr # clear FPCR
10895 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
10899 andi.l &0x7fff,%d1 # strip sign
10900 andi.w &0x8000,%d2 # keep old sign
10902 addi.l &0x6000,%d1 # add bias
10903 andi.w &0x7fff,%d1 # clear top bit
10907 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
10914 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
10917 fmov.l &0x0,%fpsr # clear FPSR
10922 fmov.l &0x0,%fpcr # clear FPCR
10927 fcmp.b %fp1,&0x1 # is |result| > 1.b?
10938 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into %fp1
10941 ori.b &rz_mode*0x10,%d1 # force RZ rnd mode
10944 fmov.l &0x0,%fpsr # clear FPSR
10948 fmov.l &0x0,%fpcr # clear FPCR
10950 fcmp.b %fp1,&0x1 # is |result| < 1.b?
11068 andi.b &0x30,%d0 # clear rnd prec
11069 ori.b &s_mode*0x10,%d0 # insert sgl prec
11074 andi.b &0x30,%d0 # clear rnd prec
11075 ori.b &d_mode*0x10,%d0 # insert dbl prec
11083 lsl.b &0x3,%d1
11095 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
11097 fmov.l &0x0,%fpsr # clear FPSR
11102 fmov.l &0x0,%fpcr # clear FPCR
11111 fmovm.x &0x01,-(%sp) # save result to stack
11114 lsr.b &0x6,%d1
11117 andi.l &0x7fff,%d2 # strip sign
11129 andi.w &0x8000,%d1 # keep sign
11133 fmovm.x (%sp)+,&0x80 # return result in fp0
11139 # fmov.s &0x00000000,%fp0 # return zero in fp0
11143 long 0x7fff # ext ovfl
11144 long 0x407f # sgl ovfl
11145 long 0x43ff # dbl ovfl
11148 long 0x0000 # ext unfl
11149 long 0x3f81 # sgl unfl
11150 long 0x3c01 # dbl unfl
11156 andi.b &0x13,%d1 # is OVFL or INEX enabled?
11159 add.l &0xc,%sp
11166 fmovm.x (%a0),&0x80 # return default result in fp0
11172 andi.b &0xc0,%d1 # is precision extended?
11177 andi.w &0x8000,%d1 # keep sign
11178 subi.l &0x6000,%d2 # subtract new bias
11179 andi.w &0x7fff,%d2
11183 fmovm.x (%sp)+,&0x40 # return EXOP in fp1
11187 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
11190 andi.b &0x30,%d1 # keep rnd mode
11195 fmov.l &0x0,%fpcr # clear FPCR
11197 add.l &0xc,%sp
11198 fmovm.x &0x01,-(%sp)
11204 add.l &0xc,%sp
11206 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
11208 fmov.l &rz_mode*0x10,%fpcr # set FPCR
11209 fmov.l &0x0,%fpsr # clear FPSR
11213 fmov.l &0x0,%fpcr # clear FPCR
11219 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
11223 fmovm.x &0x80,FP_SCR0(%a6) # store out result
11229 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
11234 fmovm.x FP_SCR1(%a6),&0x40 # load dst op
11237 andi.b &0xc0,%d1 # is precision extended?
11243 fmov.l &0x0,%fpsr # clear FPSR
11247 fmov.l &0x0,%fpcr # clear FPCR
11249 fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
11252 andi.l &0x7fff,%d1 # strip sign
11253 andi.w &0x8000,%d2 # keep old sign
11255 addi.l &0x6000,%d1 # add new bias
11256 andi.w &0x7fff,%d1 # clear top bit
11259 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
11264 andi.b &0x30,%d1 # use only rnd mode
11276 andi.b &0xc0,%d1
11279 mov.l 0x4(%sp),%d1 # extract hi(man)
11280 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
11283 tst.l 0x8(%sp) # is lo(man) = 0x0?
11292 # 0x8000000000000000 and this mantissa is the result of rounding non-zero
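The test above checks whether the 64-bit mantissa on the stack is exactly 0x8000000000000000; per the comment fragment, the surrounding code then has to decide whether that mantissa is the result of rounding up a non-zero tail, since a result sitting exactly on a power of two would otherwise be misclassified by its exponent alone. The check itself as a tiny C sketch (the follow-up handling is not shown in this listing):

    #include <stdint.h>

    /* Sketch of the two-part comparison above. */
    static int mantissa_is_exact_power_of_two(uint32_t man_hi, uint32_t man_lo)
    {
        return man_hi == 0x80000000u && man_lo == 0x0u;
    }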
11299 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
11302 andi.b &0xc0,%d1 # keep rnd prec
11303 ori.b &rz_mode*0x10,%d1 # insert rnd mode
11305 fmov.l &0x0,%fpsr # clear FPSR
11309 fmov.l &0x0,%fpcr # clear FPCR
11401 fmov.s &0x00000000,%fp0 # return +ZERO
11412 andi.b &0x30,%d1 # extract rnd mode
11413 cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM?
11415 fmov.s &0x00000000,%fp0 # return +ZERO
11420 fmov.s &0x80000000,%fp0 # return -ZERO
11465 fmovm.x SRC(%a0),&0x80 # return src INF
11475 fmovm.x DST(%a1),&0x80 # return dst INF
11521 andi.b &0x30,%d0 # clear rnd prec
11522 ori.b &s_mode*0x10,%d0 # insert sgl prec
11527 andi.b &0x30,%d0 # clear rnd prec
11528 ori.b &d_mode*0x10,%d0 # insert dbl prec
11536 lsl.b &0x3,%d1
11548 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
11550 fmov.l &0x0,%fpsr # clear FPSR
11555 fmov.l &0x0,%fpcr # clear FPCR
11564 fmovm.x &0x01,-(%sp) # save result to stack
11567 lsr.b &0x6,%d1
11570 andi.l &0x7fff,%d2 # strip sign
11582 andi.w &0x8000,%d1 # keep sign
11586 fmovm.x (%sp)+,&0x80 # return result in fp0
11592 # fmov.s &0x00000000,%fp0 # return zero in fp0
11596 long 0x7fff # ext ovfl
11597 long 0x407f # sgl ovfl
11598 long 0x43ff # dbl ovfl
11601 long 0x0000 # ext unfl
11602 long 0x3f81 # sgl unfl
11603 long 0x3c01 # dbl unfl
11609 andi.b &0x13,%d1 # is OVFL or INEX enabled?
11612 add.l &0xc,%sp
11619 fmovm.x (%a0),&0x80 # return default result in fp0
11625 andi.b &0xc0,%d1 # is precision extended?
11630 andi.w &0x8000,%d1 # keep sign
11631 subi.l &0x6000,%d2 # subtract new bias
11632 andi.w &0x7fff,%d2 # clear top bit
11636 fmovm.x (%sp)+,&0x40 # return EXOP in fp1
11640 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
11643 andi.b &0x30,%d1 # clear rnd prec
11648 fmov.l &0x0,%fpcr # clear FPCR
11650 add.l &0xc,%sp
11651 fmovm.x &0x01,-(%sp)
11657 add.l &0xc,%sp
11659 fmovm.x FP_SCR1(%a6),&0x80 # load dst op
11661 fmov.l &rz_mode*0x10,%fpcr # set FPCR
11662 fmov.l &0x0,%fpsr # clear FPSR
11666 fmov.l &0x0,%fpcr # clear FPCR
11672 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
11676 fmovm.x &0x80,FP_SCR0(%a6) # store out result
11682 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
11687 fmovm.x FP_SCR1(%a6),&0x40
11690 andi.b &0xc0,%d1 # is precision extended?
11696 fmov.l &0x0,%fpsr # clear FPSR
11700 fmov.l &0x0,%fpcr # clear FPCR
11702 fmovm.x &0x40,FP_SCR0(%a6) # store result to stack
11705 andi.l &0x7fff,%d1 # strip sign
11706 andi.w &0x8000,%d2 # keep old sign
11708 addi.l &0x6000,%d1 # add new bias
11709 andi.w &0x7fff,%d1 # clear top bit
11712 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
11717 andi.b &0x30,%d1 # clear rnd prec
11729 andi.b &0xc0,%d1 # fetch rnd prec
11732 mov.l 0x4(%sp),%d1
11733 cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
11736 tst.l 0x8(%sp) # is lo(man) = 0x0?
11745 # 0x8000000000000000 and this mantissa is the result of rounding non-zero
11752 fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
11755 andi.b &0xc0,%d1 # keep rnd prec
11756 ori.b &rz_mode*0x10,%d1 # insert rnd mode
11758 fmov.l &0x0,%fpsr # clear FPSR
11762 fmov.l &0x0,%fpcr # clear FPCR
11853 fmov.s &0x00000000,%fp0 # no; return +ZERO
11864 andi.b &0x30,%d1 # extract rnd mode
11865 cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM?
11867 fmov.s &0x00000000,%fp0 # no; return +ZERO
11872 fmov.s &0x80000000,%fp0 # return -ZERO
11914 fmovm.x SRC(%a0),&0x80 # return src INF
11921 fmovm.x DST(%a1),&0x80 # return dst INF
11966 andi.b &0x30,%d0 # clear rnd prec
11967 ori.b &s_mode*0x10,%d0 # insert sgl precision
11972 andi.b &0x30,%d0 # clear rnd prec
11973 ori.b &d_mode*0x10,%d0 # insert dbl precision
11989 andi.b &0xc0,%d0 # is precision extended?
11993 fmov.l &0x0,%fpsr # clear FPSR
12006 andi.b &0xc0,%d0 # is precision extended?
12021 cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
12034 cmpi.l %d0,&0x3fff-0x3f81 # will move in underflow?
12037 cmpi.l %d0,&0x3fff-0x407f # will move in overflow?
12045 fmov.l &0x0,%fpsr # clear FPSR
12051 fmov.l &0x0,%fpcr # clear FPCR
12057 fmovm.x &0x80,FP_SCR0(%a6) # store out result
12060 andi.l &0x7fff,%d1 # strip sign
12062 andi.w &0x8000,%d2 # keep old sign
12066 fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
12079 cmpi.l %d0,&0x3fff-0x3c01 # will move in underflow?
12082 cmpi.l %d0,&0x3fff-0x43ff # will move in overflow?
12091 btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
12100 fmov.l &rz_mode*0x10,%fpcr # set FPCR
12101 fmov.l &0x0,%fpsr # clear FPSR
12106 fmov.l &0x0,%fpcr # clear FPCR
12112 andi.b &0x0b,%d1 # is UNFL or INEX enabled?
12116 fmovm.x &0x80,FP_SCR0(%a6) # store out result
12122 fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
12136 andi.l &0x7fff,%d1 # strip sign
12137 andi.w &0x8000,%d2 # keep old sign
12139 addi.l &0x6000,%d1 # add new bias
12140 andi.w &0x7fff,%d1
12143 fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
12151 fmov.l &0x0,%fpsr # clear FPSR
12156 fmov.l &0x0,%fpcr # clear FPCR
12165 andi.b &0x13,%d1 # is OVFL or INEX enabled?
12178 fmovm.x (%a0),&0x80 # return default result in fp0
12190 andi.l &0x7fff,%d1 # strip sign
12191 andi.w &0x8000,%d2 # keep old sign
12193 subi.l &0x6000,%d1 # subtract bias
12194 andi.w &0x7fff,%d1
12197 fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
12205 btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
12208 fmov.l &0x0,%fpsr # clear FPSR
12214 fmov.l &0x0,%fpcr # clear FPCR
12219 fcmp.b %fp1,&0x1 # is |result| >= 1.b?
12242 # fsqrt(+0) = +0
12243 # fsqrt(-0) = -0
12251 fmov.s &0x00000000,%fp0 # return +ZERO
12255 fmov.s &0x80000000,%fp0 # return -ZERO
12263 fmovm.x SRC(%a0),&0x80 # return +INF in fp0
12313 mov.l EXC_DREGS+0x0(%a6),%d0
12316 mov.l EXC_DREGS+0x4(%a6),%d0
12337 mov.l EXC_DREGS+0x8(%a6),%d0
12340 mov.l EXC_DREGS+0xc(%a6),%d0
12398 mov.l %d0,EXC_DREGS+0x0(%a6)
12401 mov.l %d0,EXC_DREGS+0x4(%a6)
12459 mov.w %d0,2+EXC_DREGS+0x0(%a6)
12462 mov.w %d0,2+EXC_DREGS+0x4(%a6)
12520 mov.b %d0,3+EXC_DREGS+0x0(%a6)
12523 mov.b %d0,3+EXC_DREGS+0x4(%a6)
12585 iareg0: add.l %d0,EXC_DREGS+0x8(%a6)
12587 iareg1: add.l %d0,EXC_DREGS+0xc(%a6)
12600 cmpi.b %d0,&0x1
12605 addq.l &0x2,EXC_A7(%a6)
12649 dareg0: sub.l %d0,EXC_DREGS+0x8(%a6)
12651 dareg1: sub.l %d0,EXC_DREGS+0xc(%a6)
12664 cmpi.b %d0,&0x1
12669 subq.l &0x2,EXC_A7(%a6)
12709 mov.l 0+EXC_FP0(%a6), 0+FP_SRC(%a6)
12715 mov.l 0+EXC_FP1(%a6), 0+FP_SRC(%a6)
12721 fmovm.x &0x20, FP_SRC(%a6)
12725 fmovm.x &0x10, FP_SRC(%a6)
12729 fmovm.x &0x08, FP_SRC(%a6)
12733 fmovm.x &0x04, FP_SRC(%a6)
12737 fmovm.x &0x02, FP_SRC(%a6)
12741 fmovm.x &0x01, FP_SRC(%a6)
12782 mov.l 0+EXC_FP0(%a6), 0+FP_DST(%a6)
12788 mov.l 0+EXC_FP1(%a6), 0+FP_DST(%a6)
12794 fmovm.x &0x20, FP_DST(%a6)
12798 fmovm.x &0x10, FP_DST(%a6)
12802 fmovm.x &0x08, FP_DST(%a6)
12806 fmovm.x &0x04, FP_DST(%a6)
12810 fmovm.x &0x02, FP_DST(%a6)
12814 fmovm.x &0x01, FP_DST(%a6)
12857 fmovm.x &0x80, EXC_FP0(%a6)
12860 fmovm.x &0x80, EXC_FP1(%a6)
12863 fmovm.x &0x01, -(%sp)
12864 fmovm.x (%sp)+, &0x20
12867 fmovm.x &0x01, -(%sp)
12868 fmovm.x (%sp)+, &0x10
12871 fmovm.x &0x01, -(%sp)
12872 fmovm.x (%sp)+, &0x08
12875 fmovm.x &0x01, -(%sp)
12876 fmovm.x (%sp)+, &0x04
12879 fmovm.x &0x01, -(%sp)
12880 fmovm.x (%sp)+, &0x02
12883 fmovm.x &0x01, -(%sp)
12884 fmovm.x (%sp)+, &0x01
12920 mov.l &0xc,%d0 # packed is 12 bytes
12924 mov.l &0xc,%d0 # pass: 12 bytes
12932 cmpi.w %d0,&0x7fff # INF or NAN?
12940 andi.b &0x0f,%d0 # clear all but last nybble
12950 fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
12984 # added if SM = 1 and subtracted if SM = 0. Scale the #
12987 # SM = 0 a non-zero digit in the integer position #
13019 byte 0,0,0,0
13025 set FSTRT,0
13032 mov.l 0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
13033 mov.l 0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
13034 mov.l 0x8(%a0),FP_SCR0_LO(%a6)
13038 movm.l &0x3c00,-(%sp) # save d2-d5
13039 fmovm.x &0x1,-(%sp) # save fp1
13067 mulu.l &0xa,%d1 # mul partial product by one digit place
13079 or.l &0x40000000,%d4 # set SE in d4,
13080 or.l &0x40000000,(%a0) # and in working bcd
13106 fmov.s &0x00000000,%fp0 # accumulator
13124 fmul.s &0x41200000,%fp0 # fp0 = fp0 * 10
13129 # If all the digits (8) in that long word have been converted (d2=0),
13130 # then inc d1 (=2) to point to the next long word and reset d3 to 0
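The lines above outline the packed-BCD mantissa conversion: an accumulator starts at zero, is multiplied by ten for each digit place (0x41200000 is single-precision 10.0), and the next nibble is added, eight digits per 32-bit word. A hedged C restatement of that loop; the exact digit count and word layout of the packed operand are not reproduced:

    #include <stdint.h>

    /* Illustrative Horner loop for the digit conversion described above:
     * acc = acc*10 + digit, consuming 8 BCD nibbles per 32-bit word. */
    static double bcd_words_to_binary(const uint32_t *words, int nwords)
    {
        double acc = 0.0;                               /* the fp0 accumulator */
        for (int w = 0; w < nwords; w++) {
            uint32_t word = words[w];
            for (int d = 0; d < 8; d++) {               /* 8 digits per word   */
                acc = acc * 10.0 + (double)((word >> 28) & 0xfu);
                word <<= 4;                             /* next nibble         */
            }
        }
        return acc;
    }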
13230 or.l &0x40000000,%d4 # and set SE in d4
13231 or.l &0x40000000,(%a0) # and in memory
13239 fmov.s &0x3f800000,%fp1 # init fp1 to 1
13278 and.l &0xbfffffff,%d4 # and clr SE in d4
13279 and.l &0xbfffffff,(%a0) # and in memory
13287 fmov.s &0x3f800000,%fp1 # init fp1 to 1
13347 bfextu %d4{&0:&2},%d0 # {FPCR[6],FPCR[5],SM,SE}
13369 or.l &0x40000000,(%a0) # and set SE bit
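The 0x40000000 / 0xbfffffff masks above set and clear the exponent-sign bit of the packed-decimal operand: in its most significant long word, bit 31 is the mantissa sign (SM) and bit 30 the exponent sign (SE), which also matches the two-bit bfextu/bfins at offset 0 used elsewhere in this listing. A small C restatement; the accessor names are made up:

    #include <stdint.h>

    /* SM and SE live in the top two bits of the packed operand's most
     * significant long word, the bits the or.l/and.l above manipulate. */
    #define SM_BIT 0x80000000u   /* mantissa sign */
    #define SE_BIT 0x40000000u   /* exponent sign */
    static int packed_exp_is_neg(uint32_t msw) { return (msw & SE_BIT) != 0; }
    static int packed_man_is_neg(uint32_t msw) { return (msw & SM_BIT) != 0; }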
13372 fmov.s &0x3f800000,%fp1 # init fp1 to 1
13414 add.l &0x4,%sp # clear 1 lw param
13415 fmovm.x (%sp)+,&0x40 # restore fp1
13416 movm.l (%sp)+,&0x3c # restore d2-d5
13417 fmov.l &0x0,%fpcr
13418 fmov.l &0x0,%fpsr
13445 # approximated by adding e + 0.f when the original #
13452 # A5. Set ICTR = 0; #
13527 long 0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
13529 long 0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
13533 long 0x3F800000,0x00000000,0x00000000,0x00000000
13535 long 0x40000000,0x00000000,0x00000000,0x00000000
13537 long 0x41200000,0x00000000,0x00000000,0x00000000
13539 long 0x459A2800,0x00000000,0x00000000,0x00000000
13542 byte 0,0,0,0
13572 movm.l &0x3f20,-(%sp) # {%d2-%d7/%a2}
13573 fmovm.x &0x7,-(%sp) # {%fp0-%fp2}
13582 fmov.l &rm_mode*0x10,%fpcr # set RM and ext
13595 and.w &0x7fff,%d0 # strip sign of normalized exp
13611 and.w &0x7fff,%d0 # strip sign of normalized exp
13622 and.l &0x7fffffff,FP_SCR1(%a6) # create abs(X)
13626 # imated by adding e + 0.f when the original value is viewed
13656 mov.w &0x3fff,FP_SCR1(%a6) # replace exponent with 0x3fff
13658 sub.w &0x3fff,%d0 # strip off bias
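These fragments implement the ILOG estimate the block comments describe: with the input viewed as 2^e * 1.f, its log base 2 is approximated by e + 0.f (the code strips the sign, substitutes an exponent of 0x3fff so the mantissa reads as a value near 1, and subtracts the bias to recover e), and the result is scaled by log10(2), the value held in extended precision by the 0x3FFD0000,0x9A209A84,... constants earlier in the listing. A hedged C sketch of the estimate, with double standing in for extended precision:

    #include <math.h>

    /* Sketch of the ILOG approximation: e is the unbiased binary exponent of
     * the input and f its fraction (0 <= f < 1).  RM rounding in the original
     * code makes the conversion to integer a floor. */
    static int ilog_estimate(int e, double f)
    {
        const double log10_of_2 = 0.30102999566398119521;
        return (int)floor(log10_of_2 * ((double)e + f));
    }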
13674 fmov.l &0,%fpsr # zero all of fpsr - nothing needed
13677 # A5. Set ICTR = 0;
13714 ble.b k_neg # if k <= 0, LEN = ILOG + 1 - k
13715 mov.l %d7,%d4 # if k > 0, LEN = k
13747 # RN 00 0 0 00/0 RN
13748 # RN 00 0 1 00/0 RN
13749 # RN 00 1 0 00/0 RN
13750 # RN 00 1 1 00/0 RN
13751 # RZ 01 0 0 11/3 RP
13752 # RZ 01 0 1 11/3 RP
13753 # RZ 01 1 0 10/2 RM
13755 # RM 10 0 0 11/3 RP
13756 # RM 10 0 1 10/2 RM
13757 # RM 10 1 0 10/2 RM
13759 # RP 11 0 0 10/2 RM
13760 # RP 11 0 1 11/3 RP
13761 # RP 11 1 0 11/3 RP
13766 # d0: exponent/scratch - final is 0
13767 # d2: x/0 or 24 for A9
13770 # d5: 0/ICTR:LAMBDA
13771 # d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
13786 bgt.b k_pos # if pos and > 0, skip this
13789 mov.l %d7,%d6 # if ((k<0) & (ILOG < k)) ILOG = k
13800 cmp.l %d0,&0xffffecd4 # test iscale <= -4908
13813 bge.b x_pos # if pos, don't set bit 0
13814 addq.l &1,%d1 # if neg, set bit 0
13852 fmov.l &0,%fpsr # clr INEX
13853 fmov.l &rz_mode*0x10,%fpcr # set RZ rounding mode
13870 # d2: 0 or 24/unchanged
13905 fmovm.x &0x2,-(%sp) # save 10^ISCALE to stack
13908 andi.w &0x7fff,%d3 # clear sign
13909 ori.w &0x8000,(%a0) # make DENORM exp negative
13911 subi.w &0x3fff,%d3 # subtract BIAS
13913 subi.w &0x3fff,%d3 # subtract BIAS
13915 subi.w &0x3fff,%d3 # subtract BIAS
13919 andi.w &0x8000,(%sp) # keep sign
13921 andi.w &0x7fff,(%a0) # clear sign bit on DENORM again
13922 mov.l 0x8(%a0),-(%sp) # put input op mantissa on stk
13923 mov.l 0x4(%a0),-(%sp)
13924 mov.l &0x3fff0000,-(%sp) # force exp to zero
13925 fmovm.x (%sp)+,&0x80 # load normalized DENORM into fp0
13932 mov.l &0x3fff0000,-(%sp) # force exp to zero
13935 mov.l &0x3fff0000,-(%sp)# force exp to zero
13989 and.l &0x00000030,USER_FPCR(%a6) # set size to ext,
14018 movm.l &0xc0c0,-(%sp) # save regs used by sintd0 {%d0-%d1/%a0-%a1}
14026 or.l &0x80000000,(%a0) # if neg, use -Y
14032 fmov.l &0x0,%fpsr # clear the AEXC bits!!!
14034 ## andi.l &0x00000030,%d0
14039 ## fmov.l &0x0,%fpcr
14048 movm.l (%sp)+,&0x303 # restore regs used by sint {%d0-%d1/%a0-%a1}
14066 # d0: FPCR with size set to ext/scratch final = 0
14086 tst.w %d5 # check if ICTR = 0
14120 fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
14134 fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
14137 # Since ICTR <> 0, we have already been through one adjustment,
14173 # d0: x/LEN call to binstr - final is 0
14174 # d1: x/0
14194 fmov.l &rz_mode*0x10,%fpcr # force rz for conversion
14205 sub.l &0x3ffd,%d0 # sub bias less 2 to make fract
14210 lsr.l &1,%d2 # shift d2:d3 right, add 0s
14220 add.l &0x00000080,%d3 # inc at bit 7
14222 and.l &0xffffff80,%d3 # strip off lsb not used by 882
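The add/and pair above rounds the 32-bit fraction word in place: the bits below bit 7 are not used downstream (the "not used by 882" comment), so the word is rounded by adding 1 at bit 7 and masking the low bits away. As a one-line C restatement:

    #include <stdint.h>

    /* Restatement of the add &0x80 / and &0xffffff80 pair above. */
    static uint32_t round_frac_at_bit7(uint32_t frac)
    {
        return (frac + 0x00000080u) & 0xffffff80u;
    }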
14235 # 32 16 15 0
14237 # | 0 | e3 | e2 | e1 | e4 | X | X | X |
14246 # d0: x/LEN call to binstr - final is 0
14247 # d1: x/scratch (0);shift count for final exponent packing
14297 sub.w &0x3ffd,%d0 # subtract off bias
14305 add.l &0x00000080,%d3 # inc at bit 7
14307 and.l &0xffffff80,%d3 # strip off lsb not used by 882
14347 and.b &0x0f,FP_SCR0(%a6) # clear first nibble of FP_SCR0
14354 addq.l &1,%d0 # set bit 0 in d0 for SE
14356 bfins %d0,FP_SCR0(%a6){&0:&2} # insert SM and SE into FP_SCR0
14360 fmov.l &0,%fpsr # clear possible inex2/ainex bits
14361 fmovm.x (%sp)+,&0xe0 # {%fp0-%fp2}
14362 movm.l (%sp)+,&0x4fc # {%d2-%d7/%a2}
14367 long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
14368 long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
14369 long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
14370 long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
14371 long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
14372 long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
14373 long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
14374 long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
14375 long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
14376 long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
14377 long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
14378 long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
14379 long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
14383 long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
14384 long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
14385 long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
14386 long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
14387 long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
14388 long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
14389 long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
14390 long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
14391 long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
14392 long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
14393 long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
14394 long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
14395 long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
14399 long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
14400 long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
14401 long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
14402 long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
14403 long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
14404 long 0x40690000,0x9DC5ADA8,0x2B70B59D # 10 ^ 32
14405 long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
14406 long 0x41A80000,0x93BA47C9,0x80E98CDF # 10 ^ 128
14407 long 0x43510000,0xAA7EEBFB,0x9DF9DE8D # 10 ^ 256
14408 long 0x46A30000,0xE319A0AE,0xA60E91C6 # 10 ^ 512
14409 long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
14410 long 0x5A920000,0x9E8B3B5D,0xC53D5DE4 # 10 ^ 2048
14411 long 0x75250000,0xC4605202,0x8A20979A # 10 ^ 4096
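The three tables above list 10^1, 10^2, 10^4, ... 10^4096 in extended precision, one table per rounding direction (the entries differ only in their last mantissa digits). The usual way such a table is consumed, shown here as a hedged C sketch with double precision standing in for extended, is to decompose the decimal exponent in binary and multiply the selected 10^(2^k) entries together:

    /* Sketch only: build 10^n (0 <= n < 512 in this cut-down table) from
     * powers 10^(2^k), the same shape as the tables above. */
    static double pow10_by_table(unsigned n)
    {
        static const double pten[9] = { 1e1, 1e2, 1e4, 1e8, 1e16,
                                        1e32, 1e64, 1e128, 1e256 };
        double result = 1.0;
        for (unsigned k = 0; n != 0 && k < 9; k++, n >>= 1)
            if (n & 1u)
                result *= pten[k];
        return result;
    }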
14435 # to force the first byte formed to have a 0 in the upper 4 bits. #
14469 # d7: byte digit formation word;digit count {0,1}
14475 movm.l &0xff00,-(%sp) # {%d0-%d7}
14491 bfextu %d2{&0:&3},%d1 # copy 3 msbs of d2 into d1
14493 bfextu %d3{&0:&3},%d6 # copy 3 msbs of d3 into d6
14501 swap %d6 # put 0 in d6 lower word
14511 swap %d6 # with d6 = 0; put 0 in upper word
14539 movm.l (%sp)+,&0xff # {%d0-%d7}
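binstr forms decimal digits from a binary fraction by repeated multiplication by ten, implemented in the assembly as x*10 = x*8 + x*2 over a 64-bit value, which is why the three most significant bits of each half are copied aside before the shifts. A hedged C sketch of the same digit-formation idea, using a 128-bit product (a GCC/Clang extension) in place of the shift-and-add:

    #include <stdint.h>

    /* Sketch: frac holds a binary fraction (value = frac / 2^64).  Each pass
     * multiplies by ten; the integer part spilling into the upper 64 bits is
     * the next decimal digit and the remainder becomes the new fraction. */
    static void fraction_to_digits(uint64_t frac, char *out, int ndigits)
    {
        for (int i = 0; i < ndigits; i++) {
            unsigned __int128 prod = (unsigned __int128)frac * 10u;
            out[i] = (char)('0' + (unsigned)(prod >> 64));
            frac = (uint64_t)prod;
        }
    }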
14576 movq.l &0x1,%d0 # one byte
14579 mov.w &0x0121,EXC_VOFF(%a6) # set FSLW
14583 movq.l &0x2,%d0 # two bytes
14586 mov.w &0x0141,EXC_VOFF(%a6) # set FSLW
14590 movq.l &0x4,%d0 # four bytes
14593 mov.w &0x0101,EXC_VOFF(%a6) # set FSLW
14597 movq.l &0x8,%d0 # eight bytes
14600 mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
14604 movq.l &0xc,%d0 # twelve bytes
14607 mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
14613 movq.l &0x1,%d0 # one byte
14616 mov.w &0x00a1,EXC_VOFF(%a6) # set FSLW
14620 movq.l &0x2,%d0 # two bytes
14623 mov.w &0x00c1,EXC_VOFF(%a6) # set FSLW
14627 movq.l &0x4,%d0 # four bytes
14630 mov.w &0x0081,EXC_VOFF(%a6) # set FSLW
14634 movq.l &0x8,%d0 # eight bytes
14637 mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
14641 mov.l &0xc,%d0 # twelve bytes
14644 mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
14651 fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
14653 movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
14658 mov.l 0x8(%sp),0x4(%sp) # store lo(PC)
14659 mov.l 0xc(%sp),0x8(%sp) # store EA
14660 mov.l &0x00000001,0xc(%sp) # store FSLW
14661 mov.w 0x6(%sp),0xc(%sp) # fix FSLW (size)
14662 mov.w &0x4008,0x6(%sp) # store voff
14664 btst &0x5,(%sp) # supervisor or user mode?
14666 bset &0x2,0xd(%sp) # set supervisor TM bit
14679 mov.b EXC_OPWORD+0x1(%a6),%d1
14680 andi.b &0x38,%d1 # extract opmode
14681 cmpi.b %d1,&0x18 # postinc?
14683 cmpi.b %d1,&0x20 # predec?
14688 mov.b EXC_OPWORD+0x1(%a6),%d1
14689 andi.w &0x0007,%d1 # fetch An
14705 sub.l %d0,EXC_DREGS+0x8(%a6) # fix stacked a0
14708 sub.l %d0,EXC_DREGS+0xc(%a6) # fix stacked a1
14731 cmpi.b EXC_VOFF(%a6),&0x30 # move in or out?
14734 btst &0x5,EXC_SR(%a6) # user or supervisor?