// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
//
//	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
//	Portions Copyright © 1995-1997 C H Forsyth ([email protected])
//	Portions Copyright © 1997-1999 Vita Nuova Limited
//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
//	Portions Copyright © 2004,2006 Bruce Ellis
//	Portions Copyright © 2005-2007 C H Forsyth ([email protected])
//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
//	Portions Copyright © 2009 The Go Authors. All rights reserved.
//
//	Permission is hereby granted, free of charge, to any person obtaining a copy
//	of this software and associated documentation files (the "Software"), to deal
//	in the Software without restriction, including without limitation the rights
//	to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//	copies of the Software, and to permit persons to whom the Software is
//	furnished to do so, subject to the following conditions:
//
//	The above copyright notice and this permission notice shall be included in
//	all copies or substantial portions of the Software.
//
//	THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//	IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//	FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//	AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//	LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//	OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
//	THE SOFTWARE.

package ppc64

import (
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"encoding/binary"
	"fmt"
	"internal/buildcfg"
	"log"
	"math"
	"math/bits"
	"sort"
)

// ctxt9 holds state while assembling a single function.
// Each function gets a fresh ctxt9.
// This allows multiple functions to be assembled safely and concurrently.
type ctxt9 struct {
	ctxt       *obj.Link
	newprog    obj.ProgAlloc
	cursym     *obj.LSym
	autosize   int32
	instoffset int64
	pc         int64
}

// Instruction layout.

const (
	r0iszero = 1
)

const (
	// R bit option in prefixed load/store/add D-form operations
	PFX_R_ABS   = 0 // Offset is absolute
	PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
)

const (
	// The preferred hardware nop instruction.
	NOP = 0x60000000
)

type Optab struct {
	as    obj.As // Opcode
	a1    uint8  // p.From argument (obj.Addr). p is of type obj.Prog.
	a2    uint8  // p.Reg argument (int16 Register)
	a3    uint8  // p.RestArgs[0] (obj.AddrPos)
	a4    uint8  // p.RestArgs[1]
	a5    uint8  // p.RestArgs[2]
	a6    uint8  // p.To (obj.Addr)
	type_ int8   // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
	size  int8   // Text space in bytes to lay operation

	// A prefixed instruction is generated by this opcode. This cannot be placed
	// across a 64B PC boundary. Opcodes should not translate to more than one
	// prefixed instruction. The prefixed instruction should be written first
	// (e.g. when Optab.size > 8).
	ispfx bool

	asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
}
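
// As an illustration of how an Optab entry is read, the optabBase entry
//
//	{as: AADD, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 4, size: 4}
//
// matches "ADD $sconst, Rx, Ry": p.From is a signed 16-bit constant (a1),
// p.Reg is a GPR (a2), and p.To is a GPR (a6). It is handled by asmout
// case 4 and occupies 4 bytes of text (a single add-immediate instruction).
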
// optab contains the accepted operand combinations for the instructions the
// assembler knows; oprange is later built from slices of it. For clarity,
// unused arguments and fields are not explicitly listed; they take the
// default (zero) value for their type.
//
// optab does not list every valid ppc64 opcode; it enumerates representative
// operand combinations for a class of instruction. The variable oprange indexes
// all valid ppc64 opcodes.
//
// oprange is initialized to point to a slice within optab which contains the valid
// operand combinations for a given instruction. This is initialized from buildop.
//
// Likewise, optab is sorted (see optabLess) so that, within each opcode's slice,
// the entries generating the smallest text size come first.
//
// optab is the sorted result of combining optabBase, optabGen, and prefixableOptab.
var optab []Optab

var optabBase = []Optab{
	{as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
	{as: obj.ATEXT, a1: C_LOREG, a3: C_32CON, a6: C_TEXTSIZE, type_: 0, size: 0},
	{as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
	{as: obj.ATEXT, a1: C_ADDR, a3: C_32CON, a6: C_TEXTSIZE, type_: 0, size: 0},
	/* move register */
	{as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
	{as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
	{as: AADD, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
	{as: AADD, a1: C_S16CON, a6: C_REG, type_: 4, size: 4},
	{as: AADD, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
	{as: AADD, a1: C_U16CON, a6: C_REG, type_: 22, size: 8},
	{as: AADDIS, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
	{as: AADDIS, a1: C_S16CON, a6: C_REG, type_: 20, size: 4},
	{as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
	{as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
	{as: AADDC, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
	{as: AADDC, a1: C_S16CON, a6: C_REG, type_: 4, size: 4},
	{as: AADDC, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
	{as: AADDC, a1: C_32CON, a6: C_REG, type_: 22, size: 12},
	{as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
	{as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
	{as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
	{as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
	{as: AANDCC, a1: C_U16CON, a6: C_REG, type_: 58, size: 4},
	{as: AANDCC, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
	{as: AANDCC, a1: C_S16CON, a6: C_REG, type_: 23, size: 8},
	{as: AANDCC, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
	{as: AANDCC, a1: C_32CON, a6: C_REG, type_: 23, size: 12},
	{as: AANDCC, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
	{as: AANDISCC, a1: C_U16CON, a6: C_REG, type_: 58, size: 4},
	{as: AANDISCC, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
	{as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
	{as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
	{as: AMULLW, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
	{as: AMULLW, a1: C_S16CON, a6: C_REG, type_: 4, size: 4},
	{as: AMULLW, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
	{as: AMULLW, a1: C_32CON, a6: C_REG, type_: 22, size: 12},
	{as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
	{as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
	{as: ASUBC, a1: C_REG, a3: C_S16CON, a6: C_REG, type_: 27, size: 4},
	{as: ASUBC, a1: C_REG, a3: C_32CON, a6: C_REG, type_: 28, size: 12},
	{as: AOR, a1: C_REG, a2:
C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */ 154 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 155 {as: AOR, a1: C_U16CON, a6: C_REG, type_: 58, size: 4}, 156 {as: AOR, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 157 {as: AOR, a1: C_S16CON, a6: C_REG, type_: 23, size: 8}, 158 {as: AOR, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, 159 {as: AOR, a1: C_U32CON, a2: C_REG, a6: C_REG, type_: 21, size: 8}, 160 {as: AOR, a1: C_U32CON, a6: C_REG, type_: 21, size: 8}, 161 {as: AOR, a1: C_32CON, a6: C_REG, type_: 23, size: 12}, 162 {as: AOR, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, 163 {as: AORIS, a1: C_U16CON, a6: C_REG, type_: 58, size: 4}, 164 {as: AORIS, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 165 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */ 166 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 167 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */ 168 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4}, 169 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 170 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 171 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 172 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 173 {as: ASLD, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, 174 {as: ASLD, a1: C_U15CON, a6: C_REG, type_: 25, size: 4}, 175 {as: AEXTSWSLI, a1: C_U15CON, a6: C_REG, type_: 25, size: 4}, 176 {as: AEXTSWSLI, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, 177 {as: ASLW, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 57, size: 4}, 178 {as: ASLW, a1: C_U15CON, a6: C_REG, type_: 57, size: 4}, 179 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 180 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 181 {as: ASRAW, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, 182 {as: ASRAW, a1: C_U15CON, a6: C_REG, type_: 56, size: 4}, 183 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 184 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 185 {as: ASRAD, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, 186 {as: ASRAD, a1: C_U15CON, a6: C_REG, type_: 56, size: 4}, 187 {as: ARLWNM, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 63, size: 4}, 188 {as: ARLWNM, a1: C_U15CON, a2: C_REG, a3: C_U15CON, a4: C_U15CON, a6: C_REG, type_: 63, size: 4}, 189 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 63, size: 4}, 190 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_U15CON, a4: C_U15CON, a6: C_REG, type_: 63, size: 4}, 191 {as: ACLRLSLWI, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 62, size: 4}, 192 {as: ARLDMI, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 30, size: 4}, 193 {as: ARLDC, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 29, size: 4}, 194 {as: ARLDC, a1: C_REG, a3: C_U8CON, a4: C_U8CON, a6: C_REG, type_: 9, size: 4}, 195 {as: ARLDCL, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 29, size: 4}, 196 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 14, size: 4}, 197 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 14, size: 4}, 198 {as: ARLDICL, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 14, size: 4}, 199 {as: ARLDCL, a1: C_REG, a3: C_32CON, a6: C_REG, type_: 14, size: 4}, 200 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4}, 201 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4}, 202 {as: AFABS, 
a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, 203 {as: AFABS, a6: C_FREG, type_: 33, size: 4}, 204 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4}, 205 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4}, 206 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4}, 207 208 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 209 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 210 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, 211 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, 212 213 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 214 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 215 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 216 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 217 218 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 219 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, 220 221 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, 222 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, 223 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 224 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 225 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 226 227 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 228 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 229 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 230 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 231 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 232 233 {as: AMOVD, a1: C_16CON, a6: C_REG, type_: 3, size: 4}, 234 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, 235 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 236 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 237 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8}, 238 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, 239 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 240 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 241 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8}, 242 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, 243 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 244 245 {as: AMOVW, a1: C_16CON, a6: C_REG, type_: 3, size: 4}, 246 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, 247 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4}, 248 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 249 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 250 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, 251 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, 252 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 253 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 254 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, 255 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 256 257 {as: AFMOVD, a1: C_S16CON, a6: C_FREG, type_: 24, size: 8}, 258 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4}, 259 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4}, 260 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4}, 261 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, 262 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4}, 263 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4}, 264 265 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, 266 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4}, 267 268 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4}, 269 
{as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, 270 271 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4}, 272 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4}, 273 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4}, 274 {as: AMOVFL, a1: C_FREG, a3: C_32CON, a6: C_FPSCR, type_: 64, size: 4}, 275 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4}, 276 {as: AMOVFL, a1: C_32CON, a6: C_FPSCR, type_: 65, size: 4}, 277 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, 278 {as: AMOVFL, a1: C_REG, a6: C_32CON, type_: 69, size: 4}, 279 280 {as: ASYSCALL, type_: 5, size: 4}, 281 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12}, 282 {as: ASYSCALL, a1: C_U15CON, type_: 77, size: 12}, 283 {as: ABEQ, a6: C_BRA, type_: 16, size: 4}, 284 {as: ABEQ, a1: C_CREG, a6: C_BRA, type_: 16, size: 4}, 285 {as: ABR, a6: C_BRA, type_: 11, size: 4}, // b label 286 {as: ABR, a6: C_BRAPIC, type_: 11, size: 8}, // b label; nop 287 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr 288 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr 289 {as: ABC, a1: C_U15CON, a2: C_CRBIT, a6: C_BRA, type_: 16, size: 4}, // bc bo, bi, label 290 {as: ABC, a1: C_U15CON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi 291 {as: ABC, a1: C_U15CON, a2: C_CRBIT, a3: C_U15CON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh 292 {as: ABC, a1: C_U15CON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi 293 {as: ABDNZ, a6: C_BRA, type_: 16, size: 4}, 294 {as: ASYNC, type_: 46, size: 4}, 295 {as: AWORD, a1: C_32CON, type_: 40, size: 4}, 296 {as: ADWORD, a1: C_64CON, type_: 31, size: 8}, 297 {as: ADWORD, a1: C_LACON, type_: 31, size: 8}, 298 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4}, 299 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4}, 300 {as: AEXTSB, a6: C_REG, type_: 48, size: 4}, 301 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, 302 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, 303 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4}, 304 {as: ANEG, a6: C_REG, type_: 47, size: 4}, 305 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12}, 306 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12}, 307 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16}, 308 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16}, 309 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12}, 310 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12}, 311 {as: AMTFSB0, a1: C_U15CON, type_: 52, size: 4}, 312 /* Other ISA 2.05+ instructions */ 313 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */ 314 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */ 315 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */ 316 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 317 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_U15CON, type_: 92, size: 4}, /* floating test for sw divide, x-form */ 318 {as: AFTSQRT, a1: C_FREG, a6: C_U15CON, type_: 93, size: 4}, /* floating test for sw square root, x-form */ 319 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */ 320 {as: ADARN, a1: C_U15CON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */ 321 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */ 322 {as: AADDEX, a1: C_REG, a2: C_REG, 
a3: C_U15CON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */ 323 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */ 324 325 /* Misc ISA 3.0 instructions */ 326 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4}, 327 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4}, 328 329 /* Vector instructions */ 330 331 /* Vector load */ 332 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */ 333 334 /* Vector store */ 335 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */ 336 337 /* Vector logical */ 338 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */ 339 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */ 340 341 /* Vector add */ 342 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */ 343 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */ 344 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */ 345 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */ 346 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */ 347 348 /* Vector subtract */ 349 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */ 350 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */ 351 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */ 352 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */ 353 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */ 354 355 /* Vector multiply */ 356 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */ 357 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */ 358 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */ 359 360 /* Vector rotate */ 361 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */ 362 363 /* Vector shift */ 364 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */ 365 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */ 366 {as: AVSOI, a1: C_U16CON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */ 367 368 /* Vector count */ 369 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */ 370 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */ 371 372 /* Vector compare */ 373 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */ 374 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */ 375 {as: AVCMPNEZB, 
a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */ 376 377 /* Vector merge */ 378 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */ 379 380 /* Vector permute */ 381 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */ 382 383 /* Vector bit permute */ 384 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */ 385 386 /* Vector select */ 387 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */ 388 389 /* Vector splat */ 390 {as: AVSPLTB, a1: C_S16CON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, 391 {as: AVSPLTISB, a1: C_S16CON, a6: C_VREG, type_: 82, size: 4}, 392 393 /* Vector AES */ 394 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */ 395 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */ 396 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */ 397 398 /* Vector SHA */ 399 {as: AVSHASIGMA, a1: C_U16CON, a2: C_VREG, a3: C_U16CON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */ 400 401 /* VSX vector load */ 402 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */ 403 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */ 404 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */ 405 406 /* VSX vector store */ 407 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */ 408 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */ 409 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */ 410 411 /* VSX scalar load */ 412 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */ 413 414 /* VSX scalar store */ 415 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */ 416 417 /* VSX scalar as integer load */ 418 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */ 419 420 /* VSX scalar store as integer */ 421 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */ 422 423 /* VSX move from VSR */ 424 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, 425 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4}, 426 427 /* VSX move to VSR */ 428 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4}, 429 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4}, 430 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4}, 431 432 /* VSX xx3-form */ 433 {as: AXXLAND, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 90, size: 4}, /* vsx xx3-form (FPR usage) */ 434 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx xx3-form */ 435 436 /* VSX select */ 437 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */ 438 439 /* VSX merge */ 440 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */ 441 442 /* VSX splat */ 443 {as: AXXSPLTW, a1: C_VSREG, a3: C_U15CON, a6: C_VSREG, type_: 89, size: 4}, /* vsx 
splat, xx2-form */ 444 {as: AXXSPLTIB, a1: C_U15CON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */ 445 446 /* VSX permute */ 447 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */ 448 449 /* VSX shift */ 450 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_U15CON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */ 451 452 /* VSX reverse bytes */ 453 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */ 454 455 /* VSX scalar FP-FP conversion */ 456 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */ 457 458 /* VSX vector FP-FP conversion */ 459 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */ 460 461 /* VSX scalar FP-integer conversion */ 462 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */ 463 464 /* VSX scalar integer-FP conversion */ 465 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */ 466 467 /* VSX vector FP-integer conversion */ 468 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */ 469 470 /* VSX vector integer-FP conversion */ 471 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */ 472 473 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 474 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, 475 {as: ACMP, a1: C_REG, a6: C_S16CON, type_: 70, size: 4}, 476 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_S16CON, type_: 70, size: 4}, 477 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 478 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, 479 {as: ACMPU, a1: C_REG, a6: C_U16CON, type_: 70, size: 4}, 480 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_U16CON, type_: 70, size: 4}, 481 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4}, 482 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4}, 483 {as: ATW, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 60, size: 4}, 484 {as: ATW, a1: C_32CON, a2: C_REG, a6: C_S16CON, type_: 61, size: 4}, 485 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4}, 486 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4}, 487 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_U15CON, type_: 43, size: 4}, 488 {as: ADCBF, a1: C_SOREG, a6: C_U15CON, type_: 43, size: 4}, 489 {as: ADCBF, a1: C_XOREG, a6: C_U15CON, type_: 43, size: 4}, 490 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4}, 491 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 492 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, 493 {as: ALDAR, a1: C_XOREG, a3: C_U16CON, a6: C_REG, type_: 45, size: 4}, 494 {as: AEIEIO, type_: 46, size: 4}, 495 {as: ATLBIE, a1: C_REG, type_: 49, size: 4}, 496 {as: ATLBIE, a1: C_U15CON, a6: C_REG, type_: 49, size: 4}, 497 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, 498 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, 499 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 500 {as: ASTSW, a1: C_REG, a3: C_32CON, a6: C_ZOREG, type_: 41, size: 4}, 501 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, 502 {as: ALSW, a1: C_ZOREG, a3: C_32CON, a6: C_REG, type_: 42, size: 4}, 503 504 {as: obj.AUNDEF, type_: 78, size: 4}, 505 {as: obj.APCDATA, a1: C_32CON, a6: C_32CON, type_: 0, size: 0}, 506 {as: obj.AFUNCDATA, a1: 
C_U15CON, a6: C_ADDR, type_: 0, size: 0},
	{as: obj.ANOP, type_: 0, size: 0},
	{as: obj.ANOP, a1: C_32CON, type_: 0, size: 0}, // NOP operand variations added for #40689
	{as: obj.ANOP, a1: C_REG, type_: 0, size: 0},   // to preserve previous behavior
	{as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
	{as: obj.ADUFFZERO, a6: C_BRA, type_: 11, size: 4}, // same as ABR/ABL
	{as: obj.ADUFFCOPY, a6: C_BRA, type_: 11, size: 4}, // same as ABR/ABL
	{as: obj.APCALIGN, a1: C_32CON, type_: 0, size: 0}, // align code
}

// PrefixableOptab describes an opcode which may generate a different sequence
// of instructions depending on whether prefixed opcode support is available.
type PrefixableOptab struct {
	Optab
	minGOPPC64 int  // Minimum GOPPC64 required to support this.
	pfxsize    int8 // Instruction sequence size when prefixed opcodes are used
}

// The prefixable optab entries contain the pseudo-opcodes which generate relocations, or which
// may generate a more efficient sequence of instructions if a prefixed version exists
// (e.g. paddi instead of oris/ori/add).
//
// This table is meant to transform all sequences which might be TOC-relative into an equivalent
// PC-relative sequence. It also encompasses several transformations which do not involve
// relocations; those could be separated out and applied to AIX and other non-ELF targets.
// Likewise, the prefixed forms do not have encoding restrictions on the offset, so they are also
// used in static binaries to allow better code generation. E.g. a doubleword load with an offset
// that is not a multiple of 4, such as
//
//	MOVD 3(Rx), Ry
//
// is allowed when the prefixed forms are used.
//
// This requires an ISA 3.1 compatible CPU (e.g. Power10) and, when linking externally, an
// ELFv2 1.5 compliant toolchain.
var prefixableOptab = []PrefixableOptab{
	{Optab: Optab{as: AMOVD, a1: C_S34CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
	{Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},

	{Optab: Optab{as: AMOVW, a1: C_32CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},

	{Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
	{Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
	{Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},

	{Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},

	{Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},

	{Optab: Optab{as: AADD, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AADD, a1: C_32CON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AADD, a1: C_S34CON, a2: C_REG, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
	{Optab: Optab{as: AADD, a1: C_S34CON, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
}
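
// The prefixable entries above are folded into optab by buildop: when prefixed
// opcodes are enabled (pfxEnabled) and GOPPC64 is at least minGOPPC64, the entry
// is added with ispfx set and size replaced by pfxsize; otherwise the base
// (non-prefixed) size and sequence are kept.
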
var oprange [ALAST & obj.AMask][]Optab

var xcmp [C_NCLASS][C_NCLASS]bool

var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
var buildOpCfg = ""    // Save the os/cpu/arch tuple used to configure the assembler in buildop

// addpad returns the number of padding bytes needed to align code at pc to the
// requested alignment a, as specified by a PCALIGN directive.
func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
	switch a {
	case 8, 16, 32, 64:
		// By default function alignment is 16. If an alignment > 16 is
		// requested then the function alignment must also be promoted.
		// The function alignment is not promoted on AIX at this time.
		// TODO: Investigate AIX function alignment.
		if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
			cursym.Func().Align = int32(a)
		}
		if pc&(a-1) != 0 {
			return int(a - (pc & (a - 1)))
		}
	default:
		ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
	}
	return 0
}
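
// For example, a "PCALIGN $16" directive encountered at pc 0x28 causes addpad
// to return 8, and span9 later fills those 8 bytes with two NOP instructions
// when laying out the code.
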
func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
	p := cursym.Func().Text
	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
		return
	}

	if oprange[AANDN&obj.AMask] == nil {
		ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
	}

	c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}

	pc := int64(0)
	p.Pc = pc

	var m int
	var o *Optab
	for p = p.Link; p != nil; p = p.Link {
		p.Pc = pc
		o = c.oplook(p)
		m = int(o.size)
		if m == 0 {
			if p.As == obj.APCALIGN {
				a := c.vregoff(&p.From)
				m = addpad(pc, a, ctxt, cursym)
			} else {
				if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
					ctxt.Diag("zero-width instruction\n%v", p)
				}
				continue
			}
		}
		pc += int64(m)
	}

	c.cursym.Size = pc

	/*
	 * if any procedure is large enough to
	 * generate a large SBRA branch, then
	 * generate extra passes putting branches
	 * around jmps to fix. this is rare.
	 */
	bflag := 1

	var otxt int64
	var q *obj.Prog
	var out [5]uint32
	var falign int32 // Track increased alignment requirements for prefix.
	for bflag != 0 {
		bflag = 0
		pc = 0
		falign = 0 // Note, linker bumps function symbols to funcAlign.
		for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
			p.Pc = pc
			o = c.oplook(p)

			// very large conditional branches
			if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
				otxt = p.To.Target().Pc - pc
				if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
					// Assemble the instruction with a target not too far to figure out BI and BO fields.
					// If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
					// and only one extra branch is needed to reach the target.
					tgt := p.To.Target()
					p.To.SetTarget(p.Link)
					o.asmout(&c, p, o, &out)
					p.To.SetTarget(tgt)

					bo := int64(out[0]>>21) & 31
					bi := int16((out[0] >> 16) & 31)
					invertible := false

					if bo&0x14 == 0x14 {
						// A conditional branch that is unconditionally taken. This cannot be inverted.
					} else if bo&0x10 == 0x10 {
						// A branch based on the value of CTR. Invert the CTR comparison against zero bit.
						bo ^= 0x2
						invertible = true
					} else if bo&0x04 == 0x04 {
						// A branch based on CR bit. Invert the BI comparison bit.
						bo ^= 0x8
						invertible = true
					}
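
					// For example, a BEQ assembles with BO=12 ("branch if CR bit set");
					// flipping bit 0x8 gives BO=4 ("branch if CR bit clear"), i.e. a BNE
					// over the unconditional jump inserted below. The CTR case flips the
					// "branch if CTR zero/nonzero" sense in the same way.
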
					if invertible {
						// Rewrite
						//     BC bo,...,far_away_target
						//     NEXT_INSN
						// to:
						//     BC invert(bo),next_insn
						//     JMP far_away_target
						// next_insn:
						//     NEXT_INSN
						p.As = ABC
						p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
						q = c.newprog()
						q.As = ABR
						q.To.Type = obj.TYPE_BRANCH
						q.To.SetTarget(p.To.Target())
						q.Link = p.Link
						p.To.SetTarget(p.Link)
						p.Link = q
						p.Reg = REG_CRBIT0 + bi
					} else {
						// Rewrite
						//     BC ...,far_away_target
						//     NEXT_INSN
						// to
						//     BC ...,tmp
						//     JMP next_insn
						// tmp:
						//     JMP far_away_target
						// next_insn:
						//     NEXT_INSN
						q = c.newprog()
						q.Link = p.Link
						p.Link = q
						q.As = ABR
						q.To.Type = obj.TYPE_BRANCH
						q.To.SetTarget(p.To.Target())
						p.To.SetTarget(q)
						q = c.newprog()
						q.Link = p.Link
						p.Link = q
						q.As = ABR
						q.To.Type = obj.TYPE_BRANCH
						q.To.SetTarget(q.Link.Link)
					}
					bflag = 1
				}
			}

			m = int(o.size)
			if m == 0 {
				if p.As == obj.APCALIGN {
					a := c.vregoff(&p.From)
					m = addpad(pc, a, ctxt, cursym)
				} else {
					if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
						ctxt.Diag("zero-width instruction\n%v", p)
					}
					continue
				}
			}

			// Prefixed instructions cannot be placed across a 64B boundary.
			// Mark and adjust the PC of those which do. A nop will be
			// inserted during final assembly.
			if o.ispfx {
				mark := p.Mark &^ PFX_X64B
				if pc&63 == 60 {
					p.Pc += 4
					m += 4
					mark |= PFX_X64B
				}

				// Marks may be adjusted if a too-far conditional branch is
				// fixed up above. Likewise, inserting a NOP may cause a
				// branch target to become too far away. We need to run
				// another iteration and verify no additional changes
				// are needed.
				if mark != p.Mark {
					bflag = 1
					p.Mark = mark
				}

				// Check for 16 or 32B crossing of this prefixed insn.
				// These do not require padding, but do require increasing
				// the function alignment to prevent them from potentially
				// crossing a 64B boundary when the linker assigns the final
				// PC.
				switch p.Pc & 31 {
				case 28: // 32B crossing
					falign = 64
				case 12: // 16B crossing
					if falign < 64 {
						falign = 32
					}
				}
			}
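
			// For example, a prefixed (8 byte) instruction whose first word would
			// start at pc&63 == 60 is pushed to the next 64 byte line; the NOP that
			// fills the resulting 4 byte gap is emitted in the layout loop below,
			// keyed off the PFX_X64B mark.
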
			pc += int64(m)
		}

		c.cursym.Size = pc
	}

	c.cursym.Size = pc
	c.cursym.Func().Align = falign
	c.cursym.Grow(c.cursym.Size)

	// lay out the code, emitting code and data relocations.

	bp := c.cursym.P
	var i int32
	for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
		c.pc = p.Pc
		o = c.oplook(p)
		if int(o.size) > 4*len(out) {
			log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
		}
		// asmout is not set up to add large amounts of padding
		if o.type_ == 0 && p.As == obj.APCALIGN {
			aln := c.vregoff(&p.From)
			v := addpad(p.Pc, aln, c.ctxt, c.cursym)
			if v > 0 {
				// Same padding instruction for all
				for i = 0; i < int32(v/4); i++ {
					c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP)
					bp = bp[4:]
				}
			}
		} else {
			if p.Mark&PFX_X64B != 0 {
				c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP)
				bp = bp[4:]
			}
			o.asmout(&c, p, o, &out)
			for i = 0; i < int32(o.size/4); i++ {
				c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
				bp = bp[4:]
			}
		}
	}
}

func isint32(v int64) bool {
	return int64(int32(v)) == v
}

func isuint32(v uint64) bool {
	return uint64(uint32(v)) == v
}

func (c *ctxt9) aclassreg(reg int16) int {
	if REG_R0 <= reg && reg <= REG_R31 {
		return C_REGP + int(reg&1)
	}
	if REG_F0 <= reg && reg <= REG_F31 {
		return C_FREGP + int(reg&1)
	}
	if REG_V0 <= reg && reg <= REG_V31 {
		return C_VREG
	}
	if REG_VS0 <= reg && reg <= REG_VS63 {
		return C_VSREGP + int(reg&1)
	}
	if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
		return C_CREG
	}
	if REG_CR0LT <= reg && reg <= REG_CR7SO {
		return C_CRBIT
	}
	if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
		switch reg {
		case REG_LR:
			return C_LR

		case REG_CTR:
			return C_CTR
		}

		return C_SPR
	}
	if REG_A0 <= reg && reg <= REG_A7 {
		return C_AREG
	}
	if reg == REG_FPSCR {
		return C_FPSCR
	}
	return C_GOK
}

func (c *ctxt9) aclass(a *obj.Addr) int {
	switch a.Type {
	case obj.TYPE_NONE:
		return C_NONE

	case obj.TYPE_REG:
		return c.aclassreg(a.Reg)

	case obj.TYPE_MEM:
		if a.Index != 0 {
			if a.Name != obj.NAME_NONE || a.Offset != 0 {
				c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
			}
			return C_XOREG
		}
		switch a.Name {
		case obj.NAME_GOTREF, obj.NAME_TOCREF:
			return C_ADDR

		case obj.NAME_EXTERN,
			obj.NAME_STATIC:
			c.instoffset = a.Offset
			if a.Sym == nil {
				break
			} else if a.Sym.Type == objabi.STLSBSS {
				// For PIC builds, use 12 byte got initial-exec TLS accesses.
				if c.ctxt.Flag_shared {
					return C_TLS_IE
				}
				// Otherwise, use 8 byte local-exec TLS accesses.
				return C_TLS_LE
			} else {
				return C_ADDR
			}

		case obj.NAME_AUTO:
			a.Reg = REGSP
			c.instoffset = int64(c.autosize) + a.Offset
			if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SOREG
			}
			return C_LOREG

		case obj.NAME_PARAM:
			a.Reg = REGSP
			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
			if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SOREG
			}
			return C_LOREG

		case obj.NAME_NONE:
			c.instoffset = a.Offset
			if a.Offset == 0 && a.Index == 0 {
				return C_ZOREG
			} else if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SOREG
			} else {
				return C_LOREG
			}
		}

		return C_GOK

	case obj.TYPE_TEXTSIZE:
		return C_TEXTSIZE

	case obj.TYPE_FCONST:
		// The only cases where FCONST will occur are with float64 +/- 0.
		// All other float constants are generated in memory.
		f64 := a.Val.(float64)
		if f64 == 0 {
			if math.Signbit(f64) {
				return C_S16CON
			}
			return C_ZCON
		}
		log.Fatalf("Unexpected nonzero FCONST operand %v", a)

	case obj.TYPE_CONST,
		obj.TYPE_ADDR:
		switch a.Name {
		case obj.NAME_NONE:
			c.instoffset = a.Offset
			if a.Reg != 0 {
				if -BIG <= c.instoffset && c.instoffset < BIG {
					return C_SACON
				}
				if isint32(c.instoffset) {
					return C_LACON
				}
				return C_DACON
			}

		case obj.NAME_EXTERN,
			obj.NAME_STATIC:
			s := a.Sym
			if s == nil {
				return C_GOK
			}
			c.instoffset = a.Offset
			return C_LACON

		case obj.NAME_AUTO:
			a.Reg = REGSP
			c.instoffset = int64(c.autosize) + a.Offset
			if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SACON
			}
			return C_LACON

		case obj.NAME_PARAM:
			a.Reg = REGSP
			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
			if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SACON
			}
			return C_LACON

		default:
			return C_GOK
		}

		if c.instoffset >= 0 {
			sbits := bits.Len64(uint64(c.instoffset))
			switch {
			case sbits <= 5:
				return C_ZCON + sbits
			case sbits <= 8:
				return C_U8CON
			case sbits <= 15:
				return C_U15CON
			case sbits <= 16:
				return C_U16CON
			case sbits <= 31:
				return C_U31CON
			case sbits <= 32:
				return C_U32CON
			case sbits <= 33:
				return C_S34CON
			default:
				return C_64CON
			}
		} else {
			sbits := bits.Len64(uint64(^c.instoffset))
			switch {
			case sbits <= 15:
				return C_S16CON
			case sbits <= 31:
				return C_S32CON
			case sbits <= 33:
				return C_S34CON
			default:
				return C_64CON
			}
		}

	case obj.TYPE_BRANCH:
		if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled {
			return C_BRAPIC
		}
		return C_BRA
	}

	return C_GOK
}
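
// To make the constant classification above concrete: $17 classifies as C_U5CON
// (5 significant bits), $40000 as C_U16CON, and $-1 as C_S16CON. cmp (below)
// lets a smaller class satisfy an optab entry that asks for a wider one, so a
// C_U5CON operand also matches entries listing C_U15CON or C_S16CON.
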
func prasm(p *obj.Prog) {
	fmt.Printf("%v\n", p)
}

func (c *ctxt9) oplook(p *obj.Prog) *Optab {
	a1 := int(p.Optab)
	if a1 != 0 {
		return &optab[a1-1]
	}
	a1 = int(p.From.Class)
	if a1 == 0 {
		a1 = c.aclass(&p.From) + 1
		p.From.Class = int8(a1)
	}
	a1--

	argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
	for i, ap := range p.RestArgs {
		argsv[i] = int(ap.Addr.Class)
		if argsv[i] == 0 {
			argsv[i] = c.aclass(&ap.Addr) + 1
			ap.Addr.Class = int8(argsv[i])
		}
	}
	a3 := argsv[0] - 1
	a4 := argsv[1] - 1
	a5 := argsv[2] - 1

	a6 := int(p.To.Class)
	if a6 == 0 {
		a6 = c.aclass(&p.To) + 1
		p.To.Class = int8(a6)
	}
	a6--

	a2 := C_NONE
	if p.Reg != 0 {
		a2 = c.aclassreg(p.Reg)
	}

	// c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
	ops := oprange[p.As&obj.AMask]
	c1 := &xcmp[a1]
	c2 := &xcmp[a2]
	c3 := &xcmp[a3]
	c4 := &xcmp[a4]
	c5 := &xcmp[a5]
	c6 := &xcmp[a6]
	for i := range ops {
		op := &ops[i]
		if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
			return op
		}
	}

	c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
	prasm(p)
	if ops == nil {
		ops = optab
	}
	return &ops[0]
}

// cmp compares two operand types (e.g. C_REG or C_U15CON)
// and returns true if b is compatible with a.
//
// Argument comparison isn't symmetric, so care must be taken.
// a is the argument type as found in optab, b is the argument as
// fitted by aclass.
func cmp(a int, b int) bool {
	if a == b {
		return true
	}
	switch a {

	case C_SPR:
		if b == C_LR || b == C_CTR {
			return true
		}

	case C_U1CON:
		return cmp(C_ZCON, b)
	case C_U2CON:
		return cmp(C_U1CON, b)
	case C_U3CON:
		return cmp(C_U2CON, b)
	case C_U4CON:
		return cmp(C_U3CON, b)
	case C_U5CON:
		return cmp(C_U4CON, b)
	case C_U8CON:
		return cmp(C_U5CON, b)
	case C_U15CON:
		return cmp(C_U8CON, b)
	case C_S16CON:
		return cmp(C_U15CON, b)
	case C_U16CON:
		return cmp(C_U15CON, b)
	case C_16CON:
		return cmp(C_S16CON, b) || cmp(C_U16CON, b)
	case C_U31CON:
		return cmp(C_U16CON, b)
	case C_U32CON:
		return cmp(C_U31CON, b)
	case C_S32CON:
		return cmp(C_U31CON, b) || cmp(C_S16CON, b)
	case C_32CON:
		return cmp(C_S32CON, b) || cmp(C_U32CON, b)
	case C_S34CON:
		return cmp(C_32CON, b)
	case C_64CON:
		return cmp(C_S34CON, b)

	case C_LACON:
		return cmp(C_SACON, b)

	case C_SOREG:
		return cmp(C_ZOREG, b)

	case C_LOREG:
		return cmp(C_SOREG, b)

	case C_XOREG:
		return cmp(C_REG, b) || cmp(C_ZOREG, b)

	// An even/odd register input always matches the regular register types.
	case C_REG:
		return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
	case C_FREG:
		return cmp(C_FREGP, b)
	case C_VSREG:
		/* Allow any VR argument as a VSR operand. */
		return cmp(C_VSREGP, b) || cmp(C_VREG, b)

	case C_ANY:
		return true
	}

	return false
}
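
// cmp is evaluated once for every pair of operand classes and cached in the
// xcmp table by buildop, so the match loop in oplook is a handful of table
// lookups per candidate entry rather than a chain of recursive calls.
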
// optabLess is used when sorting the optab. Sorting is
// done so that the best choice of
// opcode/operand combination is considered first.
func optabLess(i, j int) bool {
	p1 := &optab[i]
	p2 := &optab[j]
	n := int(p1.as) - int(p2.as)
	// same opcode
	if n != 0 {
		return n < 0
	}
	// Consider those that generate fewer
	// instructions first.
	n = int(p1.size) - int(p2.size)
	if n != 0 {
		return n < 0
	}
	// operand order should match
	// better choices first
	n = int(p1.a1) - int(p2.a1)
	if n != 0 {
		return n < 0
	}
	n = int(p1.a2) - int(p2.a2)
	if n != 0 {
		return n < 0
	}
	n = int(p1.a3) - int(p2.a3)
	if n != 0 {
		return n < 0
	}
	n = int(p1.a4) - int(p2.a4)
	if n != 0 {
		return n < 0
	}
	n = int(p1.a5) - int(p2.a5)
	if n != 0 {
		return n < 0
	}
	n = int(p1.a6) - int(p2.a6)
	if n != 0 {
		return n < 0
	}
	return false
}

// opset adds an entry to the opcode table: the new opcode a
// accepts the same operand combinations as the existing
// (already masked) opcode b0.
func opset(a, b0 obj.As) {
	oprange[a&obj.AMask] = oprange[b0]
}

// NeedTOCpointer reports whether the build configuration requires a TOC pointer.
// It is assumed this is always called after buildop.
func NeedTOCpointer(ctxt *obj.Link) bool {
	return !pfxEnabled && ctxt.Flag_shared
}

// Build the opcode table
func buildop(ctxt *obj.Link) {
	// Limit PC-relative prefix instruction usage to supported and tested targets.
	pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
	cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
	if cfg == buildOpCfg {
		// Already initialized to correct OS/cpu; stop now.
		// This happens in the cmd/asm tests,
		// each of which re-initializes the arch.
		return
	}
	buildOpCfg = cfg

	// Configure the optab entries which may generate prefix opcodes.
	prefixOptab := make([]Optab, 0, len(prefixableOptab))
	for _, entry := range prefixableOptab {
		entry := entry
		if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
			// Enable prefix opcode generation and resize.
			entry.ispfx = true
			entry.size = entry.pfxsize
		}
		prefixOptab = append(prefixOptab, entry.Optab)
	}

	for i := 0; i < C_NCLASS; i++ {
		for n := 0; n < C_NCLASS; n++ {
			if cmp(n, i) {
				xcmp[i][n] = true
			}
		}
	}

	// Append the generated entries, sort, and fill out oprange.
	optab = make([]Optab, 0, len(optabBase)+len(optabGen)+len(prefixOptab))
	optab = append(optab, optabBase...)
	optab = append(optab, optabGen...)
	optab = append(optab, prefixOptab...)
	sort.Slice(optab, optabLess)

	for i := range optab {
		// Use the legacy assembler function if none provided.
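		// ("asmout" here is the large type_-switch later in this file; entries
		// from optabGen are expected to install their own handlers.)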
1290 if optab[i].asmout == nil { 1291 optab[i].asmout = asmout 1292 } 1293 } 1294 1295 for i := 0; i < len(optab); { 1296 r := optab[i].as 1297 r0 := r & obj.AMask 1298 start := i 1299 for i < len(optab) && optab[i].as == r { 1300 i++ 1301 } 1302 oprange[r0] = optab[start:i] 1303 1304 switch r { 1305 default: 1306 if !opsetGen(r) { 1307 ctxt.Diag("unknown op in build: %v", r) 1308 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r) 1309 } 1310 1311 case ADCBF: /* unary indexed: op (b+a); op (b) */ 1312 opset(ADCBI, r0) 1313 1314 opset(ADCBST, r0) 1315 opset(ADCBT, r0) 1316 opset(ADCBTST, r0) 1317 opset(ADCBZ, r0) 1318 opset(AICBI, r0) 1319 1320 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */ 1321 opset(ASTWCCC, r0) 1322 opset(ASTHCCC, r0) 1323 opset(ASTBCCC, r0) 1324 1325 case AREM: /* macro */ 1326 opset(AREM, r0) 1327 1328 case AREMU: 1329 opset(AREMU, r0) 1330 1331 case AREMD: 1332 opset(AREMDU, r0) 1333 1334 case AMULLW: 1335 opset(AMULLD, r0) 1336 1337 case ADIVW: /* op Rb[,Ra],Rd */ 1338 opset(AMULHW, r0) 1339 1340 opset(AMULHWCC, r0) 1341 opset(AMULHWU, r0) 1342 opset(AMULHWUCC, r0) 1343 opset(AMULLWCC, r0) 1344 opset(AMULLWVCC, r0) 1345 opset(AMULLWV, r0) 1346 opset(ADIVWCC, r0) 1347 opset(ADIVWV, r0) 1348 opset(ADIVWVCC, r0) 1349 opset(ADIVWU, r0) 1350 opset(ADIVWUCC, r0) 1351 opset(ADIVWUV, r0) 1352 opset(ADIVWUVCC, r0) 1353 opset(AMODUD, r0) 1354 opset(AMODUW, r0) 1355 opset(AMODSD, r0) 1356 opset(AMODSW, r0) 1357 opset(AADDCC, r0) 1358 opset(AADDCV, r0) 1359 opset(AADDCVCC, r0) 1360 opset(AADDV, r0) 1361 opset(AADDVCC, r0) 1362 opset(AADDE, r0) 1363 opset(AADDECC, r0) 1364 opset(AADDEV, r0) 1365 opset(AADDEVCC, r0) 1366 opset(AMULHD, r0) 1367 opset(AMULHDCC, r0) 1368 opset(AMULHDU, r0) 1369 opset(AMULHDUCC, r0) 1370 opset(AMULLDCC, r0) 1371 opset(AMULLDVCC, r0) 1372 opset(AMULLDV, r0) 1373 opset(ADIVD, r0) 1374 opset(ADIVDCC, r0) 1375 opset(ADIVDE, r0) 1376 opset(ADIVDEU, r0) 1377 opset(ADIVDECC, r0) 1378 opset(ADIVDEUCC, r0) 1379 opset(ADIVDVCC, r0) 1380 opset(ADIVDV, r0) 1381 opset(ADIVDU, r0) 1382 opset(ADIVDUV, r0) 1383 opset(ADIVDUVCC, r0) 1384 opset(ADIVDUCC, r0) 1385 1386 case ACRAND: 1387 opset(ACRANDN, r0) 1388 opset(ACREQV, r0) 1389 opset(ACRNAND, r0) 1390 opset(ACRNOR, r0) 1391 opset(ACROR, r0) 1392 opset(ACRORN, r0) 1393 opset(ACRXOR, r0) 1394 1395 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */ 1396 opset(APOPCNTW, r0) 1397 opset(APOPCNTB, r0) 1398 opset(ACNTTZW, r0) 1399 opset(ACNTTZWCC, r0) 1400 opset(ACNTTZD, r0) 1401 opset(ACNTTZDCC, r0) 1402 1403 case ACOPY: /* copy, paste. 
*/ 1404 opset(APASTECC, r0) 1405 1406 case AMADDHD: /* maddhd, maddhdu, maddld */ 1407 opset(AMADDHDU, r0) 1408 opset(AMADDLD, r0) 1409 1410 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */ 1411 opset(AMOVH, r0) 1412 opset(AMOVHZ, r0) 1413 1414 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */ 1415 opset(AMOVHU, r0) 1416 1417 opset(AMOVHZU, r0) 1418 opset(AMOVWU, r0) 1419 opset(AMOVWZU, r0) 1420 opset(AMOVDU, r0) 1421 opset(AMOVMW, r0) 1422 1423 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */ 1424 opset(ALVEHX, r0) 1425 opset(ALVEWX, r0) 1426 opset(ALVX, r0) 1427 opset(ALVXL, r0) 1428 opset(ALVSL, r0) 1429 opset(ALVSR, r0) 1430 1431 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */ 1432 opset(ASTVEHX, r0) 1433 opset(ASTVEWX, r0) 1434 opset(ASTVX, r0) 1435 opset(ASTVXL, r0) 1436 1437 case AVAND: /* vand, vandc, vnand */ 1438 opset(AVAND, r0) 1439 opset(AVANDC, r0) 1440 opset(AVNAND, r0) 1441 1442 case AVMRGOW: /* vmrgew, vmrgow */ 1443 opset(AVMRGEW, r0) 1444 1445 case AVOR: /* vor, vorc, vxor, vnor, veqv */ 1446 opset(AVOR, r0) 1447 opset(AVORC, r0) 1448 opset(AVXOR, r0) 1449 opset(AVNOR, r0) 1450 opset(AVEQV, r0) 1451 1452 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */ 1453 opset(AVADDUBM, r0) 1454 opset(AVADDUHM, r0) 1455 opset(AVADDUWM, r0) 1456 opset(AVADDUDM, r0) 1457 opset(AVADDUQM, r0) 1458 1459 case AVADDCU: /* vaddcuq, vaddcuw */ 1460 opset(AVADDCUQ, r0) 1461 opset(AVADDCUW, r0) 1462 1463 case AVADDUS: /* vaddubs, vadduhs, vadduws */ 1464 opset(AVADDUBS, r0) 1465 opset(AVADDUHS, r0) 1466 opset(AVADDUWS, r0) 1467 1468 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */ 1469 opset(AVADDSBS, r0) 1470 opset(AVADDSHS, r0) 1471 opset(AVADDSWS, r0) 1472 1473 case AVADDE: /* vaddeuqm, vaddecuq */ 1474 opset(AVADDEUQM, r0) 1475 opset(AVADDECUQ, r0) 1476 1477 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */ 1478 opset(AVSUBUBM, r0) 1479 opset(AVSUBUHM, r0) 1480 opset(AVSUBUWM, r0) 1481 opset(AVSUBUDM, r0) 1482 opset(AVSUBUQM, r0) 1483 1484 case AVSUBCU: /* vsubcuq, vsubcuw */ 1485 opset(AVSUBCUQ, r0) 1486 opset(AVSUBCUW, r0) 1487 1488 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */ 1489 opset(AVSUBUBS, r0) 1490 opset(AVSUBUHS, r0) 1491 opset(AVSUBUWS, r0) 1492 1493 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */ 1494 opset(AVSUBSBS, r0) 1495 opset(AVSUBSHS, r0) 1496 opset(AVSUBSWS, r0) 1497 1498 case AVSUBE: /* vsubeuqm, vsubecuq */ 1499 opset(AVSUBEUQM, r0) 1500 opset(AVSUBECUQ, r0) 1501 1502 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */ 1503 opset(AVMULOSB, r0) 1504 opset(AVMULEUB, r0) 1505 opset(AVMULOUB, r0) 1506 opset(AVMULESH, r0) 1507 opset(AVMULOSH, r0) 1508 opset(AVMULEUH, r0) 1509 opset(AVMULOUH, r0) 1510 opset(AVMULESW, r0) 1511 opset(AVMULOSW, r0) 1512 opset(AVMULEUW, r0) 1513 opset(AVMULOUW, r0) 1514 opset(AVMULUWM, r0) 1515 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */ 1516 opset(AVPMSUMB, r0) 1517 opset(AVPMSUMH, r0) 1518 opset(AVPMSUMW, r0) 1519 opset(AVPMSUMD, r0) 1520 1521 case AVR: /* vrlb, vrlh, vrlw, vrld */ 1522 opset(AVRLB, r0) 1523 opset(AVRLH, r0) 1524 opset(AVRLW, r0) 1525 opset(AVRLD, r0) 1526 1527 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */ 1528 opset(AVSLB, r0) 1529 opset(AVSLH, r0) 1530 opset(AVSLW, r0) 1531 opset(AVSL, r0) 1532 opset(AVSLO, r0) 1533 opset(AVSRB, r0) 1534 opset(AVSRH, r0) 1535 opset(AVSRW, r0) 1536 opset(AVSR, r0) 1537 opset(AVSRO, r0) 1538 
opset(AVSLD, r0) 1539 opset(AVSRD, r0) 1540 1541 case AVSA: /* vsrab, vsrah, vsraw, vsrad */ 1542 opset(AVSRAB, r0) 1543 opset(AVSRAH, r0) 1544 opset(AVSRAW, r0) 1545 opset(AVSRAD, r0) 1546 1547 case AVSOI: /* vsldoi */ 1548 opset(AVSLDOI, r0) 1549 1550 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */ 1551 opset(AVCLZB, r0) 1552 opset(AVCLZH, r0) 1553 opset(AVCLZW, r0) 1554 opset(AVCLZD, r0) 1555 1556 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */ 1557 opset(AVPOPCNTB, r0) 1558 opset(AVPOPCNTH, r0) 1559 opset(AVPOPCNTW, r0) 1560 opset(AVPOPCNTD, r0) 1561 1562 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */ 1563 opset(AVCMPEQUB, r0) 1564 opset(AVCMPEQUBCC, r0) 1565 opset(AVCMPEQUH, r0) 1566 opset(AVCMPEQUHCC, r0) 1567 opset(AVCMPEQUW, r0) 1568 opset(AVCMPEQUWCC, r0) 1569 opset(AVCMPEQUD, r0) 1570 opset(AVCMPEQUDCC, r0) 1571 1572 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */ 1573 opset(AVCMPGTUB, r0) 1574 opset(AVCMPGTUBCC, r0) 1575 opset(AVCMPGTUH, r0) 1576 opset(AVCMPGTUHCC, r0) 1577 opset(AVCMPGTUW, r0) 1578 opset(AVCMPGTUWCC, r0) 1579 opset(AVCMPGTUD, r0) 1580 opset(AVCMPGTUDCC, r0) 1581 opset(AVCMPGTSB, r0) 1582 opset(AVCMPGTSBCC, r0) 1583 opset(AVCMPGTSH, r0) 1584 opset(AVCMPGTSHCC, r0) 1585 opset(AVCMPGTSW, r0) 1586 opset(AVCMPGTSWCC, r0) 1587 opset(AVCMPGTSD, r0) 1588 opset(AVCMPGTSDCC, r0) 1589 1590 case AVCMPNEZB: /* vcmpnezb[.] */ 1591 opset(AVCMPNEZBCC, r0) 1592 opset(AVCMPNEB, r0) 1593 opset(AVCMPNEBCC, r0) 1594 opset(AVCMPNEH, r0) 1595 opset(AVCMPNEHCC, r0) 1596 opset(AVCMPNEW, r0) 1597 opset(AVCMPNEWCC, r0) 1598 1599 case AVPERM: /* vperm */ 1600 opset(AVPERMXOR, r0) 1601 opset(AVPERMR, r0) 1602 1603 case AVBPERMQ: /* vbpermq, vbpermd */ 1604 opset(AVBPERMD, r0) 1605 1606 case AVSEL: /* vsel */ 1607 opset(AVSEL, r0) 1608 1609 case AVSPLTB: /* vspltb, vsplth, vspltw */ 1610 opset(AVSPLTH, r0) 1611 opset(AVSPLTW, r0) 1612 1613 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */ 1614 opset(AVSPLTISH, r0) 1615 opset(AVSPLTISW, r0) 1616 1617 case AVCIPH: /* vcipher, vcipherlast */ 1618 opset(AVCIPHER, r0) 1619 opset(AVCIPHERLAST, r0) 1620 1621 case AVNCIPH: /* vncipher, vncipherlast */ 1622 opset(AVNCIPHER, r0) 1623 opset(AVNCIPHERLAST, r0) 1624 1625 case AVSBOX: /* vsbox */ 1626 opset(AVSBOX, r0) 1627 1628 case AVSHASIGMA: /* vshasigmaw, vshasigmad */ 1629 opset(AVSHASIGMAW, r0) 1630 opset(AVSHASIGMAD, r0) 1631 1632 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */ 1633 opset(ALXVDSX, r0) 1634 opset(ALXVW4X, r0) 1635 opset(ALXVH8X, r0) 1636 opset(ALXVB16X, r0) 1637 1638 case ALXV: /* lxv */ 1639 opset(ALXV, r0) 1640 1641 case ALXVL: /* lxvl, lxvll, lxvx */ 1642 opset(ALXVLL, r0) 1643 opset(ALXVX, r0) 1644 1645 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */ 1646 opset(ASTXVW4X, r0) 1647 opset(ASTXVH8X, r0) 1648 opset(ASTXVB16X, r0) 1649 1650 case ASTXV: /* stxv */ 1651 opset(ASTXV, r0) 1652 1653 case ASTXVL: /* stxvl, stxvll, stvx */ 1654 opset(ASTXVLL, r0) 1655 opset(ASTXVX, r0) 1656 1657 case ALXSDX: /* lxsdx */ 1658 opset(ALXSDX, r0) 1659 1660 case ASTXSDX: /* stxsdx */ 1661 opset(ASTXSDX, r0) 1662 1663 case ALXSIWAX: /* lxsiwax, lxsiwzx */ 1664 opset(ALXSIWZX, r0) 1665 1666 case ASTXSIWX: /* stxsiwx */ 1667 opset(ASTXSIWX, r0) 1668 1669 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */ 1670 opset(AMFFPRD, r0) 1671 opset(AMFVRD, r0) 1672 opset(AMFVSRWZ, r0) 1673 opset(AMFVSRLD, r0) 1674 1675 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), 
mtvsrdd, mtvsrws */ 1676 opset(AMTFPRD, r0) 1677 opset(AMTVRD, r0) 1678 opset(AMTVSRWA, r0) 1679 opset(AMTVSRWZ, r0) 1680 opset(AMTVSRWS, r0) 1681 1682 case AXXLAND: 1683 opset(AXXLANDC, r0) 1684 opset(AXXLEQV, r0) 1685 opset(AXXLNAND, r0) 1686 opset(AXXLORC, r0) 1687 opset(AXXLNOR, r0) 1688 opset(AXXLORQ, r0) 1689 opset(AXXLXOR, r0) 1690 opset(AXXLOR, r0) 1691 opset(AXSMAXJDP, r0) 1692 opset(AXSMINJDP, r0) 1693 1694 case AXXSEL: /* xxsel */ 1695 opset(AXXSEL, r0) 1696 1697 case AXXMRGHW: /* xxmrghw, xxmrglw */ 1698 opset(AXXMRGLW, r0) 1699 1700 case AXXSPLTW: /* xxspltw */ 1701 opset(AXXSPLTW, r0) 1702 1703 case AXXSPLTIB: /* xxspltib */ 1704 opset(AXXSPLTIB, r0) 1705 1706 case AXXPERM: /* xxpermdi */ 1707 opset(AXXPERM, r0) 1708 1709 case AXXSLDWI: /* xxsldwi */ 1710 opset(AXXPERMDI, r0) 1711 opset(AXXSLDWI, r0) 1712 1713 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */ 1714 opset(AXXBRD, r0) 1715 opset(AXXBRW, r0) 1716 opset(AXXBRH, r0) 1717 1718 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */ 1719 opset(AXSCVSPDP, r0) 1720 opset(AXSCVDPSPN, r0) 1721 opset(AXSCVSPDPN, r0) 1722 1723 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */ 1724 opset(AXVCVSPDP, r0) 1725 1726 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */ 1727 opset(AXSCVDPSXWS, r0) 1728 opset(AXSCVDPUXDS, r0) 1729 opset(AXSCVDPUXWS, r0) 1730 1731 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */ 1732 opset(AXSCVUXDDP, r0) 1733 opset(AXSCVSXDSP, r0) 1734 opset(AXSCVUXDSP, r0) 1735 1736 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */ 1737 opset(AXVCVDPSXDS, r0) 1738 opset(AXVCVDPSXWS, r0) 1739 opset(AXVCVDPUXDS, r0) 1740 opset(AXVCVDPUXWS, r0) 1741 opset(AXVCVSPSXDS, r0) 1742 opset(AXVCVSPSXWS, r0) 1743 opset(AXVCVSPUXDS, r0) 1744 opset(AXVCVSPUXWS, r0) 1745 1746 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */ 1747 opset(AXVCVSXWDP, r0) 1748 opset(AXVCVUXDDP, r0) 1749 opset(AXVCVUXWDP, r0) 1750 opset(AXVCVSXDSP, r0) 1751 opset(AXVCVSXWSP, r0) 1752 opset(AXVCVUXDSP, r0) 1753 opset(AXVCVUXWSP, r0) 1754 1755 case AAND: /* logical op Rb,Rs,Ra; no literal */ 1756 opset(AANDN, r0) 1757 opset(AANDNCC, r0) 1758 opset(AEQV, r0) 1759 opset(AEQVCC, r0) 1760 opset(ANAND, r0) 1761 opset(ANANDCC, r0) 1762 opset(ANOR, r0) 1763 opset(ANORCC, r0) 1764 opset(AORCC, r0) 1765 opset(AORN, r0) 1766 opset(AORNCC, r0) 1767 opset(AXORCC, r0) 1768 1769 case AADDME: /* op Ra, Rd */ 1770 opset(AADDMECC, r0) 1771 1772 opset(AADDMEV, r0) 1773 opset(AADDMEVCC, r0) 1774 opset(AADDZE, r0) 1775 opset(AADDZECC, r0) 1776 opset(AADDZEV, r0) 1777 opset(AADDZEVCC, r0) 1778 opset(ASUBME, r0) 1779 opset(ASUBMECC, r0) 1780 opset(ASUBMEV, r0) 1781 opset(ASUBMEVCC, r0) 1782 opset(ASUBZE, r0) 1783 opset(ASUBZECC, r0) 1784 opset(ASUBZEV, r0) 1785 opset(ASUBZEVCC, r0) 1786 1787 case AADDC: 1788 opset(AADDCCC, r0) 1789 1790 case ABEQ: 1791 opset(ABGE, r0) 1792 opset(ABGT, r0) 1793 opset(ABLE, r0) 1794 opset(ABLT, r0) 1795 opset(ABNE, r0) 1796 opset(ABVC, r0) 1797 opset(ABVS, r0) 1798 1799 case ABR: 1800 opset(ABL, r0) 1801 1802 case ABC: 1803 opset(ABCL, r0) 1804 1805 case ABDNZ: 1806 opset(ABDZ, r0) 1807 1808 case AEXTSB: /* op Rs, Ra */ 1809 opset(AEXTSBCC, r0) 1810 1811 opset(AEXTSH, r0) 1812 opset(AEXTSHCC, r0) 1813 opset(ACNTLZW, r0) 1814 opset(ACNTLZWCC, r0) 1815 opset(ACNTLZD, r0) 1816 opset(AEXTSW, r0) 1817 opset(AEXTSWCC, r0) 1818 opset(ACNTLZDCC, r0) 1819 1820 case AFABS: /* fop [s,]d */ 1821 
opset(AFABSCC, r0) 1822 1823 opset(AFNABS, r0) 1824 opset(AFNABSCC, r0) 1825 opset(AFNEG, r0) 1826 opset(AFNEGCC, r0) 1827 opset(AFRSP, r0) 1828 opset(AFRSPCC, r0) 1829 opset(AFCTIW, r0) 1830 opset(AFCTIWCC, r0) 1831 opset(AFCTIWZ, r0) 1832 opset(AFCTIWZCC, r0) 1833 opset(AFCTID, r0) 1834 opset(AFCTIDCC, r0) 1835 opset(AFCTIDZ, r0) 1836 opset(AFCTIDZCC, r0) 1837 opset(AFCFID, r0) 1838 opset(AFCFIDCC, r0) 1839 opset(AFCFIDU, r0) 1840 opset(AFCFIDUCC, r0) 1841 opset(AFCFIDS, r0) 1842 opset(AFCFIDSCC, r0) 1843 opset(AFRES, r0) 1844 opset(AFRESCC, r0) 1845 opset(AFRIM, r0) 1846 opset(AFRIMCC, r0) 1847 opset(AFRIP, r0) 1848 opset(AFRIPCC, r0) 1849 opset(AFRIZ, r0) 1850 opset(AFRIZCC, r0) 1851 opset(AFRIN, r0) 1852 opset(AFRINCC, r0) 1853 opset(AFRSQRTE, r0) 1854 opset(AFRSQRTECC, r0) 1855 opset(AFSQRT, r0) 1856 opset(AFSQRTCC, r0) 1857 opset(AFSQRTS, r0) 1858 opset(AFSQRTSCC, r0) 1859 1860 case AFADD: 1861 opset(AFADDS, r0) 1862 opset(AFADDCC, r0) 1863 opset(AFADDSCC, r0) 1864 opset(AFCPSGN, r0) 1865 opset(AFCPSGNCC, r0) 1866 opset(AFDIV, r0) 1867 opset(AFDIVS, r0) 1868 opset(AFDIVCC, r0) 1869 opset(AFDIVSCC, r0) 1870 opset(AFSUB, r0) 1871 opset(AFSUBS, r0) 1872 opset(AFSUBCC, r0) 1873 opset(AFSUBSCC, r0) 1874 1875 case AFMADD: 1876 opset(AFMADDCC, r0) 1877 opset(AFMADDS, r0) 1878 opset(AFMADDSCC, r0) 1879 opset(AFMSUB, r0) 1880 opset(AFMSUBCC, r0) 1881 opset(AFMSUBS, r0) 1882 opset(AFMSUBSCC, r0) 1883 opset(AFNMADD, r0) 1884 opset(AFNMADDCC, r0) 1885 opset(AFNMADDS, r0) 1886 opset(AFNMADDSCC, r0) 1887 opset(AFNMSUB, r0) 1888 opset(AFNMSUBCC, r0) 1889 opset(AFNMSUBS, r0) 1890 opset(AFNMSUBSCC, r0) 1891 opset(AFSEL, r0) 1892 opset(AFSELCC, r0) 1893 1894 case AFMUL: 1895 opset(AFMULS, r0) 1896 opset(AFMULCC, r0) 1897 opset(AFMULSCC, r0) 1898 1899 case AFCMPO: 1900 opset(AFCMPU, r0) 1901 1902 case AMTFSB0: 1903 opset(AMTFSB0CC, r0) 1904 opset(AMTFSB1, r0) 1905 opset(AMTFSB1CC, r0) 1906 1907 case ANEG: /* op [Ra,] Rd */ 1908 opset(ANEGCC, r0) 1909 1910 opset(ANEGV, r0) 1911 opset(ANEGVCC, r0) 1912 1913 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */ 1914 opset(AXOR, r0) 1915 1916 case AORIS: /* oris/xoris $uimm,Rs,Ra */ 1917 opset(AXORIS, r0) 1918 1919 case ASLW: 1920 opset(ASLWCC, r0) 1921 opset(ASRW, r0) 1922 opset(ASRWCC, r0) 1923 opset(AROTLW, r0) 1924 1925 case ASLD: 1926 opset(ASLDCC, r0) 1927 opset(ASRD, r0) 1928 opset(ASRDCC, r0) 1929 opset(AROTL, r0) 1930 1931 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1932 opset(ASRAWCC, r0) 1933 1934 case AEXTSWSLI: 1935 opset(AEXTSWSLICC, r0) 1936 1937 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1938 opset(ASRADCC, r0) 1939 1940 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */ 1941 opset(ASUB, r0) 1942 1943 opset(ASUBCC, r0) 1944 opset(ASUBV, r0) 1945 opset(ASUBVCC, r0) 1946 opset(ASUBCCC, r0) 1947 opset(ASUBCV, r0) 1948 opset(ASUBCVCC, r0) 1949 opset(ASUBE, r0) 1950 opset(ASUBECC, r0) 1951 opset(ASUBEV, r0) 1952 opset(ASUBEVCC, r0) 1953 1954 case ASYNC: 1955 opset(AISYNC, r0) 1956 opset(ALWSYNC, r0) 1957 opset(APTESYNC, r0) 1958 opset(ATLBSYNC, r0) 1959 1960 case ARLWNM: 1961 opset(ARLWNMCC, r0) 1962 opset(ARLWMI, r0) 1963 opset(ARLWMICC, r0) 1964 1965 case ARLDMI: 1966 opset(ARLDMICC, r0) 1967 opset(ARLDIMI, r0) 1968 opset(ARLDIMICC, r0) 1969 1970 case ARLDC: 1971 opset(ARLDCCC, r0) 1972 1973 case ARLDCL: 1974 opset(ARLDCR, r0) 1975 opset(ARLDCLCC, r0) 1976 opset(ARLDCRCC, r0) 1977 1978 case ARLDICL: 1979 opset(ARLDICLCC, r0) 1980 opset(ARLDICR, r0) 1981 opset(ARLDICRCC, r0) 1982 opset(ARLDIC, r0) 1983 opset(ARLDICCC, r0) 1984 
opset(ACLRLSLDI, r0) 1985 1986 case AFMOVD: 1987 opset(AFMOVDCC, r0) 1988 opset(AFMOVDU, r0) 1989 opset(AFMOVS, r0) 1990 opset(AFMOVSU, r0) 1991 1992 case ALDAR: 1993 opset(ALBAR, r0) 1994 opset(ALHAR, r0) 1995 opset(ALWAR, r0) 1996 1997 case ASYSCALL: /* just the op; flow of control */ 1998 opset(ARFI, r0) 1999 2000 opset(ARFCI, r0) 2001 opset(ARFID, r0) 2002 opset(AHRFID, r0) 2003 2004 case AMOVHBR: 2005 opset(AMOVWBR, r0) 2006 opset(AMOVDBR, r0) 2007 2008 case ASLBMFEE: 2009 opset(ASLBMFEV, r0) 2010 2011 case ATW: 2012 opset(ATD, r0) 2013 2014 case ATLBIE: 2015 opset(ASLBIE, r0) 2016 opset(ATLBIEL, r0) 2017 2018 case AEIEIO: 2019 opset(ASLBIA, r0) 2020 2021 case ACMP: 2022 opset(ACMPW, r0) 2023 2024 case ACMPU: 2025 opset(ACMPWU, r0) 2026 2027 case ACMPB: 2028 opset(ACMPB, r0) 2029 2030 case AFTDIV: 2031 opset(AFTDIV, r0) 2032 2033 case AFTSQRT: 2034 opset(AFTSQRT, r0) 2035 2036 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */ 2037 opset(AMOVWZ, r0) /* Same as above, but zero extended */ 2038 2039 case AVCLZLSBB: 2040 opset(AVCTZLSBB, r0) 2041 2042 case AADD, 2043 AADDIS, 2044 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */ 2045 AANDISCC, 2046 AFMOVSX, 2047 AFMOVSZ, 2048 ALSW, 2049 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */ 2050 AMOVB, /* macro: move byte with sign extension */ 2051 AMOVBU, /* macro: move byte with sign extension & update */ 2052 AMOVFL, 2053 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */ 2054 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */ 2055 ASTSW, 2056 ASLBMTE, 2057 AWORD, 2058 ADWORD, 2059 ADARN, 2060 AVMSUMUDM, 2061 AADDEX, 2062 ACMPEQB, 2063 ACLRLSLWI, 2064 AMTVSRDD, 2065 APNOP, 2066 AISEL, 2067 ASETB, 2068 obj.ANOP, 2069 obj.ATEXT, 2070 obj.AUNDEF, 2071 obj.AFUNCDATA, 2072 obj.APCALIGN, 2073 obj.APCDATA, 2074 obj.ADUFFZERO, 2075 obj.ADUFFCOPY: 2076 break 2077 } 2078 } 2079} 2080 2081func OPVXX1(o uint32, xo uint32, oe uint32) uint32 { 2082 return o<<26 | xo<<1 | oe<<11 2083} 2084 2085func OPVXX2(o uint32, xo uint32, oe uint32) uint32 { 2086 return o<<26 | xo<<2 | oe<<11 2087} 2088 2089func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 { 2090 return o<<26 | xo<<2 | oe<<16 2091} 2092 2093func OPVXX3(o uint32, xo uint32, oe uint32) uint32 { 2094 return o<<26 | xo<<3 | oe<<11 2095} 2096 2097func OPVXX4(o uint32, xo uint32, oe uint32) uint32 { 2098 return o<<26 | xo<<4 | oe<<11 2099} 2100 2101func OPDQ(o uint32, xo uint32, oe uint32) uint32 { 2102 return o<<26 | xo | oe<<4 2103} 2104 2105func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2106 return o<<26 | xo | oe<<11 | rc&1 2107} 2108 2109func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2110 return o<<26 | xo | oe<<11 | (rc&1)<<10 2111} 2112 2113func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2114 return o<<26 | xo<<1 | oe<<10 | rc&1 2115} 2116 2117func OPCC(o uint32, xo uint32, rc uint32) uint32 { 2118 return OPVCC(o, xo, 0, rc) 2119} 2120 2121/* Generate MD-form opcode */ 2122func OPMD(o, xo, rc uint32) uint32 { 2123 return o<<26 | xo<<2 | rc&1 2124} 2125 2126/* the order is dest, a/s, b/imm for both arithmetic and logical operations. 
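   For example (illustrative only), asmout case 2 emits ADD R5, R4, R3 through
   AOP_RRR(c.oprrr(AADD), R3, R4, R5): the destination R3 goes into the field at <<21 (RT),
   R4 into <<16 (RA) and R5 into <<11 (RB), giving r3 = r4 + r5.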
*/
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
}

/* VX-form 2-register operands, r/none/r */
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<11
}

/* VA-form 4-register operands */
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
}

func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
}

/* VX-form 2-register + UIM operands */
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
}

/* VX-form 2-register + ST + SIX operands */
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
}

/* VA-form 3-register + SHB operands */
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
}

/* VX-form 1-register + SIM operands */
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&31)<<16
}

/* XX1-form 3-register operands, 1 VSR operand */
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
}

/* XX2-form 3-register operands, 2 VSR operands */
func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
	return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
}

/* XX3-form 3 VSR operands */
func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}

/* XX3-form 3 VSR operands + immediate */
func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}

/* XX4-form, 4 VSR operands */
func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}

/* DQ-form, VSR register, register + offset operands */
func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
	/* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
	/* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
	/* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
	/* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
	/* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
	/* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507.
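	   For example (illustrative only), a Go asm offset of 32 is passed in as b and encoded with */
	/* DQ = 32>>4 = 2; an offset that is not a multiple of 16 would leave nonzero low bits in dq */
	/* and so produce an invalid instruction.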
	*/
	dq := b >> 4
	return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
}

/* Z23-form, 3-register operands + CY field */
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
}

/* X-form, 3-register operands + EH field */
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
}

func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
}

func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
}

func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	return op | li&0x03FFFFFC | aa<<1
}

func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
}

func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
}

func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
}

func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
}

func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
}

/* MD-form 2-register, 2 6-bit immediate operands */
func AOP_MD(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
}

/* MDS-form 3-register, 1 6-bit immediate operands. rsh argument is a register.
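   It defers to AOP_MD: masking rsh with 31 keeps only the 5-bit register number, which
   occupies the same bit positions as SH[0:4] does in the MD form, so no separate MDS
   encoder is needed.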
*/ 2250func AOP_MDS(op, to, from, rsh, m uint32) uint32 { 2251 return AOP_MD(op, to, from, rsh&31, m) 2252} 2253 2254func AOP_PFX_00_8LS(r, ie uint32) uint32 { 2255 return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF) 2256} 2257func AOP_PFX_10_MLS(r, ie uint32) uint32 { 2258 return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF) 2259} 2260 2261const ( 2262 /* each rhs is OPVCC(_, _, _, _) */ 2263 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0 2264 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0 2265 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0 2266 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0 2267 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0 2268 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0 2269 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0 2270 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0 2271 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0 2272 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0 2273 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0 2274 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0 2275 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0 2276 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0 2277 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0 2278 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0 2279 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0 2280 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0 2281 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0 2282 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0 2283 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0 2284 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0 2285 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0 2286 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0 2287 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0 2288 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0 2289 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0 2290 OP_XORI = 26<<26 | 0<<1 | 0<<10 | 0 2291 OP_XORIS = 27<<26 | 0<<1 | 0<<10 | 0 2292 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0 2293 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0 2294 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0 2295 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0 2296 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0 2297 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0 2298 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0 2299 OP_EXTSWSLI = 31<<26 | 445<<2 2300 OP_SETB = 31<<26 | 128<<1 2301) 2302 2303func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) { 2304 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32)) 2305} 2306 2307func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) { 2308 switch a { 2309 case AMOVH: 2310 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0) 2311 case AMOVW: 2312 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0) 2313 case AMOVD: 2314 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0) 2315 case AMOVBZ, AMOVB: 2316 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0) 2317 case AMOVHZ: 2318 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0) 2319 case AMOVWZ: 2320 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0) 2321 case AFMOVS: 2322 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0) 2323 case AFMOVD: 2324 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0) 2325 } 2326 log.Fatalf("Error no pfxload for %v\n", a) 2327 return 0, 0 2328} 2329 2330func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) { 2331 switch a { 2332 case AMOVD: 2333 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0) 2334 case AMOVBZ, AMOVB: 2335 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0) 2336 case AMOVHZ, AMOVH: 2337 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, 
uint32(reg), uint32(base), 0) 2338 case AMOVWZ, AMOVW: 2339 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0) 2340 case AFMOVS: 2341 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0) 2342 case AFMOVD: 2343 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0) 2344 } 2345 log.Fatalf("Error no pfxstore for %v\n", a) 2346 return 0, 0 2347} 2348 2349func oclass(a *obj.Addr) int { 2350 return int(a.Class) - 1 2351} 2352 2353const ( 2354 D_FORM = iota 2355 DS_FORM 2356) 2357 2358// This function determines when a non-indexed load or store is D or 2359// DS form for use in finding the size of the offset field in the instruction. 2360// The size is needed when setting the offset value in the instruction 2361// and when generating relocation for that field. 2362// DS form instructions include: ld, ldu, lwa, std, stdu. All other 2363// loads and stores with an offset field are D form. This function should 2364// only be called with the same opcodes as are handled by opstore and opload. 2365func (c *ctxt9) opform(insn uint32) int { 2366 switch insn { 2367 default: 2368 c.ctxt.Diag("bad insn in loadform: %x", insn) 2369 case OPVCC(58, 0, 0, 0), // ld 2370 OPVCC(58, 0, 0, 1), // ldu 2371 OPVCC(58, 0, 0, 0) | 1<<1, // lwa 2372 OPVCC(62, 0, 0, 0), // std 2373 OPVCC(62, 0, 0, 1): //stdu 2374 return DS_FORM 2375 case OP_ADDI, // add 2376 OPVCC(32, 0, 0, 0), // lwz 2377 OPVCC(33, 0, 0, 0), // lwzu 2378 OPVCC(34, 0, 0, 0), // lbz 2379 OPVCC(35, 0, 0, 0), // lbzu 2380 OPVCC(40, 0, 0, 0), // lhz 2381 OPVCC(41, 0, 0, 0), // lhzu 2382 OPVCC(42, 0, 0, 0), // lha 2383 OPVCC(43, 0, 0, 0), // lhau 2384 OPVCC(46, 0, 0, 0), // lmw 2385 OPVCC(48, 0, 0, 0), // lfs 2386 OPVCC(49, 0, 0, 0), // lfsu 2387 OPVCC(50, 0, 0, 0), // lfd 2388 OPVCC(51, 0, 0, 0), // lfdu 2389 OPVCC(36, 0, 0, 0), // stw 2390 OPVCC(37, 0, 0, 0), // stwu 2391 OPVCC(38, 0, 0, 0), // stb 2392 OPVCC(39, 0, 0, 0), // stbu 2393 OPVCC(44, 0, 0, 0), // sth 2394 OPVCC(45, 0, 0, 0), // sthu 2395 OPVCC(47, 0, 0, 0), // stmw 2396 OPVCC(52, 0, 0, 0), // stfs 2397 OPVCC(53, 0, 0, 0), // stfsu 2398 OPVCC(54, 0, 0, 0), // stfd 2399 OPVCC(55, 0, 0, 0): // stfdu 2400 return D_FORM 2401 } 2402 return 0 2403} 2404 2405// Encode instructions and create relocation for accessing s+d according to the 2406// instruction op with source or destination (as appropriate) register reg. 2407func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) { 2408 if c.ctxt.Headtype == objabi.Haix { 2409 // Every symbol access must be made via a TOC anchor. 2410 c.ctxt.Diag("symbolAccess called for %s", s.Name) 2411 } 2412 var base uint32 2413 form := c.opform(op) 2414 if c.ctxt.Flag_shared { 2415 base = REG_R2 2416 } else { 2417 base = REG_R0 2418 } 2419 // If reg can be reused when computing the symbol address, 2420 // use it instead of REGTMP. 
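	// Roughly, the emitted pair is (a sketch, not an extra code path):
	//	addis  REGTMP-or-reg, base, sym@ha
	//	op     reg, sym@l(REGTMP-or-reg)
	// REGTMP is clobbered only when reuse is false.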
2421 if !reuse { 2422 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) 2423 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) 2424 } else { 2425 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) 2426 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0) 2427 } 2428 rel = obj.Addrel(c.cursym) 2429 rel.Off = int32(c.pc) 2430 rel.Siz = 8 2431 rel.Sym = s 2432 rel.Add = d 2433 if c.ctxt.Flag_shared { 2434 switch form { 2435 case D_FORM: 2436 rel.Type = objabi.R_ADDRPOWER_TOCREL 2437 case DS_FORM: 2438 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS 2439 } 2440 2441 } else { 2442 switch form { 2443 case D_FORM: 2444 rel.Type = objabi.R_ADDRPOWER 2445 case DS_FORM: 2446 rel.Type = objabi.R_ADDRPOWER_DS 2447 } 2448 } 2449 return 2450} 2451 2452// Determine the mask begin (mb) and mask end (me) values 2453// for a valid word rotate mask. A valid 32 bit mask is of 2454// the form 1+0*1+ or 0*1+0*. 2455// 2456// Note, me is inclusive. 2457func decodeMask32(mask uint32) (mb, me uint32, valid bool) { 2458 mb = uint32(bits.LeadingZeros32(mask)) 2459 me = uint32(32 - bits.TrailingZeros32(mask)) 2460 mbn := uint32(bits.LeadingZeros32(^mask)) 2461 men := uint32(32 - bits.TrailingZeros32(^mask)) 2462 // Check for a wrapping mask (e.g bits at 0 and 31) 2463 if mb == 0 && me == 32 { 2464 // swap the inverted values 2465 mb, me = men, mbn 2466 } 2467 2468 // Validate mask is of the binary form 1+0*1+ or 0*1+0* 2469 // Isolate rightmost 1 (if none 0) and add. 2470 v := mask 2471 vp := (v & -v) + v 2472 // Likewise, check for the wrapping (inverted) case. 2473 vn := ^v 2474 vpn := (vn & -vn) + vn 2475 return mb, (me - 1) & 31, (v&vp == 0 || vn&vpn == 0) && v != 0 2476} 2477 2478// Decompose a mask of contiguous bits into a begin (mb) and 2479// end (me) value. 2480// 2481// 64b mask values cannot wrap on any valid PPC64 instruction. 2482// Only masks of the form 0*1+0* are valid. 2483// 2484// Note, me is inclusive. 2485func decodeMask64(mask int64) (mb, me uint32, valid bool) { 2486 m := uint64(mask) 2487 mb = uint32(bits.LeadingZeros64(m)) 2488 me = uint32(64 - bits.TrailingZeros64(m)) 2489 valid = ((m&-m)+m)&m == 0 && m != 0 2490 return mb, (me - 1) & 63, valid 2491} 2492 2493// Load the lower 16 bits of a constant into register r. 2494func loadl16(r int, d int64) uint32 { 2495 v := uint16(d) 2496 if v == 0 { 2497 // Avoid generating "ori r,r,0", r != 0. Instead, generate the architectually preferred nop. 2498 // For example, "ori r31,r31,0" is a special execution serializing nop on Power10 called "exser". 2499 return NOP 2500 } 2501 return LOP_IRR(OP_ORI, uint32(r), uint32(r), uint32(v)) 2502} 2503 2504// Load the upper 16 bits of a 32b constant into register r. 
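// When the 64-bit value fits in a uint32, the zero-extending ORIS form is used; otherwise
// ADDIS is used and its immediate is sign extended into the upper 32 bits. Callers fill in
// the low 16 bits afterwards with an ori/loadl16 (see asmout cases 19, 22 and 23).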
2505func loadu32(r int, d int64) uint32 { 2506 v := int32(d >> 16) 2507 if isuint32(uint64(d)) { 2508 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v)) 2509 } 2510 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v)) 2511} 2512 2513func high16adjusted(d int32) uint16 { 2514 if d&0x8000 != 0 { 2515 return uint16((d >> 16) + 1) 2516 } 2517 return uint16(d >> 16) 2518} 2519 2520func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { 2521 o1 := uint32(0) 2522 o2 := uint32(0) 2523 o3 := uint32(0) 2524 o4 := uint32(0) 2525 o5 := uint32(0) 2526 2527 //print("%v => case %d\n", p, o->type); 2528 switch o.type_ { 2529 default: 2530 c.ctxt.Diag("unknown type %d", o.type_) 2531 prasm(p) 2532 2533 case 0: /* pseudo ops */ 2534 break 2535 2536 case 2: /* int/cr/fp op Rb,[Ra],Rd */ 2537 r := int(p.Reg) 2538 2539 if r == 0 { 2540 r = int(p.To.Reg) 2541 } 2542 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2543 2544 case 3: /* mov $soreg/16con, r ==> addi/ori $i,reg',r */ 2545 d := c.vregoff(&p.From) 2546 2547 v := int32(d) 2548 r := int(p.From.Reg) 2549 2550 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) { 2551 c.ctxt.Diag("literal operation on R0\n%v", p) 2552 } 2553 if int64(int16(d)) == d { 2554 // MOVD $int16, Ry or MOVD $offset(Rx), Ry 2555 o1 = AOP_IRR(uint32(OP_ADDI), uint32(p.To.Reg), uint32(r), uint32(v)) 2556 } else { 2557 // MOVD $uint16, Ry 2558 if int64(uint16(d)) != d || (r != 0 && r != REGZERO) { 2559 c.ctxt.Diag("Rule expects a uint16 constant load. got:\n%v", p) 2560 } 2561 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v)) 2562 } 2563 2564 case 4: /* add/mul $scon,[r1],r2 */ 2565 v := c.regoff(&p.From) 2566 2567 r := int(p.Reg) 2568 if r == 0 { 2569 r = int(p.To.Reg) 2570 } 2571 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 { 2572 c.ctxt.Diag("literal operation on R0\n%v", p) 2573 } 2574 if int32(int16(v)) != v { 2575 log.Fatalf("mishandled instruction %v", p) 2576 } 2577 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2578 2579 case 5: /* syscall */ 2580 o1 = c.oprrr(p.As) 2581 2582 case 6: /* logical op Rb,[Rs,]Ra; no literal */ 2583 r := int(p.Reg) 2584 2585 if r == 0 { 2586 r = int(p.To.Reg) 2587 } 2588 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM. 2589 switch p.As { 2590 case AROTL: 2591 o1 = AOP_MD(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0)) 2592 case AROTLW: 2593 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31) 2594 default: 2595 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 { 2596 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred 2597 // hardware no-op. This happens because $0 matches C_REG before C_ZCON. 
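				// The emitted word is ori Ry,Rx,0, which for Rx = Ry = R0 is the
				// same encoding as the NOP constant above.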
2598 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0) 2599 } else { 2600 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2601 } 2602 } 2603 2604 case 7: /* mov r, soreg ==> stw o(r) */ 2605 r := int(p.To.Reg) 2606 v := c.regoff(&p.To) 2607 if int32(int16(v)) != v { 2608 log.Fatalf("mishandled instruction %v", p) 2609 } 2610 // Offsets in DS form stores must be a multiple of 4 2611 inst := c.opstore(p.As) 2612 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 2613 log.Fatalf("invalid offset for DS form load/store %v", p) 2614 } 2615 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v)) 2616 2617 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */ 2618 r := int(p.From.Reg) 2619 v := c.regoff(&p.From) 2620 if int32(int16(v)) != v { 2621 log.Fatalf("mishandled instruction %v", p) 2622 } 2623 // Offsets in DS form loads must be a multiple of 4 2624 inst := c.opload(p.As) 2625 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 2626 log.Fatalf("invalid offset for DS form load/store %v", p) 2627 } 2628 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v)) 2629 2630 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 2631 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 2632 2633 case 9: /* RLDC Ra, $sh, $mb, Rb */ 2634 sh := uint32(p.RestArgs[0].Addr.Offset) & 0x3F 2635 mb := uint32(p.RestArgs[1].Addr.Offset) & 0x3F 2636 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (uint32(sh) & 0x1F)) 2637 o1 |= (sh & 0x20) >> 4 // sh[5] is placed in bit 1. 2638 o1 |= (mb & 0x1F) << 6 // mb[0:4] is placed in bits 6-10. 2639 o1 |= (mb & 0x20) // mb[5] is placed in bit 5 2640 2641 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */ 2642 r := int(p.Reg) 2643 2644 if r == 0 { 2645 r = int(p.To.Reg) 2646 } 2647 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r)) 2648 2649 case 11: /* br/bl bra */ 2650 v := int32(0) 2651 2652 if p.To.Target() != nil { 2653 v = int32(p.To.Target().Pc - p.Pc) 2654 if v&03 != 0 { 2655 c.ctxt.Diag("odd branch target address\n%v", p) 2656 v &^= 03 2657 } 2658 2659 if v < -(1<<25) || v >= 1<<24 { 2660 c.ctxt.Diag("branch too far\n%v", p) 2661 } 2662 } 2663 2664 o1 = OP_BR(c.opirr(p.As), uint32(v), 0) 2665 if p.To.Sym != nil { 2666 rel := obj.Addrel(c.cursym) 2667 rel.Off = int32(c.pc) 2668 rel.Siz = 4 2669 rel.Sym = p.To.Sym 2670 v += int32(p.To.Offset) 2671 if v&03 != 0 { 2672 c.ctxt.Diag("odd branch target address\n%v", p) 2673 v &^= 03 2674 } 2675 2676 rel.Add = int64(v) 2677 rel.Type = objabi.R_CALLPOWER 2678 } 2679 o2 = NOP // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking 2680 2681 case 13: /* mov[bhwd]{z,} r,r */ 2682 // This needs to handle "MOV* $0, Rx". This shows up because $0 also 2683 // matches C_REG if r0iszero. This happens because C_REG sorts before C_U16CON 2684 // TODO: fix the above behavior and cleanup this exception. 
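		// In that case the move below is just addi Rx, R0, 0, i.e. li Rx, 0.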
2685 if p.From.Type == obj.TYPE_CONST { 2686 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0) 2687 break 2688 } 2689 if p.To.Type == obj.TYPE_CONST { 2690 c.ctxt.Diag("cannot move into constant 0\n%v", p) 2691 } 2692 2693 switch p.As { 2694 case AMOVB: 2695 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2696 case AMOVBZ: 2697 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31) 2698 case AMOVH: 2699 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2700 case AMOVHZ: 2701 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31) 2702 case AMOVW: 2703 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2704 case AMOVWZ: 2705 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */ 2706 case AMOVD: 2707 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg)) 2708 default: 2709 c.ctxt.Diag("internal: bad register move/truncation\n%v", p) 2710 } 2711 2712 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */ 2713 r := uint32(p.Reg) 2714 2715 if r == 0 { 2716 r = uint32(p.To.Reg) 2717 } 2718 d := c.vregoff(p.GetFrom3()) 2719 switch p.As { 2720 2721 // These opcodes expect a mask operand that has to be converted into the 2722 // appropriate operand. The way these were defined, not all valid masks are possible. 2723 // Left here for compatibility in case they were used or generated. 2724 case ARLDCL, ARLDCLCC: 2725 mb, me, valid := decodeMask64(d) 2726 if me != 63 || !valid { 2727 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p) 2728 } 2729 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), mb) 2730 2731 case ARLDCR, ARLDCRCC: 2732 mb, me, valid := decodeMask64(d) 2733 if mb != 0 || !valid { 2734 c.ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p) 2735 } 2736 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), me) 2737 2738 // These opcodes use a shift count like the ppc64 asm, no mask conversion done 2739 case ARLDICR, ARLDICRCC: 2740 me := uint32(d) 2741 sh := c.regoff(&p.From) 2742 if me < 0 || me > 63 || sh > 63 { 2743 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p) 2744 } 2745 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), me) 2746 2747 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC: 2748 mb := uint32(d) 2749 sh := c.regoff(&p.From) 2750 if mb < 0 || mb > 63 || sh > 63 { 2751 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p) 2752 } 2753 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), mb) 2754 2755 case ACLRLSLDI: 2756 // This is an extended mnemonic defined in the ISA section C.8.1 2757 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n 2758 // It maps onto RLDIC so is directly generated here based on the operands from 2759 // the clrlsldi. 
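			// For example (illustrative), CLRLSLDI $24, R4, $16, R3 (b=24, n=16)
			// is emitted as rldic r3,r4,16,8.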
2760 n := int32(d) 2761 b := c.regoff(&p.From) 2762 if n > b || b > 63 { 2763 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p) 2764 } 2765 o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n)) 2766 2767 default: 2768 c.ctxt.Diag("unexpected op in rldc case\n%v", p) 2769 } 2770 2771 case 16: /* bc bo,bi,bra */ 2772 a := 0 2773 2774 r := int(p.Reg) 2775 2776 if p.From.Type == obj.TYPE_CONST { 2777 a = int(c.regoff(&p.From)) 2778 } else if p.From.Type == obj.TYPE_REG { 2779 if r != 0 { 2780 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r) 2781 } 2782 // BI values for the CR 2783 switch p.From.Reg { 2784 case REG_CR0: 2785 r = BI_CR0 2786 case REG_CR1: 2787 r = BI_CR1 2788 case REG_CR2: 2789 r = BI_CR2 2790 case REG_CR3: 2791 r = BI_CR3 2792 case REG_CR4: 2793 r = BI_CR4 2794 case REG_CR5: 2795 r = BI_CR5 2796 case REG_CR6: 2797 r = BI_CR6 2798 case REG_CR7: 2799 r = BI_CR7 2800 default: 2801 c.ctxt.Diag("unrecognized register: expecting CR\n") 2802 } 2803 } 2804 v := int32(0) 2805 if p.To.Target() != nil { 2806 v = int32(p.To.Target().Pc - p.Pc) 2807 } 2808 if v&03 != 0 { 2809 c.ctxt.Diag("odd branch target address\n%v", p) 2810 v &^= 03 2811 } 2812 2813 if v < -(1<<16) || v >= 1<<15 { 2814 c.ctxt.Diag("branch too far\n%v", p) 2815 } 2816 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0) 2817 2818 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */ 2819 var v int32 2820 var bh uint32 = 0 2821 if p.As == ABC || p.As == ABCL { 2822 v = c.regoff(&p.From) & 31 2823 } else { 2824 v = 20 /* unconditional */ 2825 } 2826 r := int(p.Reg) 2827 if r == 0 { 2828 r = 0 2829 } 2830 switch oclass(&p.To) { 2831 case C_CTR: 2832 o1 = OPVCC(19, 528, 0, 0) 2833 2834 case C_LR: 2835 o1 = OPVCC(19, 16, 0, 0) 2836 2837 default: 2838 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p) 2839 v = 0 2840 } 2841 2842 // Insert optional branch hint for bclr[l]/bcctr[l] 2843 if p.From3Type() != obj.TYPE_NONE { 2844 bh = uint32(p.GetFrom3().Offset) 2845 if bh == 2 || bh > 3 { 2846 log.Fatalf("BH must be 0,1,3 for %v", p) 2847 } 2848 o1 |= bh << 11 2849 } 2850 2851 if p.As == ABL || p.As == ABCL { 2852 o1 |= 1 2853 } 2854 o1 = OP_BCR(o1, uint32(v), uint32(r)) 2855 2856 case 19: /* mov $lcon,r ==> cau+or */ 2857 d := c.vregoff(&p.From) 2858 if o.ispfx { 2859 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d) 2860 } else { 2861 o1 = loadu32(int(p.To.Reg), d) 2862 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d))) 2863 } 2864 2865 case 20: /* add $ucon,,r | addis $addcon,r,r */ 2866 v := c.regoff(&p.From) 2867 2868 r := int(p.Reg) 2869 if r == 0 { 2870 r = int(p.To.Reg) 2871 } 2872 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2873 2874 case 21: /* or $u32con,rx[,ry] => oris + ori (similar for xor) */ 2875 var opu, opl uint32 2876 r := uint32(p.Reg) 2877 if r == 0 { 2878 r = uint32(p.To.Reg) 2879 } 2880 switch p.As { 2881 case AOR: 2882 opu, opl = OP_ORIS, OP_ORI 2883 case AXOR: 2884 opu, opl = OP_XORIS, OP_XORI 2885 default: 2886 c.ctxt.Diag("unhandled opcode.\n%v", p) 2887 } 2888 o1 = LOP_IRR(opu, uint32(p.To.Reg), r, uint32(p.From.Offset>>16)) 2889 o2 = LOP_IRR(opl, uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.From.Offset)&0xFFFF) 2890 2891 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add, add $s34con,r1 ==> addis+ori+slw+ori+add */ 2892 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2893 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2894 } 2895 d := c.vregoff(&p.From) 2896 r := 
int(p.Reg) 2897 if r == 0 { 2898 r = int(p.To.Reg) 2899 } 2900 if p.From.Sym != nil { 2901 c.ctxt.Diag("%v is not supported", p) 2902 } 2903 if o.ispfx { 2904 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d) 2905 } else if o.size == 8 { 2906 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) // tmp = uint16(d) 2907 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from 2908 } else if o.size == 12 { 2909 // Note, o1 is ADDIS if d is negative, ORIS otherwise. 2910 o1 = loadu32(REGTMP, d) // tmp = d & 0xFFFF0000 2911 o2 = loadl16(REGTMP, d) // tmp |= d & 0xFFFF 2912 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = from + tmp 2913 } else { 2914 // For backwards compatibility with GOPPC64 < 10, generate 34b constants in register. 2915 o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000) 2916 o2 = loadl16(REGTMP, int64(d>>16)) // tmp |= (d>>16)&0xFFFF 2917 o3 = AOP_MD(OP_RLDICR, REGTMP, REGTMP, 16, 63-16) // tmp <<= 16 2918 o4 = loadl16(REGTMP, int64(uint16(d))) // tmp |= d&0xFFFF 2919 o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2920 } 2921 2922 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */ 2923 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2924 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2925 } 2926 d := c.vregoff(&p.From) 2927 r := int(p.Reg) 2928 if r == 0 { 2929 r = int(p.To.Reg) 2930 } 2931 2932 // With S16CON operand, generate 2 instructions using ADDI for signed value, 2933 // with 32CON operand generate 3 instructions. 2934 if o.size == 8 { 2935 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d))) 2936 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2937 } else { 2938 o1 = loadu32(REGTMP, d) 2939 o2 = loadl16(REGTMP, d) 2940 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2941 } 2942 if p.From.Sym != nil { 2943 c.ctxt.Diag("%v is not supported", p) 2944 } 2945 2946 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */ 2947 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0)) 2948 // This is needed for -0. 2949 if o.size == 8 { 2950 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg)) 2951 } 2952 2953 case 25: 2954 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] 
-> rldicl */ 2955 v := c.regoff(&p.From) 2956 2957 if v < 0 { 2958 v = 0 2959 } else if v > 63 { 2960 v = 63 2961 } 2962 r := int(p.Reg) 2963 if r == 0 { 2964 r = int(p.To.Reg) 2965 } 2966 var a int 2967 op := uint32(0) 2968 switch p.As { 2969 case ASLD, ASLDCC: 2970 a = int(63 - v) 2971 op = OP_RLDICR 2972 2973 case ASRD, ASRDCC: 2974 a = int(v) 2975 v = 64 - v 2976 op = OP_RLDICL 2977 case AROTL: 2978 a = int(0) 2979 op = OP_RLDICL 2980 case AEXTSWSLI, AEXTSWSLICC: 2981 a = int(v) 2982 default: 2983 c.ctxt.Diag("unexpected op in sldi case\n%v", p) 2984 a = 0 2985 o1 = 0 2986 } 2987 2988 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC { 2989 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v)) 2990 2991 } else { 2992 o1 = AOP_MD(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a)) 2993 } 2994 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC { 2995 o1 |= 1 // Set the condition code bit 2996 } 2997 2998 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */ 2999 v := c.vregoff(&p.From) 3000 r := int(p.From.Reg) 3001 var rel *obj.Reloc 3002 3003 switch p.From.Name { 3004 case obj.NAME_EXTERN, obj.NAME_STATIC: 3005 // Load a 32 bit constant, or relocation depending on if a symbol is attached 3006 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true) 3007 default: 3008 // Add a 32 bit offset to a register. 3009 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v)))) 3010 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) 3011 } 3012 3013 if o.ispfx { 3014 if rel == nil { 3015 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v) 3016 } else { 3017 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0) 3018 rel.Type = objabi.R_ADDRPOWER_PCREL34 3019 } 3020 } 3021 3022 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */ 3023 v := c.regoff(p.GetFrom3()) 3024 3025 r := int(p.From.Reg) 3026 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 3027 3028 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */ 3029 if p.To.Reg == REGTMP || p.From.Reg == REGTMP { 3030 c.ctxt.Diag("can't synthesize large constant\n%v", p) 3031 } 3032 v := c.vregoff(p.GetFrom3()) 3033 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16) 3034 o2 = loadl16(REGTMP, v) 3035 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP) 3036 if p.From.Sym != nil { 3037 c.ctxt.Diag("%v is not supported", p) 3038 } 3039 3040 case 29: /* rldic[lr]? 
$sh,s,$mask,a -- left, right, plain give different masks */ 3041 sh := uint32(c.regoff(&p.From)) 3042 d := c.vregoff(p.GetFrom3()) 3043 mb, me, valid := decodeMask64(d) 3044 var a uint32 3045 switch p.As { 3046 case ARLDC, ARLDCCC: 3047 a = mb 3048 if me != (63-sh) || !valid { 3049 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p) 3050 } 3051 3052 case ARLDCL, ARLDCLCC: 3053 a = mb 3054 if mb != 63 || !valid { 3055 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p) 3056 } 3057 3058 case ARLDCR, ARLDCRCC: 3059 a = me 3060 if mb != 0 || !valid { 3061 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p) 3062 } 3063 3064 default: 3065 c.ctxt.Diag("unexpected op in rldic case\n%v", p) 3066 } 3067 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, a) 3068 3069 case 30: /* rldimi $sh,s,$mask,a */ 3070 sh := uint32(c.regoff(&p.From)) 3071 d := c.vregoff(p.GetFrom3()) 3072 3073 // Original opcodes had mask operands which had to be converted to a shift count as expected by 3074 // the ppc64 asm. 3075 switch p.As { 3076 case ARLDMI, ARLDMICC: 3077 mb, me, valid := decodeMask64(d) 3078 if me != (63-sh) || !valid { 3079 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), me, sh, p) 3080 } 3081 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb) 3082 3083 // Opcodes with shift count operands. 3084 case ARLDIMI, ARLDIMICC: 3085 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, uint32(d)) 3086 } 3087 3088 case 31: /* dword */ 3089 d := c.vregoff(&p.From) 3090 3091 if c.ctxt.Arch.ByteOrder == binary.BigEndian { 3092 o1 = uint32(d >> 32) 3093 o2 = uint32(d) 3094 } else { 3095 o1 = uint32(d) 3096 o2 = uint32(d >> 32) 3097 } 3098 3099 if p.From.Sym != nil { 3100 rel := obj.Addrel(c.cursym) 3101 rel.Off = int32(c.pc) 3102 rel.Siz = 8 3103 rel.Sym = p.From.Sym 3104 rel.Add = p.From.Offset 3105 rel.Type = objabi.R_ADDR 3106 o2 = 0 3107 o1 = o2 3108 } 3109 3110 case 32: /* fmul frc,fra,frd */ 3111 r := int(p.Reg) 3112 3113 if r == 0 { 3114 r = int(p.To.Reg) 3115 } 3116 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6 3117 3118 case 33: /* fabs [frb,]frd; fmr. 
frb,frd */ 3119 r := int(p.From.Reg) 3120 3121 if oclass(&p.From) == C_NONE { 3122 r = int(p.To.Reg) 3123 } 3124 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r)) 3125 3126 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */ 3127 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6 3128 3129 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */ 3130 v := c.regoff(&p.To) 3131 r := int(p.To.Reg) 3132 // Offsets in DS form stores must be a multiple of 4 3133 if o.ispfx { 3134 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS) 3135 o1 |= uint32((v >> 16) & 0x3FFFF) 3136 o2 |= uint32(v & 0xFFFF) 3137 } else { 3138 inst := c.opstore(p.As) 3139 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3140 log.Fatalf("invalid offset for DS form load/store %v", p) 3141 } 3142 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) 3143 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v)) 3144 } 3145 3146 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */ 3147 v := c.regoff(&p.From) 3148 r := int(p.From.Reg) 3149 3150 if o.ispfx { 3151 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS) 3152 o1 |= uint32((v >> 16) & 0x3FFFF) 3153 o2 |= uint32(v & 0xFFFF) 3154 } else { 3155 if o.a6 == C_REG { 3156 // Reuse the base register when loading a GPR (C_REG) to avoid 3157 // using REGTMP (R31) when possible. 3158 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v))) 3159 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) 3160 } else { 3161 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v))) 3162 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v)) 3163 } 3164 } 3165 3166 // Sign extend MOVB if needed 3167 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3168 3169 case 40: /* word */ 3170 o1 = uint32(c.regoff(&p.From)) 3171 3172 case 41: /* stswi */ 3173 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 { 3174 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) 3175 } 3176 3177 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 3178 3179 case 42: /* lswi */ 3180 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 { 3181 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) 3182 } 3183 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 3184 3185 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */ 3186 /* TH field for dcbt/dcbtst: */ 3187 /* 0 = Block access - program will soon access EA. */ 3188 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */ 3189 /* 16 = Block access - program will soon make a transient access to EA. */ 3190 /* 17 = Block access - program will not access EA for a long time. */ 3191 3192 /* L field for dcbf: */ 3193 /* 0 = invalidates the block containing EA in all processors. */ 3194 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */ 3195 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). 
*/ 3196 if p.To.Type == obj.TYPE_NONE { 3197 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg)) 3198 } else { 3199 th := c.regoff(&p.To) 3200 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg)) 3201 } 3202 3203 case 44: /* indexed store */ 3204 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 3205 3206 case 45: /* indexed load */ 3207 switch p.As { 3208 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */ 3209 /* The EH field can be used as a lock acquire/release hint as follows: */ 3210 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */ 3211 /* 1 = Exclusive Access (lock acquire and release) */ 3212 case ALBAR, ALHAR, ALWAR, ALDAR: 3213 if p.From3Type() != obj.TYPE_NONE { 3214 eh := int(c.regoff(p.GetFrom3())) 3215 if eh > 1 { 3216 c.ctxt.Diag("illegal EH field\n%v", p) 3217 } 3218 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh)) 3219 } else { 3220 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3221 } 3222 default: 3223 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3224 } 3225 case 46: /* plain op */ 3226 o1 = c.oprrr(p.As) 3227 3228 case 47: /* op Ra, Rd; also op [Ra,] Rd */ 3229 r := int(p.From.Reg) 3230 3231 if r == 0 { 3232 r = int(p.To.Reg) 3233 } 3234 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 3235 3236 case 48: /* op Rs, Ra */ 3237 r := int(p.From.Reg) 3238 3239 if r == 0 { 3240 r = int(p.To.Reg) 3241 } 3242 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 3243 3244 case 49: /* op Rb; op $n, Rb */ 3245 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */ 3246 v := c.regoff(&p.From) & 1 3247 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21 3248 } else { 3249 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg)) 3250 } 3251 3252 case 50: /* rem[u] r1[,r2],r3 */ 3253 r := int(p.Reg) 3254 3255 if r == 0 { 3256 r = int(p.To.Reg) 3257 } 3258 v := c.oprrr(p.As) 3259 t := v & (1<<10 | 1) /* OE|Rc */ 3260 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 3261 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg)) 3262 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 3263 if p.As == AREMU { 3264 o4 = o3 3265 3266 /* Clear top 32 bits */ 3267 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5 3268 } 3269 3270 case 51: /* remd[u] r1[,r2],r3 */ 3271 r := int(p.Reg) 3272 3273 if r == 0 { 3274 r = int(p.To.Reg) 3275 } 3276 v := c.oprrr(p.As) 3277 t := v & (1<<10 | 1) /* OE|Rc */ 3278 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 3279 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg)) 3280 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 3281 /* cases 50,51: removed; can be reused. */ 3282 3283 /* cases 50,51: removed; can be reused. 
*/ 3284 3285 case 52: /* mtfsbNx cr(n) */ 3286 v := c.regoff(&p.From) & 31 3287 3288 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0) 3289 3290 case 53: /* mffsX ,fr1 */ 3291 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0) 3292 3293 case 55: /* op Rb, Rd */ 3294 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg)) 3295 3296 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */ 3297 v := c.regoff(&p.From) 3298 3299 r := int(p.Reg) 3300 if r == 0 { 3301 r = int(p.To.Reg) 3302 } 3303 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31) 3304 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) { 3305 o1 |= 1 << 1 /* mb[5] */ 3306 } 3307 3308 case 57: /* slw $sh,[s,]a -> rlwinm ... */ 3309 v := c.regoff(&p.From) 3310 3311 r := int(p.Reg) 3312 if r == 0 { 3313 r = int(p.To.Reg) 3314 } 3315 3316 /* 3317 * Let user (gs) shoot himself in the foot. 3318 * qc has already complained. 3319 * 3320 if(v < 0 || v > 31) 3321 ctxt->diag("illegal shift %ld\n%v", v, p); 3322 */ 3323 if v < 0 { 3324 v = 0 3325 } else if v > 32 { 3326 v = 32 3327 } 3328 var mask [2]uint8 3329 switch p.As { 3330 case AROTLW: 3331 mask[0], mask[1] = 0, 31 3332 case ASRW, ASRWCC: 3333 mask[0], mask[1] = uint8(v), 31 3334 v = 32 - v 3335 default: 3336 mask[0], mask[1] = 0, uint8(31-v) 3337 } 3338 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1])) 3339 if p.As == ASLWCC || p.As == ASRWCC { 3340 o1 |= 1 // set the condition code 3341 } 3342 3343 case 58: /* logical $andcon,[s],a */ 3344 v := c.regoff(&p.From) 3345 3346 r := int(p.Reg) 3347 if r == 0 { 3348 r = int(p.To.Reg) 3349 } 3350 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 3351 3352 case 60: /* tw to,a,b */ 3353 r := int(c.regoff(&p.From) & 31) 3354 3355 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg)) 3356 3357 case 61: /* tw to,a,$simm */ 3358 r := int(c.regoff(&p.From) & 31) 3359 3360 v := c.regoff(&p.To) 3361 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v)) 3362 3363 case 62: /* clrlslwi $sh,s,$mask,a */ 3364 v := c.regoff(&p.From) 3365 n := c.regoff(p.GetFrom3()) 3366 // This is an extended mnemonic described in the ISA C.8.2 3367 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n 3368 // It maps onto rlwinm which is directly generated here. 3369 if n > v || v >= 32 { 3370 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p) 3371 } 3372 3373 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n)) 3374 3375 case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a*/ 3376 var mb, me uint32 3377 if len(p.RestArgs) == 1 { // Mask needs decomposed into mb and me. 3378 var valid bool 3379 // Note, optab rules ensure $mask is a 32b constant. 3380 mb, me, valid = decodeMask32(uint32(p.RestArgs[0].Addr.Offset)) 3381 if !valid { 3382 c.ctxt.Diag("cannot generate mask #%x\n%v", uint64(p.RestArgs[0].Addr.Offset), p) 3383 } 3384 } else { // Otherwise, mask is already passed as mb and me in RestArgs. 
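			// (For example, a single $mask of 0x00ffff00 decomposes above into mb=8, me=23;
			// here the two immediates go into the MB and ME fields unchanged.)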
3385 mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset) 3386 } 3387 if p.From.Type == obj.TYPE_CONST { 3388 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me) 3389 } else { 3390 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me) 3391 } 3392 3393 case 64: /* mtfsf fr[, $m] {,fpcsr} */ 3394 var v int32 3395 if p.From3Type() != obj.TYPE_NONE { 3396 v = c.regoff(p.GetFrom3()) & 255 3397 } else { 3398 v = 255 3399 } 3400 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11 3401 3402 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */ 3403 if p.To.Reg == 0 { 3404 c.ctxt.Diag("must specify FPSCR(n)\n%v", p) 3405 } 3406 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12 3407 3408 case 66: /* mov spr,r1; mov r1,spr */ 3409 var r int 3410 var v int32 3411 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 { 3412 r = int(p.From.Reg) 3413 v = int32(p.To.Reg) 3414 o1 = OPVCC(31, 467, 0, 0) /* mtspr */ 3415 } else { 3416 r = int(p.To.Reg) 3417 v = int32(p.From.Reg) 3418 o1 = OPVCC(31, 339, 0, 0) /* mfspr */ 3419 } 3420 3421 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3422 3423 case 67: /* mcrf crfD,crfS */ 3424 if p.From.Reg == REG_CR || p.To.Reg == REG_CR { 3425 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p) 3426 } 3427 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0) 3428 3429 case 68: /* mfcr rD; mfocrf CRM,rD */ 3430 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */ 3431 if p.From.Reg != REG_CR { 3432 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */ 3433 o1 |= 1<<20 | v<<12 /* new form, mfocrf */ 3434 } 3435 3436 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */ 3437 var v uint32 3438 if p.To.Reg == REG_CR { 3439 v = 0xff 3440 } else if p.To.Offset != 0 { // MOVFL gpr, constant 3441 v = uint32(p.To.Offset) 3442 } else { // p.To.Reg == REG_CRx 3443 v = 1 << uint(7-(p.To.Reg&7)) 3444 } 3445 // Use mtocrf form if only one CR field moved. 
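		// For example (illustrative operands): a move to a single field such as
		// MOVFL R3, CR2 yields v = 1<<(7-2) = 0x20, a single set bit, so the extra bit
		// OR'd in below switches the encoding from mtcrf to mtocrf; MOVFL R3, CR moves
		// all eight fields (v = 0xff) and keeps the plain mtcrf form.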
3446 if bits.OnesCount32(v) == 1 { 3447 v |= 1 << 8 3448 } 3449 3450 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12 3451 3452 case 70: /* cmp* r,r,cr or cmp*i r,i,cr or fcmp f,f,cr or cmpeqb r,r */ 3453 r := uint32(p.Reg&7) << 2 3454 if p.To.Type == obj.TYPE_CONST { 3455 o1 = AOP_IRR(c.opirr(p.As), r, uint32(p.From.Reg), uint32(uint16(p.To.Offset))) 3456 } else { 3457 o1 = AOP_RRR(c.oprrr(p.As), r, uint32(p.From.Reg), uint32(p.To.Reg)) 3458 } 3459 3460 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */ 3461 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg)) 3462 3463 case 73: /* mcrfs crfD,crfS */ 3464 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg { 3465 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p) 3466 } 3467 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0) 3468 3469 case 77: /* syscall $scon, syscall Rx */ 3470 if p.From.Type == obj.TYPE_CONST { 3471 if p.From.Offset > BIG || p.From.Offset < -BIG { 3472 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p) 3473 } 3474 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset)) 3475 } else if p.From.Type == obj.TYPE_REG { 3476 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg)) 3477 } else { 3478 c.ctxt.Diag("illegal syscall: %v", p) 3479 o1 = 0x7fe00008 // trap always 3480 } 3481 3482 o2 = c.oprrr(p.As) 3483 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0 3484 3485 case 78: /* undef */ 3486 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed 3487 always to be an illegal instruction." */ 3488 3489 /* relocation operations */ 3490 case 74: 3491 var rel *obj.Reloc 3492 v := c.vregoff(&p.To) 3493 // Offsets in DS form stores must be a multiple of 4 3494 inst := c.opstore(p.As) 3495 3496 // Can't reuse base for store instructions. 3497 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false) 3498 3499 // Rewrite as a prefixed store if supported. 3500 if o.ispfx { 3501 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL) 3502 rel.Type = objabi.R_ADDRPOWER_PCREL34 3503 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3504 log.Fatalf("invalid offset for DS form load/store %v", p) 3505 } 3506 3507 case 75: // 32 bit offset symbol loads (got/toc/addr) 3508 var rel *obj.Reloc 3509 v := p.From.Offset 3510 3511 // Offsets in DS form loads must be a multiple of 4 3512 inst := c.opload(p.As) 3513 switch p.From.Name { 3514 case obj.NAME_GOTREF, obj.NAME_TOCREF: 3515 if v != 0 { 3516 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p) 3517 } 3518 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3519 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3520 rel = obj.Addrel(c.cursym) 3521 rel.Off = int32(c.pc) 3522 rel.Siz = 8 3523 rel.Sym = p.From.Sym 3524 switch p.From.Name { 3525 case obj.NAME_GOTREF: 3526 rel.Type = objabi.R_ADDRPOWER_GOT 3527 case obj.NAME_TOCREF: 3528 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS 3529 } 3530 default: 3531 reuseBaseReg := o.a6 == C_REG 3532 // Reuse To.Reg as base register if it is a GPR. 3533 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg) 3534 } 3535 3536 // Convert to prefixed forms if supported. 
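		// Sketch of the conversion (only when prefixed instructions are available): the
		// two-instruction sequence emitted above, e.g. an addis/ld pair carrying an
		// R_ADDRPOWER_TOCREL_DS relocation, collapses into a single PC-relative prefixed
		// load with one R_ADDRPOWER_PCREL34 relocation; the GOT and TLS IE cases are
		// rewritten the same way with their matching *_PCREL34 relocation types.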
3537 if o.ispfx { 3538 switch rel.Type { 3539 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS, 3540 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS: 3541 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3542 rel.Type = objabi.R_ADDRPOWER_PCREL34 3543 case objabi.R_POWER_TLS_IE: 3544 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3545 rel.Type = objabi.R_POWER_TLS_IE_PCREL34 3546 case objabi.R_ADDRPOWER_GOT: 3547 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3548 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34 3549 default: 3550 // We've failed to convert a TOC-relative relocation to a PC-relative one. 3551 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type) 3552 } 3553 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3554 log.Fatalf("invalid offset for DS form load/store %v", p) 3555 } 3556 3557 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3558 3559 case 79: 3560 if p.From.Offset != 0 { 3561 c.ctxt.Diag("invalid offset against tls var %v", p) 3562 } 3563 rel := obj.Addrel(c.cursym) 3564 rel.Off = int32(c.pc) 3565 rel.Siz = 8 3566 rel.Sym = p.From.Sym 3567 if !o.ispfx { 3568 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0) 3569 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3570 rel.Type = objabi.R_POWER_TLS_LE 3571 } else { 3572 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0) 3573 rel.Type = objabi.R_POWER_TLS_LE_TPREL34 3574 } 3575 3576 case 80: 3577 if p.From.Offset != 0 { 3578 c.ctxt.Diag("invalid offset against tls var %v", p) 3579 } 3580 rel := obj.Addrel(c.cursym) 3581 rel.Off = int32(c.pc) 3582 rel.Siz = 8 3583 rel.Sym = p.From.Sym 3584 rel.Type = objabi.R_POWER_TLS_IE 3585 if !o.ispfx { 3586 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3587 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) 3588 } else { 3589 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3590 rel.Type = objabi.R_POWER_TLS_IE_PCREL34 3591 } 3592 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13) 3593 rel = obj.Addrel(c.cursym) 3594 rel.Off = int32(c.pc) + 8 3595 rel.Siz = 4 3596 rel.Sym = p.From.Sym 3597 rel.Type = objabi.R_POWER_TLS 3598 3599 case 82: /* vector instructions, VX-form and VC-form */ 3600 if p.From.Type == obj.TYPE_REG { 3601 /* reg reg none OR reg reg reg */ 3602 /* 3-register operand order: VRA, VRB, VRT */ 3603 /* 2-register operand order: VRA, VRT */ 3604 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3605 } else if p.From3Type() == obj.TYPE_CONST { 3606 /* imm imm reg reg */ 3607 /* operand order: SIX, VRA, ST, VRT */ 3608 six := int(c.regoff(&p.From)) 3609 st := int(c.regoff(p.GetFrom3())) 3610 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six)) 3611 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 { 3612 /* imm reg reg */ 3613 /* operand order: UIM, VRB, VRT */ 3614 uim := int(c.regoff(&p.From)) 3615 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim)) 3616 } else { 3617 /* imm reg */ 3618 /* operand order: SIM, VRT */ 3619 sim := int(c.regoff(&p.From)) 3620 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim)) 3621 } 3622 3623 case 83: /* vector instructions, VA-form */ 3624 if p.From.Type == obj.TYPE_REG { 3625 /* reg reg reg reg */ 3626 /* 4-register operand order: VRA, VRB, VRC, VRT */ 3627 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) 3628 } else if p.From.Type == obj.TYPE_CONST { 3629 /* imm 
reg reg reg */ 3630 /* operand order: SHB, VRA, VRB, VRT */ 3631 shb := int(c.regoff(&p.From)) 3632 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb)) 3633 } 3634 3635 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc 3636 bc := c.vregoff(&p.From) 3637 if o.a1 == C_CRBIT { 3638 // CR bit is encoded as a register, not a constant. 3639 bc = int64(p.From.Reg) 3640 } 3641 3642 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg 3643 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc)) 3644 3645 case 85: /* vector instructions, VX-form */ 3646 /* reg none reg */ 3647 /* 2-register operand order: VRB, VRT */ 3648 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg)) 3649 3650 case 86: /* VSX indexed store, XX1-form */ 3651 /* reg reg reg */ 3652 /* 3-register operand order: XT, (RB)(RA*1) */ 3653 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 3654 3655 case 87: /* VSX indexed load, XX1-form */ 3656 /* reg reg reg */ 3657 /* 3-register operand order: (RB)(RA*1), XT */ 3658 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3659 3660 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */ 3661 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3662 3663 case 89: /* VSX instructions, XX2-form */ 3664 /* reg none reg OR reg imm reg */ 3665 /* 2-register operand order: XB, XT or XB, UIM, XT*/ 3666 uim := int(c.regoff(p.GetFrom3())) 3667 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg)) 3668 3669 case 90: /* VSX instructions, XX3-form */ 3670 if p.From3Type() == obj.TYPE_NONE { 3671 /* reg reg reg */ 3672 /* 3-register operand order: XA, XB, XT */ 3673 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3674 } else if p.From3Type() == obj.TYPE_CONST { 3675 /* reg reg reg imm */ 3676 /* operand order: XA, XB, DM, XT */ 3677 dm := int(c.regoff(p.GetFrom3())) 3678 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm)) 3679 } 3680 3681 case 91: /* VSX instructions, XX4-form */ 3682 /* reg reg reg reg */ 3683 /* 3-register operand order: XA, XB, XC, XT */ 3684 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) 3685 3686 case 92: /* X-form instructions, 3-operands */ 3687 if p.To.Type == obj.TYPE_CONST { 3688 /* imm reg reg */ 3689 xf := int32(p.From.Reg) 3690 if REG_F0 <= xf && xf <= REG_F31 { 3691 /* operand order: FRA, FRB, BF */ 3692 bf := int(c.regoff(&p.To)) << 2 3693 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) 3694 } else { 3695 /* operand order: RA, RB, L */ 3696 l := int(c.regoff(&p.To)) 3697 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg)) 3698 } 3699 } else if p.From3Type() == obj.TYPE_CONST { 3700 /* reg reg imm */ 3701 /* operand order: RB, L, RA */ 3702 l := int(c.regoff(p.GetFrom3())) 3703 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg)) 3704 } else if p.To.Type == obj.TYPE_REG { 3705 cr := int32(p.To.Reg) 3706 if REG_CR0 <= cr && cr <= REG_CR7 { 3707 /* cr reg reg */ 3708 /* operand order: RA, RB, BF */ 3709 bf := (int(p.To.Reg) & 7) << 2 3710 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) 3711 } else if p.From.Type == obj.TYPE_CONST { 3712 /* reg imm */ 3713 /* operand order: L, RT */ 3714 l := int(c.regoff(&p.From)) 3715 o1 = 
AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg)) 3716 } else { 3717 switch p.As { 3718 case ACOPY, APASTECC: 3719 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg)) 3720 default: 3721 /* reg reg reg */ 3722 /* operand order: RS, RB, RA */ 3723 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3724 } 3725 } 3726 } 3727 3728 case 93: /* X-form instructions, 2-operands */ 3729 if p.To.Type == obj.TYPE_CONST { 3730 /* imm reg */ 3731 /* operand order: FRB, BF */ 3732 bf := int(c.regoff(&p.To)) << 2 3733 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg)) 3734 } else if p.Reg == 0 { 3735 /* popcnt* r,r, X-form */ 3736 /* operand order: RS, RA */ 3737 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3738 } 3739 3740 case 94: /* Z23-form instructions, 4-operands */ 3741 /* reg reg reg imm */ 3742 /* operand order: RA, RB, CY, RT */ 3743 cy := int(c.regoff(p.GetFrom3())) 3744 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy)) 3745 3746 case 96: /* VSX load, DQ-form */ 3747 /* reg imm reg */ 3748 /* operand order: (RA)(DQ), XT */ 3749 dq := int16(c.regoff(&p.From)) 3750 if (dq & 15) != 0 { 3751 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) 3752 } 3753 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq)) 3754 3755 case 97: /* VSX store, DQ-form */ 3756 /* reg imm reg */ 3757 /* operand order: XT, (RA)(DQ) */ 3758 dq := int16(c.regoff(&p.To)) 3759 if (dq & 15) != 0 { 3760 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) 3761 } 3762 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq)) 3763 case 98: /* VSX indexed load or load with length (also left-justified), x-form */ 3764 /* vsreg, reg, reg */ 3765 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3766 case 99: /* VSX store with length (also left-justified) x-form */ 3767 /* reg, reg, vsreg */ 3768 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg)) 3769 case 100: /* VSX X-form XXSPLTIB */ 3770 if p.From.Type == obj.TYPE_CONST { 3771 /* imm reg */ 3772 uim := int(c.regoff(&p.From)) 3773 /* imm reg */ 3774 /* Use AOP_XX1 form with 0 for one of the registers. 
*/ 3775 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim)) 3776 } else { 3777 c.ctxt.Diag("invalid ops for %v", p.As) 3778 } 3779 case 101: 3780 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) 3781 3782 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */ 3783 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3784 3785 case 106: /* MOVD spr, soreg */ 3786 v := int32(p.From.Reg) 3787 o1 = OPVCC(31, 339, 0, 0) /* mfspr */ 3788 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3789 so := c.regoff(&p.To) 3790 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so)) 3791 if so&0x3 != 0 { 3792 log.Fatalf("invalid offset for DS form load/store %v", p) 3793 } 3794 if p.To.Reg == REGTMP { 3795 log.Fatalf("SPR move to memory will clobber R31 %v", p) 3796 } 3797 3798 case 107: /* MOVD soreg, spr */ 3799 v := int32(p.From.Reg) 3800 so := c.regoff(&p.From) 3801 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so)) 3802 o2 = OPVCC(31, 467, 0, 0) /* mtspr */ 3803 v = int32(p.To.Reg) 3804 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3805 if so&0x3 != 0 { 3806 log.Fatalf("invalid offset for DS form load/store %v", p) 3807 } 3808 3809 case 108: /* mov r, xoreg ==> stwx rx,ry */ 3810 r := int(p.To.Reg) 3811 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r)) 3812 3813 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */ 3814 r := int(p.From.Reg) 3815 3816 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r)) 3817 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 
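		// Illustrative expansion (registers arbitrary): MOVB (R4)(R5), R3 assembles to an
		// lbzx into R3 followed by the extsb built below, since the ISA has no
		// sign-extending indexed byte load; MOVBZ takes the same path, but its optab
		// entry has size 4, so the second word is never emitted.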
3818 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3819 3820 case 110: /* SETB creg, rt */ 3821 bfa := uint32(p.From.Reg) << 2 3822 rt := uint32(p.To.Reg) 3823 o1 = LOP_RRR(OP_SETB, bfa, rt, 0) 3824 } 3825 3826 out[0] = o1 3827 out[1] = o2 3828 out[2] = o3 3829 out[3] = o4 3830 out[4] = o5 3831} 3832 3833func (c *ctxt9) vregoff(a *obj.Addr) int64 { 3834 c.instoffset = 0 3835 if a != nil { 3836 c.aclass(a) 3837 } 3838 return c.instoffset 3839} 3840 3841func (c *ctxt9) regoff(a *obj.Addr) int32 { 3842 return int32(c.vregoff(a)) 3843} 3844 3845func (c *ctxt9) oprrr(a obj.As) uint32 { 3846 switch a { 3847 case AADD: 3848 return OPVCC(31, 266, 0, 0) 3849 case AADDCC: 3850 return OPVCC(31, 266, 0, 1) 3851 case AADDV: 3852 return OPVCC(31, 266, 1, 0) 3853 case AADDVCC: 3854 return OPVCC(31, 266, 1, 1) 3855 case AADDC: 3856 return OPVCC(31, 10, 0, 0) 3857 case AADDCCC: 3858 return OPVCC(31, 10, 0, 1) 3859 case AADDCV: 3860 return OPVCC(31, 10, 1, 0) 3861 case AADDCVCC: 3862 return OPVCC(31, 10, 1, 1) 3863 case AADDE: 3864 return OPVCC(31, 138, 0, 0) 3865 case AADDECC: 3866 return OPVCC(31, 138, 0, 1) 3867 case AADDEV: 3868 return OPVCC(31, 138, 1, 0) 3869 case AADDEVCC: 3870 return OPVCC(31, 138, 1, 1) 3871 case AADDME: 3872 return OPVCC(31, 234, 0, 0) 3873 case AADDMECC: 3874 return OPVCC(31, 234, 0, 1) 3875 case AADDMEV: 3876 return OPVCC(31, 234, 1, 0) 3877 case AADDMEVCC: 3878 return OPVCC(31, 234, 1, 1) 3879 case AADDZE: 3880 return OPVCC(31, 202, 0, 0) 3881 case AADDZECC: 3882 return OPVCC(31, 202, 0, 1) 3883 case AADDZEV: 3884 return OPVCC(31, 202, 1, 0) 3885 case AADDZEVCC: 3886 return OPVCC(31, 202, 1, 1) 3887 case AADDEX: 3888 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */ 3889 3890 case AAND: 3891 return OPVCC(31, 28, 0, 0) 3892 case AANDCC: 3893 return OPVCC(31, 28, 0, 1) 3894 case AANDN: 3895 return OPVCC(31, 60, 0, 0) 3896 case AANDNCC: 3897 return OPVCC(31, 60, 0, 1) 3898 3899 case ACMP: 3900 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */ 3901 case ACMPU: 3902 return OPVCC(31, 32, 0, 0) | 1<<21 3903 case ACMPW: 3904 return OPVCC(31, 0, 0, 0) /* L=0 */ 3905 case ACMPWU: 3906 return OPVCC(31, 32, 0, 0) 3907 case ACMPB: 3908 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */ 3909 case ACMPEQB: 3910 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ 3911 3912 case ACNTLZW: 3913 return OPVCC(31, 26, 0, 0) 3914 case ACNTLZWCC: 3915 return OPVCC(31, 26, 0, 1) 3916 case ACNTLZD: 3917 return OPVCC(31, 58, 0, 0) 3918 case ACNTLZDCC: 3919 return OPVCC(31, 58, 0, 1) 3920 3921 case ACRAND: 3922 return OPVCC(19, 257, 0, 0) 3923 case ACRANDN: 3924 return OPVCC(19, 129, 0, 0) 3925 case ACREQV: 3926 return OPVCC(19, 289, 0, 0) 3927 case ACRNAND: 3928 return OPVCC(19, 225, 0, 0) 3929 case ACRNOR: 3930 return OPVCC(19, 33, 0, 0) 3931 case ACROR: 3932 return OPVCC(19, 449, 0, 0) 3933 case ACRORN: 3934 return OPVCC(19, 417, 0, 0) 3935 case ACRXOR: 3936 return OPVCC(19, 193, 0, 0) 3937 3938 case ADCBF: 3939 return OPVCC(31, 86, 0, 0) 3940 case ADCBI: 3941 return OPVCC(31, 470, 0, 0) 3942 case ADCBST: 3943 return OPVCC(31, 54, 0, 0) 3944 case ADCBT: 3945 return OPVCC(31, 278, 0, 0) 3946 case ADCBTST: 3947 return OPVCC(31, 246, 0, 0) 3948 case ADCBZ: 3949 return OPVCC(31, 1014, 0, 0) 3950 3951 case AMODUD: 3952 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */ 3953 case AMODUW: 3954 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */ 3955 case AMODSD: 3956 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */ 3957 case AMODSW: 3958 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */ 3959 3960 case ADIVW, AREM: 3961 
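	// AREM deliberately shares the divw encoding: case 50 above rebuilds the remainder
	// as r - (r/b)*b using divw, mullw and subf, so no distinct opcode is needed.
	// The same pairing applies to AREMU/divwu and AREMD(U)/divd(u) below.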
return OPVCC(31, 491, 0, 0) 3962 3963 case ADIVWCC: 3964 return OPVCC(31, 491, 0, 1) 3965 3966 case ADIVWV: 3967 return OPVCC(31, 491, 1, 0) 3968 3969 case ADIVWVCC: 3970 return OPVCC(31, 491, 1, 1) 3971 3972 case ADIVWU, AREMU: 3973 return OPVCC(31, 459, 0, 0) 3974 3975 case ADIVWUCC: 3976 return OPVCC(31, 459, 0, 1) 3977 3978 case ADIVWUV: 3979 return OPVCC(31, 459, 1, 0) 3980 3981 case ADIVWUVCC: 3982 return OPVCC(31, 459, 1, 1) 3983 3984 case ADIVD, AREMD: 3985 return OPVCC(31, 489, 0, 0) 3986 3987 case ADIVDCC: 3988 return OPVCC(31, 489, 0, 1) 3989 3990 case ADIVDE: 3991 return OPVCC(31, 425, 0, 0) 3992 3993 case ADIVDECC: 3994 return OPVCC(31, 425, 0, 1) 3995 3996 case ADIVDEU: 3997 return OPVCC(31, 393, 0, 0) 3998 3999 case ADIVDEUCC: 4000 return OPVCC(31, 393, 0, 1) 4001 4002 case ADIVDV: 4003 return OPVCC(31, 489, 1, 0) 4004 4005 case ADIVDVCC: 4006 return OPVCC(31, 489, 1, 1) 4007 4008 case ADIVDU, AREMDU: 4009 return OPVCC(31, 457, 0, 0) 4010 4011 case ADIVDUCC: 4012 return OPVCC(31, 457, 0, 1) 4013 4014 case ADIVDUV: 4015 return OPVCC(31, 457, 1, 0) 4016 4017 case ADIVDUVCC: 4018 return OPVCC(31, 457, 1, 1) 4019 4020 case AEIEIO: 4021 return OPVCC(31, 854, 0, 0) 4022 4023 case AEQV: 4024 return OPVCC(31, 284, 0, 0) 4025 case AEQVCC: 4026 return OPVCC(31, 284, 0, 1) 4027 4028 case AEXTSB: 4029 return OPVCC(31, 954, 0, 0) 4030 case AEXTSBCC: 4031 return OPVCC(31, 954, 0, 1) 4032 case AEXTSH: 4033 return OPVCC(31, 922, 0, 0) 4034 case AEXTSHCC: 4035 return OPVCC(31, 922, 0, 1) 4036 case AEXTSW: 4037 return OPVCC(31, 986, 0, 0) 4038 case AEXTSWCC: 4039 return OPVCC(31, 986, 0, 1) 4040 4041 case AFABS: 4042 return OPVCC(63, 264, 0, 0) 4043 case AFABSCC: 4044 return OPVCC(63, 264, 0, 1) 4045 case AFADD: 4046 return OPVCC(63, 21, 0, 0) 4047 case AFADDCC: 4048 return OPVCC(63, 21, 0, 1) 4049 case AFADDS: 4050 return OPVCC(59, 21, 0, 0) 4051 case AFADDSCC: 4052 return OPVCC(59, 21, 0, 1) 4053 case AFCMPO: 4054 return OPVCC(63, 32, 0, 0) 4055 case AFCMPU: 4056 return OPVCC(63, 0, 0, 0) 4057 case AFCFID: 4058 return OPVCC(63, 846, 0, 0) 4059 case AFCFIDCC: 4060 return OPVCC(63, 846, 0, 1) 4061 case AFCFIDU: 4062 return OPVCC(63, 974, 0, 0) 4063 case AFCFIDUCC: 4064 return OPVCC(63, 974, 0, 1) 4065 case AFCFIDS: 4066 return OPVCC(59, 846, 0, 0) 4067 case AFCFIDSCC: 4068 return OPVCC(59, 846, 0, 1) 4069 case AFCTIW: 4070 return OPVCC(63, 14, 0, 0) 4071 case AFCTIWCC: 4072 return OPVCC(63, 14, 0, 1) 4073 case AFCTIWZ: 4074 return OPVCC(63, 15, 0, 0) 4075 case AFCTIWZCC: 4076 return OPVCC(63, 15, 0, 1) 4077 case AFCTID: 4078 return OPVCC(63, 814, 0, 0) 4079 case AFCTIDCC: 4080 return OPVCC(63, 814, 0, 1) 4081 case AFCTIDZ: 4082 return OPVCC(63, 815, 0, 0) 4083 case AFCTIDZCC: 4084 return OPVCC(63, 815, 0, 1) 4085 case AFDIV: 4086 return OPVCC(63, 18, 0, 0) 4087 case AFDIVCC: 4088 return OPVCC(63, 18, 0, 1) 4089 case AFDIVS: 4090 return OPVCC(59, 18, 0, 0) 4091 case AFDIVSCC: 4092 return OPVCC(59, 18, 0, 1) 4093 case AFMADD: 4094 return OPVCC(63, 29, 0, 0) 4095 case AFMADDCC: 4096 return OPVCC(63, 29, 0, 1) 4097 case AFMADDS: 4098 return OPVCC(59, 29, 0, 0) 4099 case AFMADDSCC: 4100 return OPVCC(59, 29, 0, 1) 4101 4102 case AFMOVS, AFMOVD: 4103 return OPVCC(63, 72, 0, 0) /* load */ 4104 case AFMOVDCC: 4105 return OPVCC(63, 72, 0, 1) 4106 case AFMSUB: 4107 return OPVCC(63, 28, 0, 0) 4108 case AFMSUBCC: 4109 return OPVCC(63, 28, 0, 1) 4110 case AFMSUBS: 4111 return OPVCC(59, 28, 0, 0) 4112 case AFMSUBSCC: 4113 return OPVCC(59, 28, 0, 1) 4114 case AFMUL: 4115 return OPVCC(63, 25, 0, 0) 4116 case 
AFMULCC: 4117 return OPVCC(63, 25, 0, 1) 4118 case AFMULS: 4119 return OPVCC(59, 25, 0, 0) 4120 case AFMULSCC: 4121 return OPVCC(59, 25, 0, 1) 4122 case AFNABS: 4123 return OPVCC(63, 136, 0, 0) 4124 case AFNABSCC: 4125 return OPVCC(63, 136, 0, 1) 4126 case AFNEG: 4127 return OPVCC(63, 40, 0, 0) 4128 case AFNEGCC: 4129 return OPVCC(63, 40, 0, 1) 4130 case AFNMADD: 4131 return OPVCC(63, 31, 0, 0) 4132 case AFNMADDCC: 4133 return OPVCC(63, 31, 0, 1) 4134 case AFNMADDS: 4135 return OPVCC(59, 31, 0, 0) 4136 case AFNMADDSCC: 4137 return OPVCC(59, 31, 0, 1) 4138 case AFNMSUB: 4139 return OPVCC(63, 30, 0, 0) 4140 case AFNMSUBCC: 4141 return OPVCC(63, 30, 0, 1) 4142 case AFNMSUBS: 4143 return OPVCC(59, 30, 0, 0) 4144 case AFNMSUBSCC: 4145 return OPVCC(59, 30, 0, 1) 4146 case AFCPSGN: 4147 return OPVCC(63, 8, 0, 0) 4148 case AFCPSGNCC: 4149 return OPVCC(63, 8, 0, 1) 4150 case AFRES: 4151 return OPVCC(59, 24, 0, 0) 4152 case AFRESCC: 4153 return OPVCC(59, 24, 0, 1) 4154 case AFRIM: 4155 return OPVCC(63, 488, 0, 0) 4156 case AFRIMCC: 4157 return OPVCC(63, 488, 0, 1) 4158 case AFRIP: 4159 return OPVCC(63, 456, 0, 0) 4160 case AFRIPCC: 4161 return OPVCC(63, 456, 0, 1) 4162 case AFRIZ: 4163 return OPVCC(63, 424, 0, 0) 4164 case AFRIZCC: 4165 return OPVCC(63, 424, 0, 1) 4166 case AFRIN: 4167 return OPVCC(63, 392, 0, 0) 4168 case AFRINCC: 4169 return OPVCC(63, 392, 0, 1) 4170 case AFRSP: 4171 return OPVCC(63, 12, 0, 0) 4172 case AFRSPCC: 4173 return OPVCC(63, 12, 0, 1) 4174 case AFRSQRTE: 4175 return OPVCC(63, 26, 0, 0) 4176 case AFRSQRTECC: 4177 return OPVCC(63, 26, 0, 1) 4178 case AFSEL: 4179 return OPVCC(63, 23, 0, 0) 4180 case AFSELCC: 4181 return OPVCC(63, 23, 0, 1) 4182 case AFSQRT: 4183 return OPVCC(63, 22, 0, 0) 4184 case AFSQRTCC: 4185 return OPVCC(63, 22, 0, 1) 4186 case AFSQRTS: 4187 return OPVCC(59, 22, 0, 0) 4188 case AFSQRTSCC: 4189 return OPVCC(59, 22, 0, 1) 4190 case AFSUB: 4191 return OPVCC(63, 20, 0, 0) 4192 case AFSUBCC: 4193 return OPVCC(63, 20, 0, 1) 4194 case AFSUBS: 4195 return OPVCC(59, 20, 0, 0) 4196 case AFSUBSCC: 4197 return OPVCC(59, 20, 0, 1) 4198 4199 case AICBI: 4200 return OPVCC(31, 982, 0, 0) 4201 case AISYNC: 4202 return OPVCC(19, 150, 0, 0) 4203 4204 case AMTFSB0: 4205 return OPVCC(63, 70, 0, 0) 4206 case AMTFSB0CC: 4207 return OPVCC(63, 70, 0, 1) 4208 case AMTFSB1: 4209 return OPVCC(63, 38, 0, 0) 4210 case AMTFSB1CC: 4211 return OPVCC(63, 38, 0, 1) 4212 4213 case AMULHW: 4214 return OPVCC(31, 75, 0, 0) 4215 case AMULHWCC: 4216 return OPVCC(31, 75, 0, 1) 4217 case AMULHWU: 4218 return OPVCC(31, 11, 0, 0) 4219 case AMULHWUCC: 4220 return OPVCC(31, 11, 0, 1) 4221 case AMULLW: 4222 return OPVCC(31, 235, 0, 0) 4223 case AMULLWCC: 4224 return OPVCC(31, 235, 0, 1) 4225 case AMULLWV: 4226 return OPVCC(31, 235, 1, 0) 4227 case AMULLWVCC: 4228 return OPVCC(31, 235, 1, 1) 4229 4230 case AMULHD: 4231 return OPVCC(31, 73, 0, 0) 4232 case AMULHDCC: 4233 return OPVCC(31, 73, 0, 1) 4234 case AMULHDU: 4235 return OPVCC(31, 9, 0, 0) 4236 case AMULHDUCC: 4237 return OPVCC(31, 9, 0, 1) 4238 case AMULLD: 4239 return OPVCC(31, 233, 0, 0) 4240 case AMULLDCC: 4241 return OPVCC(31, 233, 0, 1) 4242 case AMULLDV: 4243 return OPVCC(31, 233, 1, 0) 4244 case AMULLDVCC: 4245 return OPVCC(31, 233, 1, 1) 4246 4247 case ANAND: 4248 return OPVCC(31, 476, 0, 0) 4249 case ANANDCC: 4250 return OPVCC(31, 476, 0, 1) 4251 case ANEG: 4252 return OPVCC(31, 104, 0, 0) 4253 case ANEGCC: 4254 return OPVCC(31, 104, 0, 1) 4255 case ANEGV: 4256 return OPVCC(31, 104, 1, 0) 4257 case ANEGVCC: 4258 return OPVCC(31, 104, 
1, 1) 4259 case ANOR: 4260 return OPVCC(31, 124, 0, 0) 4261 case ANORCC: 4262 return OPVCC(31, 124, 0, 1) 4263 case AOR: 4264 return OPVCC(31, 444, 0, 0) 4265 case AORCC: 4266 return OPVCC(31, 444, 0, 1) 4267 case AORN: 4268 return OPVCC(31, 412, 0, 0) 4269 case AORNCC: 4270 return OPVCC(31, 412, 0, 1) 4271 4272 case APOPCNTD: 4273 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */ 4274 case APOPCNTW: 4275 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */ 4276 case APOPCNTB: 4277 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */ 4278 case ACNTTZW: 4279 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */ 4280 case ACNTTZWCC: 4281 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */ 4282 case ACNTTZD: 4283 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */ 4284 case ACNTTZDCC: 4285 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */ 4286 4287 case ARFI: 4288 return OPVCC(19, 50, 0, 0) 4289 case ARFCI: 4290 return OPVCC(19, 51, 0, 0) 4291 case ARFID: 4292 return OPVCC(19, 18, 0, 0) 4293 case AHRFID: 4294 return OPVCC(19, 274, 0, 0) 4295 4296 case ARLWNM: 4297 return OPVCC(23, 0, 0, 0) 4298 case ARLWNMCC: 4299 return OPVCC(23, 0, 0, 1) 4300 4301 case ARLDCL: 4302 return OPVCC(30, 8, 0, 0) 4303 case ARLDCLCC: 4304 return OPVCC(30, 0, 0, 1) 4305 4306 case ARLDCR: 4307 return OPVCC(30, 9, 0, 0) 4308 case ARLDCRCC: 4309 return OPVCC(30, 9, 0, 1) 4310 4311 case ARLDICL: 4312 return OPVCC(30, 0, 0, 0) 4313 case ARLDICLCC: 4314 return OPVCC(30, 0, 0, 1) 4315 case ARLDICR: 4316 return OPMD(30, 1, 0) // rldicr 4317 case ARLDICRCC: 4318 return OPMD(30, 1, 1) // rldicr. 4319 4320 case ARLDIC: 4321 return OPMD(30, 2, 0) // rldic 4322 case ARLDICCC: 4323 return OPMD(30, 2, 1) // rldic. 4324 4325 case ASYSCALL: 4326 return OPVCC(17, 1, 0, 0) 4327 4328 case ASLW: 4329 return OPVCC(31, 24, 0, 0) 4330 case ASLWCC: 4331 return OPVCC(31, 24, 0, 1) 4332 case ASLD: 4333 return OPVCC(31, 27, 0, 0) 4334 case ASLDCC: 4335 return OPVCC(31, 27, 0, 1) 4336 4337 case ASRAW: 4338 return OPVCC(31, 792, 0, 0) 4339 case ASRAWCC: 4340 return OPVCC(31, 792, 0, 1) 4341 case ASRAD: 4342 return OPVCC(31, 794, 0, 0) 4343 case ASRADCC: 4344 return OPVCC(31, 794, 0, 1) 4345 4346 case AEXTSWSLI: 4347 return OPVCC(31, 445, 0, 0) 4348 case AEXTSWSLICC: 4349 return OPVCC(31, 445, 0, 1) 4350 4351 case ASRW: 4352 return OPVCC(31, 536, 0, 0) 4353 case ASRWCC: 4354 return OPVCC(31, 536, 0, 1) 4355 case ASRD: 4356 return OPVCC(31, 539, 0, 0) 4357 case ASRDCC: 4358 return OPVCC(31, 539, 0, 1) 4359 4360 case ASUB: 4361 return OPVCC(31, 40, 0, 0) 4362 case ASUBCC: 4363 return OPVCC(31, 40, 0, 1) 4364 case ASUBV: 4365 return OPVCC(31, 40, 1, 0) 4366 case ASUBVCC: 4367 return OPVCC(31, 40, 1, 1) 4368 case ASUBC: 4369 return OPVCC(31, 8, 0, 0) 4370 case ASUBCCC: 4371 return OPVCC(31, 8, 0, 1) 4372 case ASUBCV: 4373 return OPVCC(31, 8, 1, 0) 4374 case ASUBCVCC: 4375 return OPVCC(31, 8, 1, 1) 4376 case ASUBE: 4377 return OPVCC(31, 136, 0, 0) 4378 case ASUBECC: 4379 return OPVCC(31, 136, 0, 1) 4380 case ASUBEV: 4381 return OPVCC(31, 136, 1, 0) 4382 case ASUBEVCC: 4383 return OPVCC(31, 136, 1, 1) 4384 case ASUBME: 4385 return OPVCC(31, 232, 0, 0) 4386 case ASUBMECC: 4387 return OPVCC(31, 232, 0, 1) 4388 case ASUBMEV: 4389 return OPVCC(31, 232, 1, 0) 4390 case ASUBMEVCC: 4391 return OPVCC(31, 232, 1, 1) 4392 case ASUBZE: 4393 return OPVCC(31, 200, 0, 0) 4394 case ASUBZECC: 4395 return OPVCC(31, 200, 0, 1) 4396 case ASUBZEV: 4397 return OPVCC(31, 200, 1, 0) 4398 case ASUBZEVCC: 4399 return OPVCC(31, 200, 1, 1) 4400 4401 case ASYNC: 4402 return OPVCC(31, 598, 0, 0) 
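	// lwsync and ptesync below reuse the sync encoding (31/598) with the L field set to
	// 1 and 2 respectively, which is what the 1<<21 and 2<<21 ORs select.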
4403 case ALWSYNC: 4404 return OPVCC(31, 598, 0, 0) | 1<<21 4405 4406 case APTESYNC: 4407 return OPVCC(31, 598, 0, 0) | 2<<21 4408 4409 case ATLBIE: 4410 return OPVCC(31, 306, 0, 0) 4411 case ATLBIEL: 4412 return OPVCC(31, 274, 0, 0) 4413 case ATLBSYNC: 4414 return OPVCC(31, 566, 0, 0) 4415 case ASLBIA: 4416 return OPVCC(31, 498, 0, 0) 4417 case ASLBIE: 4418 return OPVCC(31, 434, 0, 0) 4419 case ASLBMFEE: 4420 return OPVCC(31, 915, 0, 0) 4421 case ASLBMFEV: 4422 return OPVCC(31, 851, 0, 0) 4423 case ASLBMTE: 4424 return OPVCC(31, 402, 0, 0) 4425 4426 case ATW: 4427 return OPVCC(31, 4, 0, 0) 4428 case ATD: 4429 return OPVCC(31, 68, 0, 0) 4430 4431 /* Vector (VMX/Altivec) instructions */ 4432 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4433 /* are enabled starting at POWER6 (ISA 2.05). */ 4434 case AVAND: 4435 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */ 4436 case AVANDC: 4437 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */ 4438 case AVNAND: 4439 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */ 4440 4441 case AVOR: 4442 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */ 4443 case AVORC: 4444 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */ 4445 case AVNOR: 4446 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */ 4447 case AVXOR: 4448 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */ 4449 case AVEQV: 4450 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */ 4451 4452 case AVADDUBM: 4453 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */ 4454 case AVADDUHM: 4455 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */ 4456 case AVADDUWM: 4457 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */ 4458 case AVADDUDM: 4459 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */ 4460 case AVADDUQM: 4461 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */ 4462 4463 case AVADDCUQ: 4464 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */ 4465 case AVADDCUW: 4466 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */ 4467 4468 case AVADDUBS: 4469 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */ 4470 case AVADDUHS: 4471 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */ 4472 case AVADDUWS: 4473 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */ 4474 4475 case AVADDSBS: 4476 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */ 4477 case AVADDSHS: 4478 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */ 4479 case AVADDSWS: 4480 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */ 4481 4482 case AVADDEUQM: 4483 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */ 4484 case AVADDECUQ: 4485 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */ 4486 4487 case AVMULESB: 4488 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */ 4489 case AVMULOSB: 4490 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */ 4491 case AVMULEUB: 4492 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */ 4493 case AVMULOUB: 4494 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */ 4495 case AVMULESH: 4496 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */ 4497 case AVMULOSH: 4498 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */ 4499 case AVMULEUH: 4500 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */ 4501 case AVMULOUH: 4502 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */ 4503 case AVMULESW: 4504 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */ 4505 case AVMULOSW: 4506 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */ 4507 case AVMULEUW: 4508 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */ 4509 case AVMULOUW: 4510 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */ 4511 case AVMULUWM: 4512 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */ 4513 4514 case AVPMSUMB: 4515 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */ 4516 case AVPMSUMH: 4517 
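		// vpmsum{b,h,w,d} are the carry-less (polynomial) multiply-sum operations; their
		// VX extended opcodes step by 64 across element widths (1032, 1096, 1160, 1224),
		// matching the b/h/w/d progression of the vadd* groups above.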
return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */ 4518 case AVPMSUMW: 4519 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */ 4520 case AVPMSUMD: 4521 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */ 4522 4523 case AVMSUMUDM: 4524 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */ 4525 4526 case AVSUBUBM: 4527 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */ 4528 case AVSUBUHM: 4529 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */ 4530 case AVSUBUWM: 4531 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */ 4532 case AVSUBUDM: 4533 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */ 4534 case AVSUBUQM: 4535 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */ 4536 4537 case AVSUBCUQ: 4538 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */ 4539 case AVSUBCUW: 4540 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */ 4541 4542 case AVSUBUBS: 4543 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */ 4544 case AVSUBUHS: 4545 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */ 4546 case AVSUBUWS: 4547 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */ 4548 4549 case AVSUBSBS: 4550 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */ 4551 case AVSUBSHS: 4552 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */ 4553 case AVSUBSWS: 4554 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */ 4555 4556 case AVSUBEUQM: 4557 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */ 4558 case AVSUBECUQ: 4559 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */ 4560 4561 case AVRLB: 4562 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */ 4563 case AVRLH: 4564 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */ 4565 case AVRLW: 4566 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */ 4567 case AVRLD: 4568 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */ 4569 4570 case AVMRGOW: 4571 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */ 4572 case AVMRGEW: 4573 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */ 4574 4575 case AVSLB: 4576 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */ 4577 case AVSLH: 4578 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */ 4579 case AVSLW: 4580 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */ 4581 case AVSL: 4582 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */ 4583 case AVSLO: 4584 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */ 4585 case AVSRB: 4586 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */ 4587 case AVSRH: 4588 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */ 4589 case AVSRW: 4590 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */ 4591 case AVSR: 4592 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */ 4593 case AVSRO: 4594 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */ 4595 case AVSLD: 4596 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */ 4597 case AVSRD: 4598 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */ 4599 4600 case AVSRAB: 4601 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */ 4602 case AVSRAH: 4603 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */ 4604 case AVSRAW: 4605 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */ 4606 case AVSRAD: 4607 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */ 4608 4609 case AVBPERMQ: 4610 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */ 4611 case AVBPERMD: 4612 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */ 4613 4614 case AVCLZB: 4615 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */ 4616 case AVCLZH: 4617 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */ 4618 case AVCLZW: 4619 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */ 4620 case AVCLZD: 4621 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */ 4622 4623 case AVCLZLSBB: 4624 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */ 4625 case AVCTZLSBB: 4626 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */ 4627 4628 case AVPOPCNTB: 4629 
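		// vpopcnt{b,h,w,d} count set bits per vector element (all v2.07); they are the
		// vector counterparts of the scalar popcntb/popcntw/popcntd encodings earlier
		// in this function.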
return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */ 4630 case AVPOPCNTH: 4631 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */ 4632 case AVPOPCNTW: 4633 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */ 4634 case AVPOPCNTD: 4635 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */ 4636 4637 case AVCMPEQUB: 4638 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */ 4639 case AVCMPEQUBCC: 4640 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */ 4641 case AVCMPEQUH: 4642 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */ 4643 case AVCMPEQUHCC: 4644 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */ 4645 case AVCMPEQUW: 4646 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */ 4647 case AVCMPEQUWCC: 4648 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */ 4649 case AVCMPEQUD: 4650 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */ 4651 case AVCMPEQUDCC: 4652 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */ 4653 4654 case AVCMPGTUB: 4655 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */ 4656 case AVCMPGTUBCC: 4657 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */ 4658 case AVCMPGTUH: 4659 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */ 4660 case AVCMPGTUHCC: 4661 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */ 4662 case AVCMPGTUW: 4663 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */ 4664 case AVCMPGTUWCC: 4665 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */ 4666 case AVCMPGTUD: 4667 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */ 4668 case AVCMPGTUDCC: 4669 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */ 4670 case AVCMPGTSB: 4671 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */ 4672 case AVCMPGTSBCC: 4673 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */ 4674 case AVCMPGTSH: 4675 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */ 4676 case AVCMPGTSHCC: 4677 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */ 4678 case AVCMPGTSW: 4679 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */ 4680 case AVCMPGTSWCC: 4681 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */ 4682 case AVCMPGTSD: 4683 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */ 4684 case AVCMPGTSDCC: 4685 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */ 4686 4687 case AVCMPNEZB: 4688 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */ 4689 case AVCMPNEZBCC: 4690 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */ 4691 case AVCMPNEB: 4692 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */ 4693 case AVCMPNEBCC: 4694 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */ 4695 case AVCMPNEH: 4696 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */ 4697 case AVCMPNEHCC: 4698 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */ 4699 case AVCMPNEW: 4700 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */ 4701 case AVCMPNEWCC: 4702 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */ 4703 4704 case AVPERM: 4705 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */ 4706 case AVPERMXOR: 4707 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */ 4708 case AVPERMR: 4709 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */ 4710 4711 case AVSEL: 4712 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */ 4713 4714 case AVCIPHER: 4715 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */ 4716 case AVCIPHERLAST: 4717 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */ 4718 case AVNCIPHER: 4719 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */ 4720 case AVNCIPHERLAST: 4721 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */ 4722 case AVSBOX: 4723 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */ 4724 /* End of vector instructions */ 4725 4726 /* Vector scalar (VSX) instructions */ 4727 /* ISA 2.06 enables these for POWER7. 
*/ 4728 case AMFVSRD, AMFVRD, AMFFPRD: 4729 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */ 4730 case AMFVSRWZ: 4731 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */ 4732 case AMFVSRLD: 4733 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */ 4734 4735 case AMTVSRD, AMTFPRD, AMTVRD: 4736 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */ 4737 case AMTVSRWA: 4738 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */ 4739 case AMTVSRWZ: 4740 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */ 4741 case AMTVSRDD: 4742 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */ 4743 case AMTVSRWS: 4744 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */ 4745 4746 case AXXLAND: 4747 return OPVXX3(60, 130, 0) /* xxland - v2.06 */ 4748 case AXXLANDC: 4749 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */ 4750 case AXXLEQV: 4751 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */ 4752 case AXXLNAND: 4753 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */ 4754 4755 case AXXLORC: 4756 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */ 4757 case AXXLNOR: 4758 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */ 4759 case AXXLOR, AXXLORQ: 4760 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */ 4761 case AXXLXOR: 4762 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */ 4763 case AXSMINJDP: 4764 return OPVXX3(60, 152, 0) /* xsminjdp - v3.0 */ 4765 case AXSMAXJDP: 4766 return OPVXX3(60, 144, 0) /* xsmaxjdp - v3.0 */ 4767 4768 case AXXSEL: 4769 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */ 4770 4771 case AXXMRGHW: 4772 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */ 4773 case AXXMRGLW: 4774 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */ 4775 4776 case AXXSPLTW: 4777 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */ 4778 4779 case AXXSPLTIB: 4780 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */ 4781 4782 case AXXPERM: 4783 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */ 4784 case AXXPERMDI: 4785 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */ 4786 4787 case AXXSLDWI: 4788 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */ 4789 4790 case AXXBRQ: 4791 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */ 4792 case AXXBRD: 4793 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */ 4794 case AXXBRW: 4795 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */ 4796 case AXXBRH: 4797 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */ 4798 4799 case AXSCVDPSP: 4800 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */ 4801 case AXSCVSPDP: 4802 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */ 4803 case AXSCVDPSPN: 4804 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */ 4805 case AXSCVSPDPN: 4806 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */ 4807 4808 case AXVCVDPSP: 4809 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */ 4810 case AXVCVSPDP: 4811 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */ 4812 4813 case AXSCVDPSXDS: 4814 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */ 4815 case AXSCVDPSXWS: 4816 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */ 4817 case AXSCVDPUXDS: 4818 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */ 4819 case AXSCVDPUXWS: 4820 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */ 4821 4822 case AXSCVSXDDP: 4823 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */ 4824 case AXSCVUXDDP: 4825 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */ 4826 case AXSCVSXDSP: 4827 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */ 4828 case AXSCVUXDSP: 4829 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */ 4830 4831 case AXVCVDPSXDS: 4832 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */ 4833 case AXVCVDPSXWS: 4834 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */ 4835 case AXVCVDPUXDS: 4836 return 
OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */ 4837 case AXVCVDPUXWS: 4838 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */ 4839 case AXVCVSPSXDS: 4840 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */ 4841 case AXVCVSPSXWS: 4842 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */ 4843 case AXVCVSPUXDS: 4844 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */ 4845 case AXVCVSPUXWS: 4846 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */ 4847 4848 case AXVCVSXDDP: 4849 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */ 4850 case AXVCVSXWDP: 4851 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */ 4852 case AXVCVUXDDP: 4853 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */ 4854 case AXVCVUXWDP: 4855 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */ 4856 case AXVCVSXDSP: 4857 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */ 4858 case AXVCVSXWSP: 4859 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */ 4860 case AXVCVUXDSP: 4861 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */ 4862 case AXVCVUXWSP: 4863 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */ 4864 /* End of VSX instructions */ 4865 4866 case AMADDHD: 4867 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */ 4868 case AMADDHDU: 4869 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */ 4870 case AMADDLD: 4871 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */ 4872 4873 case AXOR: 4874 return OPVCC(31, 316, 0, 0) 4875 case AXORCC: 4876 return OPVCC(31, 316, 0, 1) 4877 } 4878 4879 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a) 4880 return 0 4881} 4882 4883func (c *ctxt9) opirrr(a obj.As) uint32 { 4884 switch a { 4885 /* Vector (VMX/Altivec) instructions */ 4886 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4887 /* are enabled starting at POWER6 (ISA 2.05). */ 4888 case AVSLDOI: 4889 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */ 4890 } 4891 4892 c.ctxt.Diag("bad i/r/r/r opcode %v", a) 4893 return 0 4894} 4895 4896func (c *ctxt9) opiirr(a obj.As) uint32 { 4897 switch a { 4898 /* Vector (VMX/Altivec) instructions */ 4899 /* ISA 2.07 enables these for POWER8 and beyond. */ 4900 case AVSHASIGMAW: 4901 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */ 4902 case AVSHASIGMAD: 4903 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */ 4904 } 4905 4906 c.ctxt.Diag("bad i/i/r/r opcode %v", a) 4907 return 0 4908} 4909 4910func (c *ctxt9) opirr(a obj.As) uint32 { 4911 switch a { 4912 case AADD: 4913 return OPVCC(14, 0, 0, 0) 4914 case AADDC: 4915 return OPVCC(12, 0, 0, 0) 4916 case AADDCCC: 4917 return OPVCC(13, 0, 0, 0) 4918 case AADDIS: 4919 return OPVCC(15, 0, 0, 0) /* ADDIS */ 4920 4921 case AANDCC: 4922 return OPVCC(28, 0, 0, 0) 4923 case AANDISCC: 4924 return OPVCC(29, 0, 0, 0) /* ANDIS. 
*/ 4925 4926 case ABR: 4927 return OPVCC(18, 0, 0, 0) 4928 case ABL: 4929 return OPVCC(18, 0, 0, 0) | 1 4930 case obj.ADUFFZERO: 4931 return OPVCC(18, 0, 0, 0) | 1 4932 case obj.ADUFFCOPY: 4933 return OPVCC(18, 0, 0, 0) | 1 4934 case ABC: 4935 return OPVCC(16, 0, 0, 0) 4936 case ABCL: 4937 return OPVCC(16, 0, 0, 0) | 1 4938 4939 case ABEQ: 4940 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0) 4941 case ABGE: 4942 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0) 4943 case ABGT: 4944 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0) 4945 case ABLE: 4946 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0) 4947 case ABLT: 4948 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0) 4949 case ABNE: 4950 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0) 4951 case ABVC: 4952 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0) 4953 case ABVS: 4954 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0) 4955 case ABDZ: 4956 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0) 4957 case ABDNZ: 4958 return AOP_RRR(16<<26, BO_BCTR, 0, 0) 4959 4960 case ACMP: 4961 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */ 4962 case ACMPU: 4963 return OPVCC(10, 0, 0, 0) | 1<<21 4964 case ACMPW: 4965 return OPVCC(11, 0, 0, 0) /* L=0 */ 4966 case ACMPWU: 4967 return OPVCC(10, 0, 0, 0) 4968 case ACMPEQB: 4969 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ 4970 4971 case ALSW: 4972 return OPVCC(31, 597, 0, 0) 4973 4974 case ACOPY: 4975 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */ 4976 case APASTECC: 4977 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */ 4978 case ADARN: 4979 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */ 4980 4981 case AMULLW, AMULLD: 4982 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */ 4983 4984 case AOR: 4985 return OPVCC(24, 0, 0, 0) 4986 case AORIS: 4987 return OPVCC(25, 0, 0, 0) /* ORIS */ 4988 4989 case ARLWMI: 4990 return OPVCC(20, 0, 0, 0) /* rlwimi */ 4991 case ARLWMICC: 4992 return OPVCC(20, 0, 0, 1) 4993 case ARLDMI: 4994 return OPMD(30, 3, 0) /* rldimi */ 4995 case ARLDMICC: 4996 return OPMD(30, 3, 1) /* rldimi. */ 4997 case ARLDIMI: 4998 return OPMD(30, 3, 0) /* rldimi */ 4999 case ARLDIMICC: 5000 return OPMD(30, 3, 1) /* rldimi. */ 5001 case ARLWNM: 5002 return OPVCC(21, 0, 0, 0) /* rlwinm */ 5003 case ARLWNMCC: 5004 return OPVCC(21, 0, 0, 1) 5005 5006 case ARLDCL: 5007 return OPMD(30, 0, 0) /* rldicl */ 5008 case ARLDCLCC: 5009 return OPMD(30, 0, 1) /* rldicl. */ 5010 case ARLDCR: 5011 return OPMD(30, 1, 0) /* rldicr */ 5012 case ARLDCRCC: 5013 return OPMD(30, 1, 1) /* rldicr. */ 5014 case ARLDC: 5015 return OPMD(30, 2, 0) /* rldic */ 5016 case ARLDCCC: 5017 return OPMD(30, 2, 1) /* rldic. */ 5018 5019 case ASRAW: 5020 return OPVCC(31, 824, 0, 0) 5021 case ASRAWCC: 5022 return OPVCC(31, 824, 0, 1) 5023 case ASRAD: 5024 return OPVCC(31, (413 << 1), 0, 0) 5025 case ASRADCC: 5026 return OPVCC(31, (413 << 1), 0, 1) 5027 case AEXTSWSLI: 5028 return OPVCC(31, 445, 0, 0) 5029 case AEXTSWSLICC: 5030 return OPVCC(31, 445, 0, 1) 5031 5032 case ASTSW: 5033 return OPVCC(31, 725, 0, 0) 5034 5035 case ASUBC: 5036 return OPVCC(8, 0, 0, 0) 5037 5038 case ATW: 5039 return OPVCC(3, 0, 0, 0) 5040 case ATD: 5041 return OPVCC(2, 0, 0, 0) 5042 5043 /* Vector (VMX/Altivec) instructions */ 5044 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 5045 /* are enabled starting at POWER6 (ISA 2.05). 
*/ 5046 case AVSPLTB: 5047 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */ 5048 case AVSPLTH: 5049 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */ 5050 case AVSPLTW: 5051 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */ 5052 5053 case AVSPLTISB: 5054 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */ 5055 case AVSPLTISH: 5056 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */ 5057 case AVSPLTISW: 5058 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */ 5059 /* End of vector instructions */ 5060 5061 case AFTDIV: 5062 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */ 5063 case AFTSQRT: 5064 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */ 5065 5066 case AXOR: 5067 return OPVCC(26, 0, 0, 0) /* XORIL */ 5068 case AXORIS: 5069 return OPVCC(27, 0, 0, 0) /* XORIS */ 5070 } 5071 5072 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a) 5073 return 0 5074} 5075 5076/* 5077 * load o(a),d 5078 */ 5079func (c *ctxt9) opload(a obj.As) uint32 { 5080 switch a { 5081 case AMOVD: 5082 return OPVCC(58, 0, 0, 0) /* ld */ 5083 case AMOVDU: 5084 return OPVCC(58, 0, 0, 1) /* ldu */ 5085 case AMOVWZ: 5086 return OPVCC(32, 0, 0, 0) /* lwz */ 5087 case AMOVWZU: 5088 return OPVCC(33, 0, 0, 0) /* lwzu */ 5089 case AMOVW: 5090 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */ 5091 case ALXV: 5092 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */ 5093 case ALXVL: 5094 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */ 5095 case ALXVLL: 5096 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */ 5097 case ALXVX: 5098 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ 5099 5100 /* no AMOVWU */ 5101 case AMOVB, AMOVBZ: 5102 return OPVCC(34, 0, 0, 0) 5103 /* load */ 5104 5105 case AMOVBU, AMOVBZU: 5106 return OPVCC(35, 0, 0, 0) 5107 case AFMOVD: 5108 return OPVCC(50, 0, 0, 0) 5109 case AFMOVDU: 5110 return OPVCC(51, 0, 0, 0) 5111 case AFMOVS: 5112 return OPVCC(48, 0, 0, 0) 5113 case AFMOVSU: 5114 return OPVCC(49, 0, 0, 0) 5115 case AMOVH: 5116 return OPVCC(42, 0, 0, 0) 5117 case AMOVHU: 5118 return OPVCC(43, 0, 0, 0) 5119 case AMOVHZ: 5120 return OPVCC(40, 0, 0, 0) 5121 case AMOVHZU: 5122 return OPVCC(41, 0, 0, 0) 5123 case AMOVMW: 5124 return OPVCC(46, 0, 0, 0) /* lmw */ 5125 } 5126 5127 c.ctxt.Diag("bad load opcode %v", a) 5128 return 0 5129} 5130 5131/* 5132 * indexed load a(b),d 5133 */ 5134func (c *ctxt9) oploadx(a obj.As) uint32 { 5135 switch a { 5136 case AMOVWZ: 5137 return OPVCC(31, 23, 0, 0) /* lwzx */ 5138 case AMOVWZU: 5139 return OPVCC(31, 55, 0, 0) /* lwzux */ 5140 case AMOVW: 5141 return OPVCC(31, 341, 0, 0) /* lwax */ 5142 case AMOVWU: 5143 return OPVCC(31, 373, 0, 0) /* lwaux */ 5144 5145 case AMOVB, AMOVBZ: 5146 return OPVCC(31, 87, 0, 0) /* lbzx */ 5147 5148 case AMOVBU, AMOVBZU: 5149 return OPVCC(31, 119, 0, 0) /* lbzux */ 5150 case AFMOVD: 5151 return OPVCC(31, 599, 0, 0) /* lfdx */ 5152 case AFMOVDU: 5153 return OPVCC(31, 631, 0, 0) /* lfdux */ 5154 case AFMOVS: 5155 return OPVCC(31, 535, 0, 0) /* lfsx */ 5156 case AFMOVSU: 5157 return OPVCC(31, 567, 0, 0) /* lfsux */ 5158 case AFMOVSX: 5159 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */ 5160 case AFMOVSZ: 5161 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */ 5162 case AMOVH: 5163 return OPVCC(31, 343, 0, 0) /* lhax */ 5164 case AMOVHU: 5165 return OPVCC(31, 375, 0, 0) /* lhaux */ 5166 case AMOVHBR: 5167 return OPVCC(31, 790, 0, 0) /* lhbrx */ 5168 case AMOVWBR: 5169 return OPVCC(31, 534, 0, 0) /* lwbrx */ 5170 case AMOVDBR: 5171 return OPVCC(31, 532, 0, 0) /* ldbrx */ 5172 case AMOVHZ: 5173 return OPVCC(31, 279, 0, 0) /* lhzx */ 5174 case AMOVHZU: 5175 return OPVCC(31, 
311, 0, 0) /* lhzux */ 5176 case ALBAR: 5177 return OPVCC(31, 52, 0, 0) /* lbarx */ 5178 case ALHAR: 5179 return OPVCC(31, 116, 0, 0) /* lharx */ 5180 case ALWAR: 5181 return OPVCC(31, 20, 0, 0) /* lwarx */ 5182 case ALDAR: 5183 return OPVCC(31, 84, 0, 0) /* ldarx */ 5184 case ALSW: 5185 return OPVCC(31, 533, 0, 0) /* lswx */ 5186 case AMOVD: 5187 return OPVCC(31, 21, 0, 0) /* ldx */ 5188 case AMOVDU: 5189 return OPVCC(31, 53, 0, 0) /* ldux */ 5190 5191 /* Vector (VMX/Altivec) instructions */ 5192 case ALVEBX: 5193 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */ 5194 case ALVEHX: 5195 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */ 5196 case ALVEWX: 5197 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */ 5198 case ALVX: 5199 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */ 5200 case ALVXL: 5201 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */ 5202 case ALVSL: 5203 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */ 5204 case ALVSR: 5205 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */ 5206 /* End of vector instructions */ 5207 5208 /* Vector scalar (VSX) instructions */ 5209 case ALXVX: 5210 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ 5211 case ALXVD2X: 5212 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */ 5213 case ALXVW4X: 5214 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */ 5215 case ALXVH8X: 5216 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */ 5217 case ALXVB16X: 5218 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */ 5219 case ALXVDSX: 5220 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */ 5221 case ALXSDX: 5222 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */ 5223 case ALXSIWAX: 5224 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */ 5225 case ALXSIWZX: 5226 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */ 5227 } 5228 5229 c.ctxt.Diag("bad loadx opcode %v", a) 5230 return 0 5231} 5232 5233/* 5234 * store s,o(d) 5235 */ 5236func (c *ctxt9) opstore(a obj.As) uint32 { 5237 switch a { 5238 case AMOVB, AMOVBZ: 5239 return OPVCC(38, 0, 0, 0) /* stb */ 5240 5241 case AMOVBU, AMOVBZU: 5242 return OPVCC(39, 0, 0, 0) /* stbu */ 5243 case AFMOVD: 5244 return OPVCC(54, 0, 0, 0) /* stfd */ 5245 case AFMOVDU: 5246 return OPVCC(55, 0, 0, 0) /* stfdu */ 5247 case AFMOVS: 5248 return OPVCC(52, 0, 0, 0) /* stfs */ 5249 case AFMOVSU: 5250 return OPVCC(53, 0, 0, 0) /* stfsu */ 5251 5252 case AMOVHZ, AMOVH: 5253 return OPVCC(44, 0, 0, 0) /* sth */ 5254 5255 case AMOVHZU, AMOVHU: 5256 return OPVCC(45, 0, 0, 0) /* sthu */ 5257 case AMOVMW: 5258 return OPVCC(47, 0, 0, 0) /* stmw */ 5259 case ASTSW: 5260 return OPVCC(31, 725, 0, 0) /* stswi */ 5261 5262 case AMOVWZ, AMOVW: 5263 return OPVCC(36, 0, 0, 0) /* stw */ 5264 5265 case AMOVWZU, AMOVWU: 5266 return OPVCC(37, 0, 0, 0) /* stwu */ 5267 case AMOVD: 5268 return OPVCC(62, 0, 0, 0) /* std */ 5269 case AMOVDU: 5270 return OPVCC(62, 0, 0, 1) /* stdu */ 5271 case ASTXV: 5272 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */ 5273 case ASTXVL: 5274 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */ 5275 case ASTXVLL: 5276 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */ 5277 case ASTXVX: 5278 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */ 5279 5280 } 5281 5282 c.ctxt.Diag("unknown store opcode %v", a) 5283 return 0 5284} 5285 5286/* 5287 * indexed store s,a(b) 5288 */ 5289func (c *ctxt9) opstorex(a obj.As) uint32 { 5290 switch a { 5291 case AMOVB, AMOVBZ: 5292 return OPVCC(31, 215, 0, 0) /* stbx */ 5293 5294 case AMOVBU, AMOVBZU: 5295 return OPVCC(31, 247, 0, 0) /* stbux */ 5296 case AFMOVD: 5297 return OPVCC(31, 727, 0, 0) /* stfdx */ 5298 case AFMOVDU: 5299 return OPVCC(31, 759, 0, 0) /* stfdux */ 5300 case 
AFMOVS: 5301 return OPVCC(31, 663, 0, 0) /* stfsx */ 5302 case AFMOVSU: 5303 return OPVCC(31, 695, 0, 0) /* stfsux */ 5304 case AFMOVSX: 5305 return OPVCC(31, 983, 0, 0) /* stfiwx */ 5306 5307 case AMOVHZ, AMOVH: 5308 return OPVCC(31, 407, 0, 0) /* sthx */ 5309 case AMOVHBR: 5310 return OPVCC(31, 918, 0, 0) /* sthbrx */ 5311 5312 case AMOVHZU, AMOVHU: 5313 return OPVCC(31, 439, 0, 0) /* sthux */ 5314 5315 case AMOVWZ, AMOVW: 5316 return OPVCC(31, 151, 0, 0) /* stwx */ 5317 5318 case AMOVWZU, AMOVWU: 5319 return OPVCC(31, 183, 0, 0) /* stwux */ 5320 case ASTSW: 5321 return OPVCC(31, 661, 0, 0) /* stswx */ 5322 case AMOVWBR: 5323 return OPVCC(31, 662, 0, 0) /* stwbrx */ 5324 case AMOVDBR: 5325 return OPVCC(31, 660, 0, 0) /* stdbrx */ 5326 case ASTBCCC: 5327 return OPVCC(31, 694, 0, 1) /* stbcx. */ 5328 case ASTHCCC: 5329 return OPVCC(31, 726, 0, 1) /* sthcx. */ 5330 case ASTWCCC: 5331 return OPVCC(31, 150, 0, 1) /* stwcx. */ 5332 case ASTDCCC: 5333 return OPVCC(31, 214, 0, 1) /* stwdx. */ 5334 case AMOVD: 5335 return OPVCC(31, 149, 0, 0) /* stdx */ 5336 case AMOVDU: 5337 return OPVCC(31, 181, 0, 0) /* stdux */ 5338 5339 /* Vector (VMX/Altivec) instructions */ 5340 case ASTVEBX: 5341 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */ 5342 case ASTVEHX: 5343 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */ 5344 case ASTVEWX: 5345 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */ 5346 case ASTVX: 5347 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */ 5348 case ASTVXL: 5349 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */ 5350 /* End of vector instructions */ 5351 5352 /* Vector scalar (VSX) instructions */ 5353 case ASTXVX: 5354 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */ 5355 case ASTXVD2X: 5356 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */ 5357 case ASTXVW4X: 5358 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */ 5359 case ASTXVH8X: 5360 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */ 5361 case ASTXVB16X: 5362 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */ 5363 5364 case ASTXSDX: 5365 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */ 5366 5367 case ASTXSIWX: 5368 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */ 5369 5370 /* End of vector scalar instructions */ 5371 5372 } 5373 5374 c.ctxt.Diag("unknown storex opcode %v", a) 5375 return 0 5376} 5377
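// Note on the helpers above: opload/opstore return the displacement (D, DS or DQ form)
// encodings, while oploadx/opstorex return the indexed (X form) encodings. asmout picks
// between them from the operand class of the memory argument, e.g. cases 44/45 and 86/87
// use the indexed forms, while the symbol access cases 74/75 use the displacement forms.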