/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.backend.fu.FuType

/**
  * Commonly used parameters and functions in the VLSU
  */
trait VLSUConstants {
  val VLEN = 128
  // for packing unit-stride flows
  val AlignedNum = 4 // 1/2/4/8
  def VLENB = VLEN/8
  def vOffsetBits = log2Up(VLENB) // bit width to index an offset inside a vector reg
  lazy val vlmBindexBits = 8 // will be overridden later
  lazy val vsmBindexBits = 8 // will be overridden later

  def alignTypes = 5 // eew/sew = 1/2/4/8 bytes; the last type indicates a 128-bit element
  def alignTypeBits = log2Up(alignTypes)
  def maxMUL = 8
  def maxFields = 8
  /**
    * In the most extreme case, e.g. a segment indexed instruction with eew=64, emul=8,
    * sew=8, lmul=1 and nf=8, each data reg is mapped to 8 index regs and there are
    * 8 data regs in total, one per field. Therefore an instruction can be divided
    * into 64 uops at most.
    */
  def maxUopNum = maxMUL * maxFields // 64
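  // Worked example (illustrative): for a vluxseg8ei64.v with sew=8 (lmul=1) and
  // eew=64 (so emul=8 for the index regs), each of the nf=8 fields takes
  // max(lmul, emul) = 8 uops, so the instruction splits into 8 * 8 = 64 uops,
  // which is exactly maxUopNum.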
  def maxFlowNum = 16
  def maxElemNum = maxMUL * maxFlowNum // 128
  // def uopIdxBits = log2Up(maxUopNum) // to index a uop inside a robIdx
  def elemIdxBits = log2Up(maxElemNum) + 1 // to index an element within an instruction
  def flowIdxBits = log2Up(maxFlowNum) + 1 // to index a flow within a uop
  def fieldBits = log2Up(maxFields) + 1 // 4 bits to indicate 1~8

  def ewBits = 3 // bit width of EEW/SEW
  def mulBits = 3 // bit width of emul/lmul

  def getSlice(data: UInt, i: Int, alignBits: Int): UInt = {
    require(data.getWidth >= (i+1) * alignBits)
    data((i+1) * alignBits - 1, i * alignBits)
  }

  def getByte(data: UInt, i: Int = 0) = getSlice(data, i, 8)
  def getHalfWord(data: UInt, i: Int = 0) = getSlice(data, i, 16)
  def getWord(data: UInt, i: Int = 0) = getSlice(data, i, 32)
  def getDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 64)
  def getDoubleDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 128)
}

trait HasVLSUParameters extends HasXSParameter with VLSUConstants {
  override val VLEN = coreParams.VLEN
  override lazy val vlmBindexBits = log2Up(coreParams.VlMergeBufferSize)
  override lazy val vsmBindexBits = log2Up(coreParams.VsMergeBufferSize)
  def isUnitStride(instType: UInt) = instType(1, 0) === "b00".U
  def isStrided(instType: UInt) = instType(1, 0) === "b10".U
  def isIndexed(instType: UInt) = instType(0) === "b1".U
  def isNotIndexed(instType: UInt) = instType(0) === "b0".U
  def isSegment(instType: UInt) = instType(2) === "b1".U
  def is128Bit(alignedType: UInt) = alignedType(2) === "b1".U

  def mergeDataWithMask(oldData: UInt, newData: UInt, mask: UInt): Vec[UInt] = {
    require(oldData.getWidth == newData.getWidth)
    require(oldData.getWidth == mask.getWidth * 8)
    VecInit(mask.asBools.zipWithIndex.map { case (en, i) =>
      Mux(en, getByte(newData, i), getByte(oldData, i))
    })
  }

  // def asBytes(data: UInt) = {
  //   require(data.getWidth % 8 == 0)
  //   (0 until data.getWidth/8).map(i => getByte(data, i))
  // }

  def mergeDataWithElemIdx(
    oldData: UInt,
    newData: Seq[UInt],
    alignedType: UInt,
    elemIdx: Seq[UInt],
    valids: Seq[Bool]
  ): UInt = {
    require(newData.length == elemIdx.length)
    require(newData.length == valids.length)
    LookupTree(alignedType, List(
      "b00".U -> VecInit(elemIdx.map(e => UIntToOH(e(3, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getByte(oldData, i) +: newData.map(getByte(_))
        )}).asUInt,
      "b01".U -> VecInit(elemIdx.map(e => UIntToOH(e(2, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getHalfWord(oldData, i) +: newData.map(getHalfWord(_))
        )}).asUInt,
      "b10".U -> VecInit(elemIdx.map(e => UIntToOH(e(1, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getWord(oldData, i) +: newData.map(getWord(_))
        )}).asUInt,
      "b11".U -> VecInit(elemIdx.map(e => UIntToOH(e(0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getDoubleWord(oldData, i) +: newData.map(getDoubleWord(_))
        )}).asUInt
    ))
  }

  def mergeDataWithElemIdx(oldData: UInt, newData: UInt, alignedType: UInt, elemIdx: UInt): UInt = {
    mergeDataWithElemIdx(oldData, Seq(newData), alignedType, Seq(elemIdx), Seq(true.B))
  }
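  // Illustrative usage sketch (signal names are hypothetical): merging one 8-bit
  // element (alignedType = b00) at elemIdx = 5 replaces only byte lane 5 of the old
  // vd with the low byte of the incoming flow data; all other lanes keep their old
  // value:
  //   val vdNext = mergeDataWithElemIdx(vdOld, flowData, "b00".U, 5.U)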
  /**
    * Merge 128-bit unit-stride data by byte offset.
    */
  object mergeDataByoffset{
    def apply(oldData: UInt, newData: Seq[UInt], mask: Seq[UInt], offset: Seq[UInt], valids: Seq[Bool]): UInt = {
      require(newData.length == valids.length)
      require(newData.length == offset.length)
      // if (i >= offset[k] && mask[k][i] == 1 && valid[k]) -> newData, else -> oldData
      val selVec = (mask zip offset).map{case (m, e) =>
        ((~UIntToMask(e, VLENB)).asBools.zip(m.asBools).map(x => x._1 && x._2))}.transpose // vector(3,16)

      VecInit(selVec.zipWithIndex.map{ case (selV, i) => // selV: vector(3,1), 0 <= i < 16
        ParallelPosteriorityMux(
          true.B +: selV.zip(valids).map(x => x._1 && x._2),
          getByte(oldData, i) +: newData.map(getByte(_, i))
        )}).asUInt
    }
  }
  def mergeDataByoffset(oldData: UInt, newData: UInt, mask: UInt, offset: UInt): UInt = {
    mergeDataByoffset(oldData, Seq(newData), Seq(mask), Seq(offset), Seq(true.B))
  }
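  // Worked example (illustrative, signal names hypothetical): with offset = 4 and
  // mask = hfff0, byte lanes 4..15 of the new 128-bit data overwrite the
  // corresponding lanes of the old data while lanes 0..3 keep their old bytes:
  //   val merged = mergeDataByoffset(oldVd, lineData, "hfff0".U, 4.U)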
}
abstract class VLSUModule(implicit p: Parameters) extends XSModule
  with HasVLSUParameters
  with HasCircularQueuePtrHelper
abstract class VLSUBundle(implicit p: Parameters) extends XSBundle
  with HasVLSUParameters

class VLSUBundleWithMicroOp(implicit p: Parameters) extends VLSUBundle {
  val uop = new DynInst
}

class OnlyVecExuOutput(implicit p: Parameters) extends VLSUBundle {
  val isvec = Bool()
  val vecdata = UInt(VLEN.W)
  val mask = UInt(VLENB.W)
  // val rob_idx_valid = Vec(2, Bool())
  // val inner_idx = Vec(2, UInt(3.W))
  // val rob_idx = Vec(2, new RobPtr)
  // val offset = Vec(2, UInt(4.W))
  val reg_offset = UInt(vOffsetBits.W)
  val vecActive = Bool() // 1: active vector element, 0: inactive vector element
  val is_first_ele = Bool()
  val elemIdx = UInt(elemIdxBits.W) // element index
  val elemIdxInsideVd = UInt(elemIdxBits.W) // element index within the scope of vd
  // val uopQueuePtr = new VluopPtr
  // val flowPtr = new VlflowPtr
}

class VecExuOutput(implicit p: Parameters) extends MemExuOutput with HasVLSUParameters {
  val vec = new OnlyVecExuOutput
  val alignedType = UInt(alignTypeBits.W)
  // feedback
  val vecFeedback = Bool()
}

// class VecStoreExuOutput(implicit p: Parameters) extends MemExuOutput with HasVLSUParameters {
//   val elemIdx = UInt(elemIdxBits.W)
//   val uopQueuePtr = new VsUopPtr
//   val fieldIdx = UInt(fieldBits.W)
//   val segmentIdx = UInt(elemIdxBits.W)
//   val vaddr = UInt(VAddrBits.W)
//   // pack
//   val isPackage = Bool()
//   val packageNum = UInt((log2Up(VLENB) + 1).W)
//   val originAlignedType = UInt(alignTypeBits.W)
//   val alignedType = UInt(alignTypeBits.W)
// }

class VecUopBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp {
  val flowMask = UInt(VLENB.W) // each bit for a flow
  val byteMask = UInt(VLENB.W) // each bit for a byte
  val data = UInt(VLEN.W)
  // val fof = Bool() // fof is only used for vector loads
  val excp_eew_index = UInt(elemIdxBits.W)
  // val exceptionVec = ExceptionVec() // uop has exceptionVec
  val baseAddr = UInt(VAddrBits.W)
  val stride = UInt(VLEN.W)
  val flow_counter = UInt(flowIdxBits.W)

  // instruction decode result
  val flowNum = UInt(flowIdxBits.W) // # of flows in a uop
  // val flowNumLog2 = UInt(log2Up(flowIdxBits).W) // log2(flowNum), for better timing of multiplication
  val nfields = UInt(fieldBits.W) // NFIELDS
  val vm = Bool() // whether vector masking is enabled
  val usWholeReg = Bool() // unit-stride, whole register load
  val usMaskReg = Bool() // unit-stride, masked store/load
  val eew = UInt(ewBits.W) // size of memory elements
  val sew = UInt(ewBits.W)
  val emul = UInt(mulBits.W)
  val lmul = UInt(mulBits.W)
  val vlmax = UInt(elemIdxBits.W)
  val instType = UInt(3.W)
  val vd_last_uop = Bool()
  val vd_first_uop = Bool()
}

class VecFlowBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp {
  val vaddr = UInt(VAddrBits.W)
  val mask = UInt(VLENB.W)
  val alignedType = UInt(alignTypeBits.W)
  val vecActive = Bool()
  val elemIdx = UInt(elemIdxBits.W)
  val is_first_ele = Bool()

  // pack
  val isPackage = Bool()
  val packageNum = UInt((log2Up(VLENB) + 1).W)
  val originAlignedType = UInt(alignTypeBits.W)
}

class VecMemExuOutput(isVector: Boolean = false)(implicit p: Parameters) extends VLSUBundle {
  val output = new MemExuOutput(isVector)
  val vecFeedback = Bool()
  val mmio = Bool()
  val usSecondInv = Bool()
  val elemIdx = UInt(elemIdxBits.W)
  val alignedType = UInt(alignTypeBits.W)
}

object MulNum {
  def apply(mul: UInt): UInt = { // mul means emul or lmul
    (LookupTree(mul, List(
      "b101".U -> 1.U, // 1/8
      "b110".U -> 1.U, // 1/4
      "b111".U -> 1.U, // 1/2
      "b000".U -> 1.U, // 1
      "b001".U -> 2.U, // 2
      "b010".U -> 4.U, // 4
      "b011".U -> 8.U  // 8
    )))}
}
/**
  * When emul is greater than or equal to 1, the entire register needs to be written;
  * otherwise, only the specified number of bytes is written.
  */
object MulDataSize {
  def apply(mul: UInt): UInt = { // mul means emul or lmul
    (LookupTree(mul, List(
      "b101".U -> 2.U,  // 1/8
      "b110".U -> 4.U,  // 1/4
      "b111".U -> 8.U,  // 1/2
      "b000".U -> 16.U, // 1
      "b001".U -> 16.U, // 2
      "b010".U -> 16.U, // 4
      "b011".U -> 16.U  // 8
    )))}
}

object OneRegNum {
  def apply(eew: UInt): UInt = { // number of elements in one register, given eew
    (LookupTree(eew, List(
      "b000".U -> 16.U, // 1
      "b101".U -> 8.U,  // 2
      "b110".U -> 4.U,  // 4
      "b111".U -> 2.U   // 8
    )))}
}

// bytes of data read per element for indexed instructions
object SewDataSize {
  def apply(sew: UInt): UInt = {
    (LookupTree(sew, List(
      "b000".U -> 1.U, // 1
      "b001".U -> 2.U, // 2
      "b010".U -> 4.U, // 4
      "b011".U -> 8.U  // 8
    )))}
}

// bytes of data read per element for strided instructions
object EewDataSize {
  def apply(eew: UInt): UInt = {
    (LookupTree(eew, List(
      "b000".U -> 1.U, // 1
      "b101".U -> 2.U, // 2
      "b110".U -> 4.U, // 4
      "b111".U -> 8.U  // 8
    )))}
}

object loadDataSize {
  def apply(instType: UInt, emul: UInt, eew: UInt, sew: UInt): UInt = {
    (LookupTree(instType, List(
      "b000".U -> MulDataSize(emul), // unit-stride
      "b010".U -> EewDataSize(eew),  // strided
      "b001".U -> SewDataSize(sew),  // indexed-unordered
      "b011".U -> SewDataSize(sew),  // indexed-ordered
      "b100".U -> EewDataSize(eew),  // segment unit-stride
      "b110".U -> EewDataSize(eew),  // segment strided
      "b101".U -> SewDataSize(sew),  // segment indexed-unordered
      "b111".U -> SewDataSize(sew)   // segment indexed-ordered
    )))}
}
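// Worked example (illustrative): a unit-stride load (instType = b000) with emul = 1/2
// (encoded b111) reads MulDataSize(emul) = 8 bytes, while a strided load (b010) with
// eew = 32 bits (encoded b110) reads EewDataSize(eew) = 4 bytes per element.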
object storeDataSize {
  def apply(instType: UInt, eew: UInt, sew: UInt): UInt = {
    (LookupTree(instType, List(
      "b000".U -> EewDataSize(eew), // unit-stride, do not use
      "b010".U -> EewDataSize(eew), // strided
      "b001".U -> SewDataSize(sew), // indexed-unordered
      "b011".U -> SewDataSize(sew), // indexed-ordered
      "b100".U -> EewDataSize(eew), // segment unit-stride
      "b110".U -> EewDataSize(eew), // segment strided
      "b101".U -> SewDataSize(sew), // segment indexed-unordered
      "b111".U -> SewDataSize(sew)  // segment indexed-ordered
    )))}
}

object GenVecStoreMask {
  def apply(instType: UInt, eew: UInt, sew: UInt): UInt = {
    val mask = Wire(UInt(16.W))
    mask := UIntToOH(storeDataSize(instType = instType, eew = eew, sew = sew)) - 1.U
    mask
  }
}

/**
  * These are used to extract the index offsets of indexed instructions
  * from the index register.
  */
object EewEq8 {
  def apply(index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U  -> index(7, 0),
      1.U  -> index(15, 8),
      2.U  -> index(23, 16),
      3.U  -> index(31, 24),
      4.U  -> index(39, 32),
      5.U  -> index(47, 40),
      6.U  -> index(55, 48),
      7.U  -> index(63, 56),
      8.U  -> index(71, 64),
      9.U  -> index(79, 72),
      10.U -> index(87, 80),
      11.U -> index(95, 88),
      12.U -> index(103, 96),
      13.U -> index(111, 104),
      14.U -> index(119, 112),
      15.U -> index(127, 120)
    )))}
}

object EewEq16 {
  def apply(index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U -> index(15, 0),
      1.U -> index(31, 16),
      2.U -> index(47, 32),
      3.U -> index(63, 48),
      4.U -> index(79, 64),
      5.U -> index(95, 80),
      6.U -> index(111, 96),
      7.U -> index(127, 112)
    )))}
}

object EewEq32 {
  def apply(index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U -> index(31, 0),
      1.U -> index(63, 32),
      2.U -> index(95, 64),
      3.U -> index(127, 96)
    )))}
}

object EewEq64 {
  def apply(index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U -> index(63, 0),
      1.U -> index(127, 64)
    )))}
}

object IndexAddr {
  def apply(index: UInt, flow_inner_idx: UInt, eew: UInt): UInt = {
    (LookupTree(eew, List(
      "b000".U -> EewEq8 (index = index, flow_inner_idx = flow_inner_idx), // index is 1 byte // TODO: the index may cross registers
      "b101".U -> EewEq16(index = index, flow_inner_idx = flow_inner_idx), // index is 2 bytes
      "b110".U -> EewEq32(index = index, flow_inner_idx = flow_inner_idx), // index is 4 bytes
      "b111".U -> EewEq64(index = index, flow_inner_idx = flow_inner_idx)  // index is 8 bytes
    )))}
}

object Log2Num {
  def apply(num: UInt): UInt = {
    (LookupTree(num, List(
      16.U -> 4.U,
      8.U  -> 3.U,
      4.U  -> 2.U,
      2.U  -> 1.U,
      1.U  -> 0.U
    )))}
}

object GenUopIdxInField {
  def apply(instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = {
    val isIndexed = instType(0)
    val mulInField = Mux(
      isIndexed,
      Mux(lmul.asSInt > emul.asSInt, lmul, emul),
      emul
    )
    LookupTree(mulInField, List(
      "b101".U -> 0.U,
      "b110".U -> 0.U,
      "b111".U -> 0.U,
      "b000".U -> 0.U,
      "b001".U -> uopIdx(0),
      "b010".U -> uopIdx(1, 0),
      "b011".U -> uopIdx(2, 0)
    ))
  }
}
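// Worked example (illustrative): for a segment indexed access with lmul = 2 (b001)
// and emul = 1 (b000), each field spans max(lmul, emul) = 2 uops, so the uop index
// within a field is uopIdx(0); fractional muls and mul = 1 always map to index 0.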
// eew decode
object EewLog2 extends VLSUConstants {
  // def apply(eew: UInt): UInt = {
  //   (LookupTree(eew, List(
  //     "b000".U -> "b000".U, // 1
  //     "b101".U -> "b001".U, // 2
  //     "b110".U -> "b010".U, // 4
  //     "b111".U -> "b011".U  // 8
  //   )))}
  def apply(eew: UInt): UInt = ZeroExt(eew(1, 0), ewBits)
}

/**
  * Unit-stride instructions do not use this method. Other instructions generate
  * realFlowNum as EmulDataSize >> eew(1, 0), where EmulDataSize is the number of
  * bytes to be written to the register group and eew(1, 0) encodes log2 of the
  * number of bytes written per access.
  */
object GenRealFlowNum {
  def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt): UInt = {
    (LookupTree(instType, List(
      "b000".U -> (MulDataSize(emul) >> eew(1, 0)).asUInt, // unit-stride (store use, load do not use)
      "b010".U -> (MulDataSize(emul) >> eew(1, 0)).asUInt, // strided
      "b001".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew(1, 0)).asUInt, (MulDataSize(lmul) >> sew(1, 0)).asUInt), // indexed-unordered
      "b011".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew(1, 0)).asUInt, (MulDataSize(lmul) >> sew(1, 0)).asUInt), // indexed-ordered
      "b100".U -> (MulDataSize(emul) >> eew(1, 0)).asUInt, // segment unit-stride
      "b110".U -> (MulDataSize(emul) >> eew(1, 0)).asUInt, // segment strided
      "b101".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew(1, 0)).asUInt, (MulDataSize(lmul) >> sew(1, 0)).asUInt), // segment indexed-unordered
      "b111".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew(1, 0)).asUInt, (MulDataSize(lmul) >> sew(1, 0)).asUInt)  // segment indexed-ordered
    )))}
}

/**
  * GenRealFlowLog2 = Log2(GenRealFlowNum)
  */
object GenRealFlowLog2 extends VLSUConstants {
  def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt): UInt = {
    val emulLog2 = Mux(emul.asSInt >= 0.S, 0.U, emul)
    val lmulLog2 = Mux(lmul.asSInt >= 0.S, 0.U, lmul)
    val eewRealFlowLog2 = emulLog2 + log2Up(VLENB).U - eew(1, 0)
    val sewRealFlowLog2 = lmulLog2 + log2Up(VLENB).U - sew(1, 0)
    (LookupTree(instType, List(
      "b000".U -> eewRealFlowLog2, // unit-stride
      "b010".U -> eewRealFlowLog2, // strided
      "b001".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-unordered
      "b011".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-ordered
      "b100".U -> eewRealFlowLog2, // segment unit-stride
      "b110".U -> eewRealFlowLog2, // segment strided
      "b101".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // segment indexed-unordered
      "b111".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2)  // segment indexed-ordered
    )))
  }
}
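// Worked example (illustrative): a strided load (instType = b010) with emul = 2 (b001)
// and eew = 16 bits (b101) produces MulDataSize(emul) >> eew(1, 0) = 16 >> 1 = 8 flows
// per uop; GenRealFlowLog2 returns the corresponding log2 value, here 3.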
/**
  * GenElemIdx generates an element index within an instruction, given a certain
  * uopIdx and a known flowIdx inside the uop.
  */
object GenElemIdx extends VLSUConstants {
  def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt,
    uopIdx: UInt, flowIdx: UInt): UInt = {
    val isIndexed = instType(0).asBool
    val eewUopFlowsLog2 = Mux(emul.asSInt > 0.S, 0.U, emul) + log2Up(VLENB).U - eew(1, 0)
    val sewUopFlowsLog2 = Mux(lmul.asSInt > 0.S, 0.U, lmul) + log2Up(VLENB).U - sew(1, 0)
    val uopFlowsLog2 = Mux(
      isIndexed,
      Mux(emul.asSInt > lmul.asSInt, eewUopFlowsLog2, sewUopFlowsLog2),
      eewUopFlowsLog2
    )
    LookupTree(uopFlowsLog2, List(
      0.U -> uopIdx,
      1.U -> uopIdx ## flowIdx(0),
      2.U -> uopIdx ## flowIdx(1, 0),
      3.U -> uopIdx ## flowIdx(2, 0),
      4.U -> uopIdx ## flowIdx(3, 0)
    ))
  }
}

/**
  * GenVLMAX calculates VLMAX = LMUL * VLEN / SEW (lmul and sew are log2-encoded here).
  */
object GenVLMAXLog2 extends VLSUConstants {
  def apply(lmul: UInt, sew: UInt): UInt = lmul + log2Up(VLENB).U - sew
}
object GenVLMAX {
  def apply(lmul: UInt, sew: UInt): UInt = 1.U << GenVLMAXLog2(lmul, sew)
}

object GenUSWholeRegVL extends VLSUConstants {
  def apply(nfields: UInt, eew: UInt): UInt = {
    LookupTree(eew(1, 0), List(
      "b00".U -> (nfields << (log2Up(VLENB) - 0)),
      "b01".U -> (nfields << (log2Up(VLENB) - 1)),
      "b10".U -> (nfields << (log2Up(VLENB) - 2)),
      "b11".U -> (nfields << (log2Up(VLENB) - 3))
    ))
  }
}
object GenUSWholeEmul extends VLSUConstants {
  def apply(nf: UInt): UInt = {
    LookupTree(nf, List(
      "b000".U -> "b000".U(mulBits.W),
      "b001".U -> "b001".U(mulBits.W),
      "b011".U -> "b010".U(mulBits.W),
      "b111".U -> "b011".U(mulBits.W)
    ))
  }
}

object GenUSMaskRegVL extends VLSUConstants {
  def apply(vl: UInt): UInt = {
    Mux(vl(2, 0) === 0.U, (vl >> 3.U), ((vl >> 3.U) + 1.U))
  }
}

object GenUopByteMask {
  def apply(flowMask: UInt, alignedType: UInt): UInt = {
    LookupTree(alignedType, List(
      "b000".U -> flowMask,
      "b001".U -> FillInterleaved(2, flowMask),
      "b010".U -> FillInterleaved(4, flowMask),
      "b011".U -> FillInterleaved(8, flowMask),
      "b100".U -> FillInterleaved(16, flowMask)
    ))
  }
}

object GenVdIdxInField extends VLSUConstants {
  def apply(instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = {
    val vdIdx = Wire(UInt(log2Up(maxMUL).W))
    when (instType(1, 0) === "b00".U || instType(1, 0) === "b10".U || lmul.asSInt > emul.asSInt) {
      // unit-stride or strided, or indexed with lmul > emul
      vdIdx := uopIdx
    }.otherwise {
      // indexed with lmul <= emul
      val multiple = emul - lmul
      vdIdx := LookupTree(multiple, List(
        0.U -> uopIdx,
        1.U -> (uopIdx >> 1),
        2.U -> (uopIdx >> 2),
        3.U -> (uopIdx >> 3)
      ))
    }
    vdIdx
  }
}
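// Worked example (illustrative): for an indexed access with emul = 8 (b011) and
// lmul = 2 (b001), four index uops share each vd, so multiple = b010 and
// vdIdx = uopIdx >> 2.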
/**
  * Use start and vl to generate the flow active mask.
  * mod = true: fill 0
  * mod = false: fill 1
  */
object GenFlowMask extends VLSUConstants {
  def apply(elementMask: UInt, start: UInt, vl: UInt, mod: Boolean): UInt = {
    val startMask = ~UIntToMask(start, VLEN)
    val vlMask = UIntToMask(vl, VLEN)
    val maskVlStart = vlMask & startMask
    if(mod){
      elementMask & maskVlStart
    }
    else{
      (~elementMask).asUInt & maskVlStart
    }
  }
}

object CheckAligned extends VLSUConstants {
  def apply(addr: UInt): UInt = {
    val aligned_16 = (addr(0) === 0.U) // 16-bit
    val aligned_32 = (addr(1, 0) === 0.U) // 32-bit
    val aligned_64 = (addr(2, 0) === 0.U) // 64-bit
    val aligned_128 = (addr(3, 0) === 0.U) // 128-bit
    Cat(true.B, aligned_16, aligned_32, aligned_64, aligned_128)
  }
}

/**
  * Search whether the mask has 'len' continuous '1' bits.
  * mask: source mask
  * len: search length
  */
object GenPackMask{
  def leadX(mask: Seq[Bool], len: Int): Bool = {
    if(len == 1){
      mask.head
    }
    else{
      leadX(mask.drop(1), len-1) & mask.head
    }
  }
  def leadOneVec(shiftMask: Seq[Bool]): UInt = {
    // the max pack is 128-bit, so the max number of flows to pack is 16

    val lead1 = leadX(shiftMask, 1)   // 1 continuous bit
    val lead2 = leadX(shiftMask, 2)   // 2 continuous bits
    val lead4 = leadX(shiftMask, 4)   // 4 continuous bits
    val lead8 = leadX(shiftMask, 8)   // 8 continuous bits
    val lead16 = leadX(shiftMask, 16) // 16 continuous bits
    Cat(lead1, lead2, lead4, lead8, lead16)
  }

  def apply(shiftMask: UInt) = {
    // pack mask
    val packMask = leadOneVec(shiftMask.asBools)
    packMask
  }
}
/**
  * PackEnable = (LeadXVec >> eew) & alignedVec, where the 0th bit represents the
  * ability to merge into a 128-bit flow, the 1st bit a 64-bit flow, and so on.
  *
  * example:
  *   addr = 0x0, activeMask = b00011100101111, flowIdx = 0, eew = 0 (8-bit)
  *
  *   step 0: addrAlignedVec = (1, 1, 1, 1), elemIdxAligned = (1, 1, 1, 1)
  *   step 1: activePackVec = (1, 1, 1, 0), inactivePackVec = (0, 0, 0, 0)
  *   step 2: activePackEnable = (1, 1, 1, 0), inactivePackVec = (0, 0, 0, 0)
  *
  *   we can pack 4 active 8-bit flows into one 32-bit flow.
  */
object GenPackVec extends VLSUConstants{
  def apply(addr: UInt, shiftMask: UInt, eew: UInt, elemIdx: UInt): UInt = {
    val addrAlignedVec = CheckAligned(addr)
    val elemIdxAligned = CheckAligned(elemIdx)
    val packMask = GenPackMask(shiftMask)
    // generate packVec
    val packVec = addrAlignedVec & elemIdxAligned & (packMask.asUInt >> eew)

    packVec
  }
}

object GenPackAlignedType extends VLSUConstants{
  def apply(packVec: UInt): UInt = {
    val packAlignedType = PriorityMux(Seq(
      packVec(0) -> "b100".U,
      packVec(1) -> "b011".U,
      packVec(2) -> "b010".U,
      packVec(3) -> "b001".U,
      packVec(4) -> "b000".U
    ))
    packAlignedType
  }
}

object GenPackNum extends VLSUConstants{
  def apply(alignedType: UInt, packAlignedType: UInt): UInt = {
    (1.U << (packAlignedType - alignedType)).asUInt
  }
}

object genVWmask128 {
  def apply(addr: UInt, sizeEncode: UInt): UInt = {
    (LookupTree(sizeEncode, List(
      "b000".U -> 0x1.U,    // 0001 << addr(3:0)
      "b001".U -> 0x3.U,    // 0011
      "b010".U -> 0xf.U,    // 1111
      "b011".U -> 0xff.U,   // 11111111
      "b100".U -> 0xffff.U  // 1111111111111111
    )) << addr(3, 0)).asUInt
  }
}
/**
  * Only used when the max access width is 128 bits.
  */
object genVWdata {
  def apply(data: UInt, sizeEncode: UInt): UInt = {
    LookupTree(sizeEncode, List(
      "b000".U -> Fill(16, data(7, 0)),
      "b001".U -> Fill(8, data(15, 0)),
      "b010".U -> Fill(4, data(31, 0)),
      "b011".U -> Fill(2, data(63, 0)),
      "b100".U -> data(127, 0)
    ))
  }
}

object genUSSplitAddr{
  def apply(addr: UInt, index: UInt): UInt = {
    val tmpAddr = Cat(addr(38, 4), 0.U(4.W))
    val nextCacheline = tmpAddr + 16.U
    LookupTree(index, List(
      0.U -> tmpAddr,
      1.U -> nextCacheline
    ))
  }
}
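// Worked example (illustrative, assuming a 39-bit vaddr as implied by addr(38, 4)):
// for addr = 0x80001234 the access is split at the 16-byte boundary, so part 0
// targets 0x80001230 and part 1 targets the next cacheline at 0x80001240:
//   genUSSplitAddr(addr, 0.U) // -> 0x80001230
//   genUSSplitAddr(addr, 1.U) // -> 0x80001240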
object genUSSplitMask{
  def apply(mask: UInt, index: UInt, addrOffset: UInt): UInt = {
    val tmpMask = Cat(0.U(16.W), mask) << addrOffset // 32 bits
    LookupTree(index, List(
      0.U -> tmpMask(15, 0),
      1.U -> tmpMask(31, 16)
    ))
  }
}

object genUSSplitData{
  def apply(data: UInt, index: UInt, addrOffset: UInt): UInt = {
    val tmpData = WireInit(0.U(256.W))
    val lookupTable = (0 until 16).map{case i =>
      if(i == 0){
        i.U -> Cat(0.U(128.W), data)
      }else{
        i.U -> Cat(0.U(((16-i)*8).W), data, 0.U((i*8).W))
      }
    }
    tmpData := LookupTree(addrOffset, lookupTable).asUInt

    LookupTree(index, List(
      0.U -> tmpData(127, 0),
      1.U -> tmpData(255, 128)
    ))
  }
}
/**
  * Generate the offset in vd of flows; only used for unit-stride.
  */
object genVdOffset{
  def apply(offset: UInt, index: UInt): UInt = {
    LookupTree(index, List(
      0.U -> 0.U,
      1.U -> ((~offset).asUInt + 1.U)
    ))
  }
}

object GenVSData extends VLSUConstants {
  def apply(data: UInt, elemIdx: UInt, alignedType: UInt): UInt = {
    LookupTree(alignedType, List(
      "b000".U -> ZeroExt(LookupTree(elemIdx(3, 0), List.tabulate(VLEN/8)(i => i.U -> getByte(data, i))), VLEN),
      "b001".U -> ZeroExt(LookupTree(elemIdx(2, 0), List.tabulate(VLEN/16)(i => i.U -> getHalfWord(data, i))), VLEN),
      "b010".U -> ZeroExt(LookupTree(elemIdx(1, 0), List.tabulate(VLEN/32)(i => i.U -> getWord(data, i))), VLEN),
      "b011".U -> ZeroExt(LookupTree(elemIdx(0), List.tabulate(VLEN/64)(i => i.U -> getDoubleWord(data, i))), VLEN),
      "b100".U -> data // wider elements are not supported and would break here
    ))
  }
}
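// Worked example (illustrative): with alignedType = b001 (16-bit elements) and
// elemIdx = 3, GenVSData selects halfword 3 of the source register, i.e.
// data(63, 48), zero-extended to VLEN bits.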