1/*************************************************************************************** 2 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3 * Copyright (c) 2020-2021 Peng Cheng Laboratory 4 * 5 * XiangShan is licensed under Mulan PSL v2. 6 * You can use this software according to the terms and conditions of the Mulan PSL v2. 7 * You may obtain a copy of Mulan PSL v2 at: 8 * http://license.coscl.org.cn/MulanPSL2 9 * 10 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13 * 14 * See the Mulan PSL v2 for more details. 15 ***************************************************************************************/ 16 17package xiangshan.mem 18 19import org.chipsalliance.cde.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import utils._ 23import utility._ 24import xiangshan._ 25import xiangshan.backend.rob.RobPtr 26import xiangshan.backend.Bundles._ 27import xiangshan.backend.fu.FuType 28 29/** 30 * Common used parameters or functions in vlsu 31 */ 32trait VLSUConstants { 33 val VLEN = 128 34 //for pack unit-stride flow 35 val AlignedNum = 4 // 1/2/4/8 36 def VLENB = VLEN/8 37 def vOffsetBits = log2Up(VLENB) // bits-width to index offset inside a vector reg 38 lazy val vlmBindexBits = 8 //will be overrided later 39 lazy val vsmBindexBits = 8 // will be overrided later 40 41 def alignTypes = 5 // eew/sew = 1/2/4/8, last indicate 128 bit element 42 def alignTypeBits = log2Up(alignTypes) 43 def maxMUL = 8 44 def maxFields = 8 45 /** 46 * In the most extreme cases like a segment indexed instruction, eew=64, emul=8, sew=8, lmul=1, 47 * and nf=8, each data reg is mapped with 8 index regs and there are 8 data regs in total, 48 * each for a field. Therefore an instruction can be divided into 64 uops at most. 49 */ 50 def maxUopNum = maxMUL * maxFields // 64 51 def maxFlowNum = 16 52 def maxElemNum = maxMUL * maxFlowNum // 128 53 // def uopIdxBits = log2Up(maxUopNum) // to index uop inside an robIdx 54 def elemIdxBits = log2Up(maxElemNum) + 1 // to index which element in an instruction 55 def flowIdxBits = log2Up(maxFlowNum) + 1 // to index which flow in a uop 56 def fieldBits = log2Up(maxFields) + 1 // 4-bits to indicate 1~8 57 58 def ewBits = 3 // bits-width of EEW/SEW 59 def mulBits = 3 // bits-width of emul/lmul 60 61 def getSlice(data: UInt, i: Int, alignBits: Int): UInt = { 62 require(data.getWidth >= (i+1) * alignBits) 63 data((i+1) * alignBits - 1, i * alignBits) 64 } 65 def getNoAlignedSlice(data: UInt, i: Int, alignBits: Int): UInt = { 66 data(i * 8 + alignBits - 1, i * 8) 67 } 68 69 def getByte(data: UInt, i: Int = 0) = getSlice(data, i, 8) 70 def getHalfWord(data: UInt, i: Int = 0) = getSlice(data, i, 16) 71 def getWord(data: UInt, i: Int = 0) = getSlice(data, i, 32) 72 def getDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 64) 73 def getDoubleDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 128) 74} 75 76trait HasVLSUParameters extends HasXSParameter with VLSUConstants { 77 override val VLEN = coreParams.VLEN 78 override lazy val vlmBindexBits = log2Up(coreParams.VlMergeBufferSize) 79 override lazy val vsmBindexBits = log2Up(coreParams.VsMergeBufferSize) 80 def isUnitStride(instType: UInt) = instType(1, 0) === "b00".U 81 def isStrided(instType: UInt) = instType(1, 0) === "b10".U 82 def isIndexed(instType: UInt) = instType(0) === "b1".U 83 def isNotIndexed(instType: UInt) = instType(0) === "b0".U 84 def isSegment(instType: UInt) = instType(2) === "b1".U 85 def is128Bit(alignedType: UInt) = alignedType(2) === "b1".U 86 87 def mergeDataWithMask(oldData: UInt, newData: UInt, mask: UInt): Vec[UInt] = { 88 require(oldData.getWidth == newData.getWidth) 89 require(oldData.getWidth == mask.getWidth * 8) 90 VecInit(mask.asBools.zipWithIndex.map { case (en, i) => 91 Mux(en, getByte(newData, i), getByte(oldData, i)) 92 }) 93 } 94 95 // def asBytes(data: UInt) = { 96 // require(data.getWidth % 8 == 0) 97 // (0 until data.getWidth/8).map(i => getByte(data, i)) 98 // } 99 100 def mergeDataWithElemIdx( 101 oldData: UInt, 102 newData: Seq[UInt], 103 alignedType: UInt, 104 elemIdx: Seq[UInt], 105 valids: Seq[Bool] 106 ): UInt = { 107 require(newData.length == elemIdx.length) 108 require(newData.length == valids.length) 109 LookupTree(alignedType, List( 110 "b00".U -> VecInit(elemIdx.map(e => UIntToOH(e(3, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) => 111 ParallelPosteriorityMux( 112 true.B +: selVec.zip(valids).map(x => x._1 && x._2), 113 getByte(oldData, i) +: newData.map(getByte(_)) 114 )}).asUInt, 115 "b01".U -> VecInit(elemIdx.map(e => UIntToOH(e(2, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) => 116 ParallelPosteriorityMux( 117 true.B +: selVec.zip(valids).map(x => x._1 && x._2), 118 getHalfWord(oldData, i) +: newData.map(getHalfWord(_)) 119 )}).asUInt, 120 "b10".U -> VecInit(elemIdx.map(e => UIntToOH(e(1, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) => 121 ParallelPosteriorityMux( 122 true.B +: selVec.zip(valids).map(x => x._1 && x._2), 123 getWord(oldData, i) +: newData.map(getWord(_)) 124 )}).asUInt, 125 "b11".U -> VecInit(elemIdx.map(e => UIntToOH(e(0)).asBools).transpose.zipWithIndex.map { case (selVec, i) => 126 ParallelPosteriorityMux( 127 true.B +: selVec.zip(valids).map(x => x._1 && x._2), 128 getDoubleWord(oldData, i) +: newData.map(getDoubleWord(_)) 129 )}).asUInt 130 )) 131 } 132 133 def mergeDataWithElemIdx(oldData: UInt, newData: UInt, alignedType: UInt, elemIdx: UInt): UInt = { 134 mergeDataWithElemIdx(oldData, Seq(newData), alignedType, Seq(elemIdx), Seq(true.B)) 135 } 136 /** 137 * for merge 128-bits data of unit-stride 138 */ 139 object mergeDataByByte{ 140 def apply(oldData: UInt, newData: UInt, mask: UInt): UInt = { 141 val selVec = Seq(mask).map(_.asBools).transpose 142 VecInit(selVec.zipWithIndex.map{ case (selV, i) => 143 ParallelPosteriorityMux( 144 true.B +: selV.map(x => x), 145 getByte(oldData, i) +: Seq(getByte(newData, i)) 146 )}).asUInt 147 } 148 } 149 150 /** 151 * for merge Unit-Stride data to 256-bits 152 * merge 128-bits data to 256-bits 153 * if have 3 port, 154 * if is port0, it is 6 to 1 Multiplexer -> (128'b0, data) or (data, 128'b0) or (data, port2data) or (port2data, data) or (data, port3data) or (port3data, data) 155 * if is port1, it is 4 to 1 Multiplexer -> (128'b0, data) or (data, 128'b0) or (data, port3data) or (port3data, data) 156 * if is port3, it is 2 to 1 Multiplexer -> (128'b0, data) or (data, 128'b0) 157 * 158 */ 159 object mergeDataByIndex{ 160 def apply(data: Seq[UInt], mask: Seq[UInt], index: UInt, valids: Seq[Bool]): (UInt, UInt) = { 161 require(data.length == valids.length) 162 require(data.length == mask.length) 163 val muxLength = data.length 164 val selDataMatrix = Wire(Vec(muxLength, Vec(2, UInt((VLEN * 2).W)))) // 3 * 2 * 256 165 val selMaskMatrix = Wire(Vec(muxLength, Vec(2, UInt((VLENB * 2).W)))) // 3 * 2 * 16 166 dontTouch(selDataMatrix) 167 dontTouch(selMaskMatrix) 168 for(i <- 0 until muxLength){ 169 if(i == 0){ 170 selDataMatrix(i)(0) := Cat(0.U(VLEN.W), data(i)) 171 selDataMatrix(i)(1) := Cat(data(i), 0.U(VLEN.W)) 172 selMaskMatrix(i)(0) := Cat(0.U(VLENB.W), mask(i)) 173 selMaskMatrix(i)(1) := Cat(mask(i), 0.U(VLENB.W)) 174 } 175 else{ 176 selDataMatrix(i)(0) := Cat(data(i), data(0)) 177 selDataMatrix(i)(1) := Cat(data(0), data(i)) 178 selMaskMatrix(i)(0) := Cat(mask(i), mask(0)) 179 selMaskMatrix(i)(1) := Cat(mask(0), mask(i)) 180 } 181 } 182 val selIdxVec = (0 until muxLength).map(_.U) 183 val selIdx = PriorityMux(valids.reverse, selIdxVec.reverse) 184 185 val selData = LookupTree(index, List( 186 0.U -> selDataMatrix(selIdx)(0), 187 1.U -> selDataMatrix(selIdx)(1) 188 )) 189 val selMask = LookupTree(index, List( 190 0.U -> selMaskMatrix(selIdx)(0), 191 1.U -> selMaskMatrix(selIdx)(1) 192 )) 193 (selData, selMask) 194 } 195 } 196 def mergeDataByIndex(data: UInt, mask: UInt, index: UInt): (UInt, UInt) = { 197 mergeDataByIndex(Seq(data), Seq(mask), index, Seq(true.B)) 198 } 199} 200abstract class VLSUModule(implicit p: Parameters) extends XSModule 201 with HasVLSUParameters 202 with HasCircularQueuePtrHelper 203abstract class VLSUBundle(implicit p: Parameters) extends XSBundle 204 with HasVLSUParameters 205 206class VLSUBundleWithMicroOp(implicit p: Parameters) extends VLSUBundle { 207 val uop = new DynInst 208} 209 210class OnlyVecExuOutput(implicit p: Parameters) extends VLSUBundle { 211 val isvec = Bool() 212 val vecdata = UInt(VLEN.W) 213 val mask = UInt(VLENB.W) 214 // val rob_idx_valid = Vec(2, Bool()) 215 // val inner_idx = Vec(2, UInt(3.W)) 216 // val rob_idx = Vec(2, new RobPtr) 217 // val offset = Vec(2, UInt(4.W)) 218 val reg_offset = UInt(vOffsetBits.W) 219 val vecActive = Bool() // 1: vector active element, 0: vector not active element 220 val is_first_ele = Bool() 221 val elemIdx = UInt(elemIdxBits.W) // element index 222 val elemIdxInsideVd = UInt(elemIdxBits.W) // element index in scope of vd 223 // val uopQueuePtr = new VluopPtr 224 // val flowPtr = new VlflowPtr 225} 226 227class VecExuOutput(implicit p: Parameters) extends MemExuOutput with HasVLSUParameters { 228 val vec = new OnlyVecExuOutput 229 val alignedType = UInt(alignTypeBits.W) 230 // feedback 231 val vecFeedback = Bool() 232} 233 234// class VecStoreExuOutput(implicit p: Parameters) extends MemExuOutput with HasVLSUParameters { 235// val elemIdx = UInt(elemIdxBits.W) 236// val uopQueuePtr = new VsUopPtr 237// val fieldIdx = UInt(fieldBits.W) 238// val segmentIdx = UInt(elemIdxBits.W) 239// val vaddr = UInt(VAddrBits.W) 240// // pack 241// val isPackage = Bool() 242// val packageNum = UInt((log2Up(VLENB) + 1).W) 243// val originAlignedType = UInt(alignTypeBits.W) 244// val alignedType = UInt(alignTypeBits.W) 245// } 246 247class VecUopBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp { 248 val flowMask = UInt(VLENB.W) // each bit for a flow 249 val byteMask = UInt(VLENB.W) // each bit for a byte 250 val data = UInt(VLEN.W) 251 // val fof = Bool() // fof is only used for vector loads 252 val excp_eew_index = UInt(elemIdxBits.W) 253 // val exceptionVec = ExceptionVec() // uop has exceptionVec 254 val baseAddr = UInt(VAddrBits.W) 255 val stride = UInt(VLEN.W) 256 val flow_counter = UInt(flowIdxBits.W) 257 258 // instruction decode result 259 val flowNum = UInt(flowIdxBits.W) // # of flows in a uop 260 // val flowNumLog2 = UInt(log2Up(flowIdxBits).W) // log2(flowNum), for better timing of multiplication 261 val nfields = UInt(fieldBits.W) // NFIELDS 262 val vm = Bool() // whether vector masking is enabled 263 val usWholeReg = Bool() // unit-stride, whole register load 264 val usMaskReg = Bool() // unit-stride, masked store/load 265 val eew = UInt(ewBits.W) // size of memory elements 266 val sew = UInt(ewBits.W) 267 val emul = UInt(mulBits.W) 268 val lmul = UInt(mulBits.W) 269 val vlmax = UInt(elemIdxBits.W) 270 val instType = UInt(3.W) 271 val vd_last_uop = Bool() 272 val vd_first_uop = Bool() 273} 274 275class VecFlowBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp { 276 val vaddr = UInt(VAddrBits.W) 277 val mask = UInt(VLENB.W) 278 val alignedType = UInt(alignTypeBits.W) 279 val vecActive = Bool() 280 val elemIdx = UInt(elemIdxBits.W) 281 val is_first_ele = Bool() 282 283 // pack 284 val isPackage = Bool() 285 val packageNum = UInt((log2Up(VLENB) + 1).W) 286 val originAlignedType = UInt(alignTypeBits.W) 287} 288 289class VecMemExuOutput(isVector: Boolean = false)(implicit p: Parameters) extends VLSUBundle{ 290 val output = new MemExuOutput(isVector) 291 val vecFeedback = Bool() 292 val mmio = Bool() 293 val usSecondInv = Bool() 294 val elemIdx = UInt(elemIdxBits.W) 295 val alignedType = UInt(alignTypeBits.W) 296 val mbIndex = UInt(vsmBindexBits.W) 297} 298 299object MulNum { 300 def apply (mul: UInt): UInt = { //mul means emul or lmul 301 (LookupTree(mul,List( 302 "b101".U -> 1.U , // 1/8 303 "b110".U -> 1.U , // 1/4 304 "b111".U -> 1.U , // 1/2 305 "b000".U -> 1.U , // 1 306 "b001".U -> 2.U , // 2 307 "b010".U -> 4.U , // 4 308 "b011".U -> 8.U // 8 309 )))} 310} 311/** 312 * when emul is greater than or equal to 1, this means the entire register needs to be written; 313 * otherwise, only write the specified number of bytes */ 314object MulDataSize { 315 def apply (mul: UInt): UInt = { //mul means emul or lmul 316 (LookupTree(mul,List( 317 "b101".U -> 2.U , // 1/8 318 "b110".U -> 4.U , // 1/4 319 "b111".U -> 8.U , // 1/2 320 "b000".U -> 16.U , // 1 321 "b001".U -> 16.U , // 2 322 "b010".U -> 16.U , // 4 323 "b011".U -> 16.U // 8 324 )))} 325} 326 327object OneRegNum { 328 def apply (eew: UInt): UInt = { //mul means emul or lmul 329 (LookupTree(eew,List( 330 "b000".U -> 16.U , // 1 331 "b101".U -> 8.U , // 2 332 "b110".U -> 4.U , // 4 333 "b111".U -> 2.U // 8 334 )))} 335} 336 337//index inst read data byte 338object SewDataSize { 339 def apply (sew: UInt): UInt = { 340 (LookupTree(sew,List( 341 "b000".U -> 1.U , // 1 342 "b001".U -> 2.U , // 2 343 "b010".U -> 4.U , // 4 344 "b011".U -> 8.U // 8 345 )))} 346} 347 348// strided inst read data byte 349object EewDataSize { 350 def apply (eew: UInt): UInt = { 351 (LookupTree(eew,List( 352 "b000".U -> 1.U , // 1 353 "b101".U -> 2.U , // 2 354 "b110".U -> 4.U , // 4 355 "b111".U -> 8.U // 8 356 )))} 357} 358 359object loadDataSize { 360 def apply (instType: UInt, emul: UInt, eew: UInt, sew: UInt): UInt = { 361 (LookupTree(instType,List( 362 "b000".U -> MulDataSize(emul), // unit-stride 363 "b010".U -> EewDataSize(eew) , // strided 364 "b001".U -> SewDataSize(sew) , // indexed-unordered 365 "b011".U -> SewDataSize(sew) , // indexed-ordered 366 "b100".U -> EewDataSize(eew) , // segment unit-stride 367 "b110".U -> EewDataSize(eew) , // segment strided 368 "b101".U -> SewDataSize(sew) , // segment indexed-unordered 369 "b111".U -> SewDataSize(sew) // segment indexed-ordered 370 )))} 371} 372 373object storeDataSize { 374 def apply (instType: UInt, eew: UInt, sew: UInt): UInt = { 375 (LookupTree(instType,List( 376 "b000".U -> EewDataSize(eew) , // unit-stride, do not use 377 "b010".U -> EewDataSize(eew) , // strided 378 "b001".U -> SewDataSize(sew) , // indexed-unordered 379 "b011".U -> SewDataSize(sew) , // indexed-ordered 380 "b100".U -> EewDataSize(eew) , // segment unit-stride 381 "b110".U -> EewDataSize(eew) , // segment strided 382 "b101".U -> SewDataSize(sew) , // segment indexed-unordered 383 "b111".U -> SewDataSize(sew) // segment indexed-ordered 384 )))} 385} 386 387object GenVecStoreMask { 388 def apply (instType: UInt, eew: UInt, sew: UInt): UInt = { 389 val mask = Wire(UInt(16.W)) 390 mask := UIntToOH(storeDataSize(instType = instType, eew = eew, sew = sew)) - 1.U 391 mask 392 } 393} 394 395/** 396 * these are used to obtain immediate addresses for index instruction */ 397object EewEq8 { 398 def apply(index:UInt, flow_inner_idx: UInt): UInt = { 399 (LookupTree(flow_inner_idx,List( 400 0.U -> index(7 ,0 ), 401 1.U -> index(15,8 ), 402 2.U -> index(23,16 ), 403 3.U -> index(31,24 ), 404 4.U -> index(39,32 ), 405 5.U -> index(47,40 ), 406 6.U -> index(55,48 ), 407 7.U -> index(63,56 ), 408 8.U -> index(71,64 ), 409 9.U -> index(79,72 ), 410 10.U -> index(87,80 ), 411 11.U -> index(95,88 ), 412 12.U -> index(103,96 ), 413 13.U -> index(111,104), 414 14.U -> index(119,112), 415 15.U -> index(127,120) 416 )))} 417} 418 419object EewEq16 { 420 def apply(index: UInt, flow_inner_idx: UInt): UInt = { 421 (LookupTree(flow_inner_idx, List( 422 0.U -> index(15, 0), 423 1.U -> index(31, 16), 424 2.U -> index(47, 32), 425 3.U -> index(63, 48), 426 4.U -> index(79, 64), 427 5.U -> index(95, 80), 428 6.U -> index(111, 96), 429 7.U -> index(127, 112) 430 )))} 431} 432 433object EewEq32 { 434 def apply(index: UInt, flow_inner_idx: UInt): UInt = { 435 (LookupTree(flow_inner_idx, List( 436 0.U -> index(31, 0), 437 1.U -> index(63, 32), 438 2.U -> index(95, 64), 439 3.U -> index(127, 96) 440 )))} 441} 442 443object EewEq64 { 444 def apply (index: UInt, flow_inner_idx: UInt): UInt = { 445 (LookupTree(flow_inner_idx, List( 446 0.U -> index(63, 0), 447 1.U -> index(127, 64) 448 )))} 449} 450 451object IndexAddr { 452 def apply (index: UInt, flow_inner_idx: UInt, eew: UInt): UInt = { 453 (LookupTree(eew,List( 454 "b000".U -> EewEq8 (index = index, flow_inner_idx = flow_inner_idx ), // Imm is 1 Byte // TODO: index maybe cross register 455 "b101".U -> EewEq16(index = index, flow_inner_idx = flow_inner_idx ), // Imm is 2 Byte 456 "b110".U -> EewEq32(index = index, flow_inner_idx = flow_inner_idx ), // Imm is 4 Byte 457 "b111".U -> EewEq64(index = index, flow_inner_idx = flow_inner_idx ) // Imm is 8 Byte 458 )))} 459} 460 461object Log2Num { 462 def apply (num: UInt): UInt = { 463 (LookupTree(num,List( 464 16.U -> 4.U, 465 8.U -> 3.U, 466 4.U -> 2.U, 467 2.U -> 1.U, 468 1.U -> 0.U 469 )))} 470} 471 472object GenUopIdxInField { 473 def apply (instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = { 474 val isIndexed = instType(0) 475 val mulInField = Mux( 476 isIndexed, 477 Mux(lmul.asSInt > emul.asSInt, lmul, emul), 478 emul 479 ) 480 LookupTree(mulInField, List( 481 "b101".U -> 0.U, 482 "b110".U -> 0.U, 483 "b111".U -> 0.U, 484 "b000".U -> 0.U, 485 "b001".U -> uopIdx(0), 486 "b010".U -> uopIdx(1, 0), 487 "b011".U -> uopIdx(2, 0) 488 )) 489 } 490} 491 492//eew decode 493object EewLog2 extends VLSUConstants { 494 // def apply (eew: UInt): UInt = { 495 // (LookupTree(eew,List( 496 // "b000".U -> "b000".U , // 1 497 // "b101".U -> "b001".U , // 2 498 // "b110".U -> "b010".U , // 4 499 // "b111".U -> "b011".U // 8 500 // )))} 501 def apply(eew: UInt): UInt = ZeroExt(eew(1, 0), ewBits) 502} 503 504/** 505 * unit-stride instructions don't use this method; 506 * other instructions generate realFlowNum by EmulDataSize >> eew(1,0), 507 * EmulDataSize means the number of bytes that need to be written to the register, 508 * eew(1,0) means the number of bytes written at once*/ 509object GenRealFlowNum { 510 def apply (instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt): UInt = { 511 (LookupTree(instType,List( 512 "b000".U -> (MulDataSize(emul) >> eew(1,0)).asUInt, // store use, load do not use 513 "b010".U -> (MulDataSize(emul) >> eew(1,0)).asUInt, // strided 514 "b001".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew(1,0)).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt), // indexed-unordered 515 "b011".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew(1,0)).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt), // indexed-ordered 516 "b100".U -> (MulDataSize(emul) >> eew(1,0)).asUInt, // segment unit-stride 517 "b110".U -> (MulDataSize(emul) >> eew(1,0)).asUInt, // segment strided 518 "b101".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew(1,0)).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt), // segment indexed-unordered 519 "b111".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew(1,0)).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt) // segment indexed-ordered 520 )))} 521} 522 523/** 524 * GenRealFlowLog2 = Log2(GenRealFlowNum) 525 */ 526object GenRealFlowLog2 extends VLSUConstants { 527 def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt): UInt = { 528 val emulLog2 = Mux(emul.asSInt >= 0.S, 0.U, emul) 529 val lmulLog2 = Mux(lmul.asSInt >= 0.S, 0.U, lmul) 530 val eewRealFlowLog2 = emulLog2 + log2Up(VLENB).U - eew(1, 0) 531 val sewRealFlowLog2 = lmulLog2 + log2Up(VLENB).U - sew(1, 0) 532 (LookupTree(instType, List( 533 "b000".U -> eewRealFlowLog2, // unit-stride 534 "b010".U -> eewRealFlowLog2, // strided 535 "b001".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-unordered 536 "b011".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-ordered 537 "b100".U -> eewRealFlowLog2, // segment unit-stride 538 "b110".U -> eewRealFlowLog2, // segment strided 539 "b101".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // segment indexed-unordered 540 "b111".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // segment indexed-ordered 541 ))) 542 } 543} 544 545/** 546 * GenElemIdx generals an element index within an instruction, given a certain uopIdx and a known flowIdx 547 * inside the uop. 548 */ 549object GenElemIdx extends VLSUConstants { 550 def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt, 551 uopIdx: UInt, flowIdx: UInt): UInt = { 552 val isIndexed = instType(0).asBool 553 val eewUopFlowsLog2 = Mux(emul.asSInt > 0.S, 0.U, emul) + log2Up(VLENB).U - eew(1, 0) 554 val sewUopFlowsLog2 = Mux(lmul.asSInt > 0.S, 0.U, lmul) + log2Up(VLENB).U - sew(1, 0) 555 val uopFlowsLog2 = Mux( 556 isIndexed, 557 Mux(emul.asSInt > lmul.asSInt, eewUopFlowsLog2, sewUopFlowsLog2), 558 eewUopFlowsLog2 559 ) 560 LookupTree(uopFlowsLog2, List( 561 0.U -> uopIdx, 562 1.U -> uopIdx ## flowIdx(0), 563 2.U -> uopIdx ## flowIdx(1, 0), 564 3.U -> uopIdx ## flowIdx(2, 0), 565 4.U -> uopIdx ## flowIdx(3, 0) 566 )) 567 } 568} 569 570/** 571 * GenVLMAX calculates VLMAX, which equals MUL * ew 572 */ 573object GenVLMAXLog2 extends VLSUConstants { 574 def apply(lmul: UInt, sew: UInt): UInt = lmul + log2Up(VLENB).U - sew 575} 576object GenVLMAX { 577 def apply(lmul: UInt, sew: UInt): UInt = 1.U << GenVLMAXLog2(lmul, sew) 578} 579 580object GenUSWholeRegVL extends VLSUConstants { 581 def apply(nfields: UInt, eew: UInt): UInt = { 582 LookupTree(eew(1, 0), List( 583 "b00".U -> (nfields << (log2Up(VLENB) - 0)), 584 "b01".U -> (nfields << (log2Up(VLENB) - 1)), 585 "b10".U -> (nfields << (log2Up(VLENB) - 2)), 586 "b11".U -> (nfields << (log2Up(VLENB) - 3)) 587 )) 588 } 589} 590object GenUSWholeEmul extends VLSUConstants{ 591 def apply(nf: UInt): UInt={ 592 LookupTree(nf,List( 593 "b000".U -> "b000".U(mulBits.W), 594 "b001".U -> "b001".U(mulBits.W), 595 "b011".U -> "b010".U(mulBits.W), 596 "b111".U -> "b011".U(mulBits.W) 597 )) 598 } 599} 600 601 602object GenUSMaskRegVL extends VLSUConstants { 603 def apply(vl: UInt): UInt = { 604 Mux(vl(2,0) === 0.U , (vl >> 3.U), ((vl >> 3.U) + 1.U)) 605 } 606} 607 608object GenUopByteMask { 609 def apply(flowMask: UInt, alignedType: UInt): UInt = { 610 LookupTree(alignedType, List( 611 "b000".U -> flowMask, 612 "b001".U -> FillInterleaved(2, flowMask), 613 "b010".U -> FillInterleaved(4, flowMask), 614 "b011".U -> FillInterleaved(8, flowMask), 615 "b100".U -> FillInterleaved(16, flowMask) 616 )) 617 } 618} 619 620object GenVdIdxInField extends VLSUConstants { 621 def apply(instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = { 622 val vdIdx = Wire(UInt(log2Up(maxMUL).W)) 623 when (instType(1,0) === "b00".U || instType(1,0) === "b10".U || lmul.asSInt > emul.asSInt) { 624 // Unit-stride or Strided, or indexed with lmul >= emul 625 vdIdx := uopIdx 626 }.otherwise { 627 // Indexed with lmul <= emul 628 val multiple = emul - lmul 629 val uopIdxWidth = uopIdx.getWidth 630 vdIdx := LookupTree(multiple, List( 631 0.U -> uopIdx, 632 1.U -> (uopIdx >> 1), 633 2.U -> (uopIdx >> 2), 634 3.U -> (uopIdx >> 3) 635 )) 636 } 637 vdIdx 638 } 639} 640/** 641* Use start and vl to generate flow activative mask 642* mod = true fill 0 643* mod = false fill 1 644*/ 645object GenFlowMask extends VLSUConstants { 646 def apply(elementMask: UInt, start: UInt, vl: UInt , mod: Boolean): UInt = { 647 val startMask = ~UIntToMask(start, VLEN) 648 val vlMask = UIntToMask(vl, VLEN) 649 val maskVlStart = vlMask & startMask 650 if(mod){ 651 elementMask & maskVlStart 652 } 653 else{ 654 (~elementMask).asUInt & maskVlStart 655 } 656 } 657} 658 659object CheckAligned extends VLSUConstants { 660 def apply(addr: UInt): UInt = { 661 val aligned_16 = (addr(0) === 0.U) // 16-bit 662 val aligned_32 = (addr(1,0) === 0.U) // 32-bit 663 val aligned_64 = (addr(2,0) === 0.U) // 64-bit 664 val aligned_128 = (addr(3,0) === 0.U) // 128-bit 665 Cat(true.B, aligned_16, aligned_32, aligned_64, aligned_128) 666 } 667} 668 669/** 670 search if mask have continue 'len' bit '1' 671 mask: source mask 672 len: search length 673*/ 674object GenPackMask{ 675 def leadX(mask: Seq[Bool], len: Int): Bool = { 676 if(len == 1){ 677 mask.head 678 } 679 else{ 680 leadX(mask.drop(1),len-1) & mask.head 681 } 682 } 683 def leadOneVec(shiftMask: Seq[Bool]): UInt = { 684 // max is 64-bit, so the max num of flow to pack is 8 685 686 val lead1 = leadX(shiftMask, 1) // continue 1 bit 687 val lead2 = leadX(shiftMask, 2) // continue 2 bit 688 val lead4 = leadX(shiftMask, 4) // continue 4 bit 689 val lead8 = leadX(shiftMask, 8) // continue 8 bit 690 val lead16 = leadX(shiftMask, 16) // continue 16 bit 691 Cat(lead1, lead2, lead4, lead8, lead16) 692 } 693 694 def apply(shiftMask: UInt) = { 695 // pack mask 696 val packMask = leadOneVec(shiftMask.asBools) 697 packMask 698 } 699} 700/** 701PackEnable = (LeadXVec >> eew) & alignedVec, where the 0th bit represents the ability to merge into a 64 bit flow, the second bit represents the ability to merge into a 32 bit flow, and so on. 702 703example: 704 addr = 0x0, activeMask = b00011100101111, flowIdx = 0, eew = 0(8-bit) 705 706 step 0 : addrAlignedVec = (1, 1, 1, 1) elemIdxAligned = (1, 1, 1, 1) 707 step 1 : activePackVec = (1, 1, 1, 0), inactivePackVec = (0, 0, 0, 0) 708 step 2 : activePackEnable = (1, 1, 1, 0), inactivePackVec = (0, 0, 0, 0) 709 710 we can package 4 8-bit activative flows into a 32-bit flow. 711*/ 712object GenPackVec extends VLSUConstants{ 713 def apply(addr: UInt, shiftMask: UInt, eew: UInt, elemIdx: UInt): UInt = { 714 val addrAlignedVec = CheckAligned(addr) 715 val elemIdxAligned = CheckAligned(elemIdx) 716 val packMask = GenPackMask(shiftMask) 717 // generate packVec 718 val packVec = addrAlignedVec & elemIdxAligned & (packMask.asUInt >> eew) 719 720 packVec 721 } 722} 723 724object GenPackAlignedType extends VLSUConstants{ 725 def apply(packVec: UInt): UInt = { 726 val packAlignedType = PriorityMux(Seq( 727 packVec(0) -> "b100".U, 728 packVec(1) -> "b011".U, 729 packVec(2) -> "b010".U, 730 packVec(3) -> "b001".U, 731 packVec(4) -> "b000".U 732 )) 733 packAlignedType 734 } 735} 736 737object GenPackNum extends VLSUConstants{ 738 def apply(alignedType: UInt, packAlignedType: UInt): UInt = { 739 (1.U << (packAlignedType - alignedType)).asUInt 740 } 741} 742 743object genVWmask128 { 744 def apply(addr: UInt, sizeEncode: UInt): UInt = { 745 (LookupTree(sizeEncode, List( 746 "b000".U -> 0x1.U, //0001 << addr(2:0) 747 "b001".U -> 0x3.U, //0011 748 "b010".U -> 0xf.U, //1111 749 "b011".U -> 0xff.U, //11111111 750 "b100".U -> 0xffff.U //1111111111111111 751 )) << addr(3, 0)).asUInt 752 } 753} 754/* 755* only use in max length is 128 756*/ 757object genVWdata { 758 def apply(data: UInt, sizeEncode: UInt): UInt = { 759 LookupTree(sizeEncode, List( 760 "b000".U -> Fill(16, data(7, 0)), 761 "b001".U -> Fill(8, data(15, 0)), 762 "b010".U -> Fill(4, data(31, 0)), 763 "b011".U -> Fill(2, data(63,0)), 764 "b100".U -> data(127,0) 765 )) 766 } 767} 768 769object genUSSplitAddr{ 770 def apply(addr: UInt, index: UInt): UInt = { 771 val tmpAddr = Cat(addr(38, 4), 0.U(4.W)) 772 val nextCacheline = tmpAddr + 16.U 773 LookupTree(index, List( 774 0.U -> tmpAddr, 775 1.U -> nextCacheline 776 )) 777 } 778} 779 780object genUSSplitMask{ 781 def apply(mask: UInt, index: UInt, addrOffset: UInt): UInt = { 782 val tmpMask = Cat(0.U(16.W),mask) << addrOffset // 32-bits 783 LookupTree(index, List( 784 0.U -> tmpMask(15, 0), 785 1.U -> tmpMask(31, 16), 786 )) 787 } 788} 789 790object genUSSplitData{ 791 def apply(data: UInt, index: UInt, addrOffset: UInt): UInt = { 792 val tmpData = WireInit(0.U(256.W)) 793 val lookupTable = (0 until 16).map{case i => 794 if(i == 0){ 795 i.U -> Cat(0.U(128.W), data) 796 }else{ 797 i.U -> Cat(0.U(((16-i)*8).W), data, 0.U((i*8).W)) 798 } 799 } 800 tmpData := LookupTree(addrOffset, lookupTable).asUInt 801 802 LookupTree(index, List( 803 0.U -> tmpData(127, 0), 804 1.U -> tmpData(255, 128) 805 )) 806 } 807} 808 809object GenVSData extends VLSUConstants { 810 def apply(data: UInt, elemIdx: UInt, alignedType: UInt): UInt = { 811 LookupTree(alignedType, List( 812 "b000".U -> ZeroExt(LookupTree(elemIdx(3, 0), List.tabulate(VLEN/8)(i => i.U -> getByte(data, i))), VLEN), 813 "b001".U -> ZeroExt(LookupTree(elemIdx(2, 0), List.tabulate(VLEN/16)(i => i.U -> getHalfWord(data, i))), VLEN), 814 "b010".U -> ZeroExt(LookupTree(elemIdx(1, 0), List.tabulate(VLEN/32)(i => i.U -> getWord(data, i))), VLEN), 815 "b011".U -> ZeroExt(LookupTree(elemIdx(0), List.tabulate(VLEN/64)(i => i.U -> getDoubleWord(data, i))), VLEN), 816 "b100".U -> data // if have wider element, it will broken 817 )) 818 } 819}