1/*************************************************************************************** 2 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3 * Copyright (c) 2020-2021 Peng Cheng Laboratory 4 * 5 * XiangShan is licensed under Mulan PSL v2. 6 * You can use this software according to the terms and conditions of the Mulan PSL v2. 7 * You may obtain a copy of Mulan PSL v2 at: 8 * http://license.coscl.org.cn/MulanPSL2 9 * 10 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13 * 14 * See the Mulan PSL v2 for more details. 15 ***************************************************************************************/ 16 17package xiangshan.mem 18 19import org.chipsalliance.cde.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import utils._ 23import utility._ 24import xiangshan._ 25import xiangshan.backend.rob.RobPtr 26import xiangshan.backend.Bundles._ 27import xiangshan.backend.fu.FuType 28import xiangshan.backend.fu.vector.Bundles.VEew 29 30/** 31 * Common used parameters or functions in vlsu 32 */ 33trait VLSUConstants { 34 val VLEN = 128 35 //for pack unit-stride flow 36 val AlignedNum = 4 // 1/2/4/8 37 def VLENB = VLEN/8 38 def vOffsetBits = log2Up(VLENB) // bits-width to index offset inside a vector reg 39 lazy val vlmBindexBits = 8 //will be overrided later 40 lazy val vsmBindexBits = 8 // will be overrided later 41 42 def alignTypes = 5 // eew/sew = 1/2/4/8, last indicate 128 bit element 43 def alignTypeBits = log2Up(alignTypes) 44 def maxMUL = 8 45 def maxFields = 8 46 /** 47 * In the most extreme cases like a segment indexed instruction, eew=64, emul=8, sew=8, lmul=1, 48 * and nf=8, each data reg is mapped with 8 index regs and there are 8 data regs in total, 49 * each for a field. Therefore an instruction can be divided into 64 uops at most. 50 */ 51 def maxUopNum = maxMUL * maxFields // 64 52 def maxFlowNum = 16 53 def maxElemNum = maxMUL * maxFlowNum // 128 54 // def uopIdxBits = log2Up(maxUopNum) // to index uop inside an robIdx 55 def elemIdxBits = log2Up(maxElemNum) + 1 // to index which element in an instruction 56 def flowIdxBits = log2Up(maxFlowNum) + 1 // to index which flow in a uop 57 def fieldBits = log2Up(maxFields) + 1 // 4-bits to indicate 1~8 58 59 def ewBits = 3 // bits-width of EEW/SEW 60 def mulBits = 3 // bits-width of emul/lmul 61 62 def getSlice(data: UInt, i: Int, alignBits: Int): UInt = { 63 require(data.getWidth >= (i+1) * alignBits) 64 data((i+1) * alignBits - 1, i * alignBits) 65 } 66 def getNoAlignedSlice(data: UInt, i: Int, alignBits: Int): UInt = { 67 data(i * 8 + alignBits - 1, i * 8) 68 } 69 70 def getByte(data: UInt, i: Int = 0) = getSlice(data, i, 8) 71 def getHalfWord(data: UInt, i: Int = 0) = getSlice(data, i, 16) 72 def getWord(data: UInt, i: Int = 0) = getSlice(data, i, 32) 73 def getDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 64) 74 def getDoubleDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 128) 75} 76 77trait HasVLSUParameters extends HasXSParameter with VLSUConstants { 78 override val VLEN = coreParams.VLEN 79 override lazy val vlmBindexBits = log2Up(coreParams.VlMergeBufferSize) 80 override lazy val vsmBindexBits = log2Up(coreParams.VsMergeBufferSize) 81 lazy val maxMemByteNum = 16 // Maximum bytes for a single memory access 82 /** 83 * get addr aligned low bits 84 * @param addr Address to be check 85 * @param width Width for checking alignment 86 */ 87 def getCheckAddrLowBits(addr: UInt, width: Int): UInt = addr(log2Up(width) - 1, 0) 88 def getOverflowBit(in: UInt, width: Int): UInt = in(log2Up(width)) 89 def isUnitStride(instType: UInt) = instType(1, 0) === "b00".U 90 def isStrided(instType: UInt) = instType(1, 0) === "b10".U 91 def isIndexed(instType: UInt) = instType(0) === "b1".U 92 def isNotIndexed(instType: UInt) = instType(0) === "b0".U 93 def isSegment(instType: UInt) = instType(2) === "b1".U 94 def is128Bit(alignedType: UInt) = alignedType(2) === "b1".U 95 96 def mergeDataWithMask(oldData: UInt, newData: UInt, mask: UInt): Vec[UInt] = { 97 require(oldData.getWidth == newData.getWidth) 98 require(oldData.getWidth == mask.getWidth * 8) 99 VecInit(mask.asBools.zipWithIndex.map { case (en, i) => 100 Mux(en, getByte(newData, i), getByte(oldData, i)) 101 }) 102 } 103 104 // def asBytes(data: UInt) = { 105 // require(data.getWidth % 8 == 0) 106 // (0 until data.getWidth/8).map(i => getByte(data, i)) 107 // } 108 109 def mergeDataWithElemIdx( 110 oldData: UInt, 111 newData: Seq[UInt], 112 alignedType: UInt, 113 elemIdx: Seq[UInt], 114 valids: Seq[Bool] 115 ): UInt = { 116 require(newData.length == elemIdx.length) 117 require(newData.length == valids.length) 118 LookupTree(alignedType, List( 119 "b00".U -> VecInit(elemIdx.map(e => UIntToOH(e(3, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) => 120 ParallelPosteriorityMux( 121 true.B +: selVec.zip(valids).map(x => x._1 && x._2), 122 getByte(oldData, i) +: newData.map(getByte(_)) 123 )}).asUInt, 124 "b01".U -> VecInit(elemIdx.map(e => UIntToOH(e(2, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) => 125 ParallelPosteriorityMux( 126 true.B +: selVec.zip(valids).map(x => x._1 && x._2), 127 getHalfWord(oldData, i) +: newData.map(getHalfWord(_)) 128 )}).asUInt, 129 "b10".U -> VecInit(elemIdx.map(e => UIntToOH(e(1, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) => 130 ParallelPosteriorityMux( 131 true.B +: selVec.zip(valids).map(x => x._1 && x._2), 132 getWord(oldData, i) +: newData.map(getWord(_)) 133 )}).asUInt, 134 "b11".U -> VecInit(elemIdx.map(e => UIntToOH(e(0)).asBools).transpose.zipWithIndex.map { case (selVec, i) => 135 ParallelPosteriorityMux( 136 true.B +: selVec.zip(valids).map(x => x._1 && x._2), 137 getDoubleWord(oldData, i) +: newData.map(getDoubleWord(_)) 138 )}).asUInt 139 )) 140 } 141 142 def mergeDataWithElemIdx(oldData: UInt, newData: UInt, alignedType: UInt, elemIdx: UInt): UInt = { 143 mergeDataWithElemIdx(oldData, Seq(newData), alignedType, Seq(elemIdx), Seq(true.B)) 144 } 145 /** 146 * for merge 128-bits data of unit-stride 147 */ 148 object mergeDataByByte{ 149 def apply(oldData: UInt, newData: UInt, mask: UInt): UInt = { 150 val selVec = Seq(mask).map(_.asBools).transpose 151 VecInit(selVec.zipWithIndex.map{ case (selV, i) => 152 ParallelPosteriorityMux( 153 true.B +: selV.map(x => x), 154 getByte(oldData, i) +: Seq(getByte(newData, i)) 155 )}).asUInt 156 } 157 } 158 159 /** 160 * for merge Unit-Stride data to 256-bits 161 * merge 128-bits data to 256-bits 162 * if have 3 port, 163 * if is port0, it is 6 to 1 Multiplexer -> (128'b0, data) or (data, 128'b0) or (data, port2data) or (port2data, data) or (data, port3data) or (port3data, data) 164 * if is port1, it is 4 to 1 Multiplexer -> (128'b0, data) or (data, 128'b0) or (data, port3data) or (port3data, data) 165 * if is port3, it is 2 to 1 Multiplexer -> (128'b0, data) or (data, 128'b0) 166 * 167 */ 168 object mergeDataByIndex{ 169 def apply(data: Seq[UInt], mask: Seq[UInt], index: UInt, valids: Seq[Bool]): (UInt, UInt) = { 170 require(data.length == valids.length) 171 require(data.length == mask.length) 172 val muxLength = data.length 173 val selDataMatrix = Wire(Vec(muxLength, Vec(2, UInt((VLEN * 2).W)))) // 3 * 2 * 256 174 val selMaskMatrix = Wire(Vec(muxLength, Vec(2, UInt((VLENB * 2).W)))) // 3 * 2 * 16 175 dontTouch(selDataMatrix) 176 dontTouch(selMaskMatrix) 177 for(i <- 0 until muxLength){ 178 if(i == 0){ 179 selDataMatrix(i)(0) := Cat(0.U(VLEN.W), data(i)) 180 selDataMatrix(i)(1) := Cat(data(i), 0.U(VLEN.W)) 181 selMaskMatrix(i)(0) := Cat(0.U(VLENB.W), mask(i)) 182 selMaskMatrix(i)(1) := Cat(mask(i), 0.U(VLENB.W)) 183 } 184 else{ 185 selDataMatrix(i)(0) := Cat(data(i), data(0)) 186 selDataMatrix(i)(1) := Cat(data(0), data(i)) 187 selMaskMatrix(i)(0) := Cat(mask(i), mask(0)) 188 selMaskMatrix(i)(1) := Cat(mask(0), mask(i)) 189 } 190 } 191 val selIdxVec = (0 until muxLength).map(_.U) 192 val selIdx = PriorityMux(valids.reverse, selIdxVec.reverse) 193 194 val selData = Mux(index === 0.U, 195 selDataMatrix(selIdx)(0), 196 selDataMatrix(selIdx)(1)) 197 val selMask = Mux(index === 0.U, 198 selMaskMatrix(selIdx)(0), 199 selMaskMatrix(selIdx)(1)) 200 (selData, selMask) 201 } 202 } 203 def mergeDataByIndex(data: UInt, mask: UInt, index: UInt): (UInt, UInt) = { 204 mergeDataByIndex(Seq(data), Seq(mask), index, Seq(true.B)) 205 } 206} 207abstract class VLSUModule(implicit p: Parameters) extends XSModule 208 with HasVLSUParameters 209 with HasCircularQueuePtrHelper 210abstract class VLSUBundle(implicit p: Parameters) extends XSBundle 211 with HasVLSUParameters 212 213class VLSUBundleWithMicroOp(implicit p: Parameters) extends VLSUBundle { 214 val uop = new DynInst 215} 216 217class OnlyVecExuOutput(implicit p: Parameters) extends VLSUBundle { 218 val isvec = Bool() 219 val vecdata = UInt(VLEN.W) 220 val mask = UInt(VLENB.W) 221 // val rob_idx_valid = Vec(2, Bool()) 222 // val inner_idx = Vec(2, UInt(3.W)) 223 // val rob_idx = Vec(2, new RobPtr) 224 // val offset = Vec(2, UInt(4.W)) 225 val reg_offset = UInt(vOffsetBits.W) 226 val vecActive = Bool() // 1: vector active element, 0: vector not active element 227 val is_first_ele = Bool() 228 val elemIdx = UInt(elemIdxBits.W) // element index 229 val elemIdxInsideVd = UInt(elemIdxBits.W) // element index in scope of vd 230 // val uopQueuePtr = new VluopPtr 231 // val flowPtr = new VlflowPtr 232} 233 234class VecExuOutput(implicit p: Parameters) extends MemExuOutput with HasVLSUParameters { 235 val vec = new OnlyVecExuOutput 236 val alignedType = UInt(alignTypeBits.W) 237 // feedback 238 val vecFeedback = Bool() 239} 240 241class VecUopBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp { 242 val flowMask = UInt(VLENB.W) // each bit for a flow 243 val byteMask = UInt(VLENB.W) // each bit for a byte 244 val data = UInt(VLEN.W) 245 // val fof = Bool() // fof is only used for vector loads 246 val excp_eew_index = UInt(elemIdxBits.W) 247 // val exceptionVec = ExceptionVec() // uop has exceptionVec 248 val baseAddr = UInt(VAddrBits.W) 249 val stride = UInt(VLEN.W) 250 val flow_counter = UInt(flowIdxBits.W) 251 252 // instruction decode result 253 val flowNum = UInt(flowIdxBits.W) // # of flows in a uop 254 // val flowNumLog2 = UInt(log2Up(flowIdxBits).W) // log2(flowNum), for better timing of multiplication 255 val nfields = UInt(fieldBits.W) // NFIELDS 256 val vm = Bool() // whether vector masking is enabled 257 val usWholeReg = Bool() // unit-stride, whole register load 258 val usMaskReg = Bool() // unit-stride, masked store/load 259 val eew = VEew() // size of memory elements 260 val sew = UInt(ewBits.W) 261 val emul = UInt(mulBits.W) 262 val lmul = UInt(mulBits.W) 263 val vlmax = UInt(elemIdxBits.W) 264 val instType = UInt(3.W) 265 val vd_last_uop = Bool() 266 val vd_first_uop = Bool() 267} 268 269class VecFlowBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp { 270 val vaddr = UInt(VAddrBits.W) 271 val mask = UInt(VLENB.W) 272 val alignedType = UInt(alignTypeBits.W) 273 val vecActive = Bool() 274 val elemIdx = UInt(elemIdxBits.W) 275 val is_first_ele = Bool() 276 277 // pack 278 val isPackage = Bool() 279 val packageNum = UInt((log2Up(VLENB) + 1).W) 280 val originAlignedType = UInt(alignTypeBits.W) 281} 282 283class VecMemExuOutput(isVector: Boolean = false)(implicit p: Parameters) extends VLSUBundle{ 284 val output = new MemExuOutput(isVector) 285 val vecFeedback = Bool() 286 val mmio = Bool() 287 val usSecondInv = Bool() 288 val elemIdx = UInt(elemIdxBits.W) 289 val alignedType = UInt(alignTypeBits.W) 290 val mbIndex = UInt(vsmBindexBits.W) 291 val mask = UInt(VLENB.W) 292 val vaddr = UInt(VAddrBits.W) 293} 294 295object MulNum { 296 def apply (mul: UInt): UInt = { //mul means emul or lmul 297 (LookupTree(mul,List( 298 "b101".U -> 1.U , // 1/8 299 "b110".U -> 1.U , // 1/4 300 "b111".U -> 1.U , // 1/2 301 "b000".U -> 1.U , // 1 302 "b001".U -> 2.U , // 2 303 "b010".U -> 4.U , // 4 304 "b011".U -> 8.U // 8 305 )))} 306} 307/** 308 * when emul is greater than or equal to 1, this means the entire register needs to be written; 309 * otherwise, only write the specified number of bytes */ 310object MulDataSize { 311 def apply (mul: UInt): UInt = { //mul means emul or lmul 312 (LookupTree(mul,List( 313 "b101".U -> 2.U , // 1/8 314 "b110".U -> 4.U , // 1/4 315 "b111".U -> 8.U , // 1/2 316 "b000".U -> 16.U , // 1 317 "b001".U -> 16.U , // 2 318 "b010".U -> 16.U , // 4 319 "b011".U -> 16.U // 8 320 )))} 321} 322 323object OneRegNum { 324 def apply (eew: UInt): UInt = { //mul means emul or lmul 325 require(eew.getWidth == 2, "The eew width must be 2.") 326 (LookupTree(eew, List( 327 "b00".U -> 16.U , // 1 328 "b01".U -> 8.U , // 2 329 "b10".U -> 4.U , // 4 330 "b11".U -> 2.U // 8 331 )))} 332} 333 334//index inst read data byte 335object SewDataSize { 336 def apply (sew: UInt): UInt = { 337 (LookupTree(sew,List( 338 "b000".U -> 1.U , // 1 339 "b001".U -> 2.U , // 2 340 "b010".U -> 4.U , // 4 341 "b011".U -> 8.U // 8 342 )))} 343} 344 345// strided inst read data byte 346object EewDataSize { 347 def apply (eew: UInt): UInt = { 348 require(eew.getWidth == 2, "The eew width must be 2.") 349 (LookupTree(eew, List( 350 "b00".U -> 1.U , // 1 351 "b01".U -> 2.U , // 2 352 "b10".U -> 4.U , // 4 353 "b11".U -> 8.U // 8 354 )))} 355} 356 357object loadDataSize { 358 def apply (instType: UInt, emul: UInt, eew: UInt, sew: UInt): UInt = { 359 (LookupTree(instType,List( 360 "b000".U -> MulDataSize(emul), // unit-stride 361 "b010".U -> EewDataSize(eew) , // strided 362 "b001".U -> SewDataSize(sew) , // indexed-unordered 363 "b011".U -> SewDataSize(sew) , // indexed-ordered 364 "b100".U -> EewDataSize(eew) , // segment unit-stride 365 "b110".U -> EewDataSize(eew) , // segment strided 366 "b101".U -> SewDataSize(sew) , // segment indexed-unordered 367 "b111".U -> SewDataSize(sew) // segment indexed-ordered 368 )))} 369} 370 371object storeDataSize { 372 def apply (instType: UInt, eew: UInt, sew: UInt): UInt = { 373 (LookupTree(instType,List( 374 "b000".U -> EewDataSize(eew) , // unit-stride, do not use 375 "b010".U -> EewDataSize(eew) , // strided 376 "b001".U -> SewDataSize(sew) , // indexed-unordered 377 "b011".U -> SewDataSize(sew) , // indexed-ordered 378 "b100".U -> EewDataSize(eew) , // segment unit-stride 379 "b110".U -> EewDataSize(eew) , // segment strided 380 "b101".U -> SewDataSize(sew) , // segment indexed-unordered 381 "b111".U -> SewDataSize(sew) // segment indexed-ordered 382 )))} 383} 384 385/** 386 * these are used to obtain immediate addresses for index instruction */ 387object EewEq8 { 388 def apply(index:UInt, flow_inner_idx: UInt): UInt = { 389 (LookupTree(flow_inner_idx,List( 390 0.U -> index(7 ,0 ), 391 1.U -> index(15,8 ), 392 2.U -> index(23,16 ), 393 3.U -> index(31,24 ), 394 4.U -> index(39,32 ), 395 5.U -> index(47,40 ), 396 6.U -> index(55,48 ), 397 7.U -> index(63,56 ), 398 8.U -> index(71,64 ), 399 9.U -> index(79,72 ), 400 10.U -> index(87,80 ), 401 11.U -> index(95,88 ), 402 12.U -> index(103,96 ), 403 13.U -> index(111,104), 404 14.U -> index(119,112), 405 15.U -> index(127,120) 406 )))} 407} 408 409object EewEq16 { 410 def apply(index: UInt, flow_inner_idx: UInt): UInt = { 411 (LookupTree(flow_inner_idx, List( 412 0.U -> index(15, 0), 413 1.U -> index(31, 16), 414 2.U -> index(47, 32), 415 3.U -> index(63, 48), 416 4.U -> index(79, 64), 417 5.U -> index(95, 80), 418 6.U -> index(111, 96), 419 7.U -> index(127, 112) 420 )))} 421} 422 423object EewEq32 { 424 def apply(index: UInt, flow_inner_idx: UInt): UInt = { 425 (LookupTree(flow_inner_idx, List( 426 0.U -> index(31, 0), 427 1.U -> index(63, 32), 428 2.U -> index(95, 64), 429 3.U -> index(127, 96) 430 )))} 431} 432 433object EewEq64 { 434 def apply (index: UInt, flow_inner_idx: UInt): UInt = { 435 (LookupTree(flow_inner_idx, List( 436 0.U -> index(63, 0), 437 1.U -> index(127, 64) 438 )))} 439} 440 441object IndexAddr { 442 def apply (index: UInt, flow_inner_idx: UInt, eew: UInt): UInt = { 443 require(eew.getWidth == 2, "The eew width must be 2.") 444 (LookupTree(eew, List( 445 "b00".U -> EewEq8 (index = index, flow_inner_idx = flow_inner_idx ), // Imm is 1 Byte // TODO: index maybe cross register 446 "b01".U -> EewEq16(index = index, flow_inner_idx = flow_inner_idx ), // Imm is 2 Byte 447 "b10".U -> EewEq32(index = index, flow_inner_idx = flow_inner_idx ), // Imm is 4 Byte 448 "b11".U -> EewEq64(index = index, flow_inner_idx = flow_inner_idx ) // Imm is 8 Byte 449 )))} 450} 451 452object Log2Num { 453 def apply (num: UInt): UInt = { 454 (LookupTree(num,List( 455 16.U -> 4.U, 456 8.U -> 3.U, 457 4.U -> 2.U, 458 2.U -> 1.U, 459 1.U -> 0.U 460 )))} 461} 462 463object GenUopIdxInField { 464 /** 465 * Used in normal vector instruction 466 * */ 467 def apply (instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = { 468 val isIndexed = instType(0) 469 val mulInField = Mux( 470 isIndexed, 471 Mux(lmul.asSInt > emul.asSInt, lmul, emul), 472 emul 473 ) 474 LookupTree(mulInField, List( 475 "b101".U -> 0.U, 476 "b110".U -> 0.U, 477 "b111".U -> 0.U, 478 "b000".U -> 0.U, 479 "b001".U -> uopIdx(0), 480 "b010".U -> uopIdx(1, 0), 481 "b011".U -> uopIdx(2, 0) 482 )) 483 } 484 /** 485 * Only used in segment instruction. 486 * */ 487 def apply (select: UInt, uopIdx: UInt): UInt = { 488 LookupTree(select, List( 489 "b101".U -> 0.U, 490 "b110".U -> 0.U, 491 "b111".U -> 0.U, 492 "b000".U -> 0.U, 493 "b001".U -> uopIdx(0), 494 "b010".U -> uopIdx(1, 0), 495 "b011".U -> uopIdx(2, 0) 496 )) 497 } 498} 499 500//eew decode 501object EewLog2 extends VLSUConstants { 502 // def apply (eew: UInt): UInt = { 503 // (LookupTree(eew,List( 504 // "b000".U -> "b000".U , // 1 505 // "b101".U -> "b001".U , // 2 506 // "b110".U -> "b010".U , // 4 507 // "b111".U -> "b011".U // 8 508 // )))} 509 def apply(eew: UInt): UInt = { 510 require(eew.getWidth == 2, "The eew width must be 2.") 511 ZeroExt(eew, ewBits) 512 } 513} 514 515object GenRealFlowNum { 516 /** 517 * unit-stride instructions don't use this method; 518 * other instructions generate realFlowNum by EmulDataSize >> eew, 519 * EmulDataSize means the number of bytes that need to be written to the register, 520 * eew means the number of bytes written at once. 521 * 522 * @param instType As the name implies. 523 * @param emul As the name implies. 524 * @param lmul As the name implies. 525 * @param eew As the name implies. 526 * @param sew As the name implies. 527 * @param isSegment Only modules related to segment need to be set to true. 528 * @return FlowNum of instruction. 529 * 530 */ 531 def apply (instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt, isSegment: Boolean = false): UInt = { 532 require(instType.getWidth == 3, "The instType width must be 3, (isSegment, mop)") 533 require(eew.getWidth == 2, "The eew width must be 2.") 534 // Because the new segmentunit is needed. But the previous implementation is retained for the time being in case of emergency. 535 val segmentIndexFlowNum = if (isSegment) (MulDataSize(lmul) >> sew(1,0)).asUInt 536 else Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt) 537 (LookupTree(instType,List( 538 "b000".U -> (MulDataSize(emul) >> eew).asUInt, // store use, load do not use 539 "b010".U -> (MulDataSize(emul) >> eew).asUInt, // strided 540 "b001".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt), // indexed-unordered 541 "b011".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt), // indexed-ordered 542 "b100".U -> (MulDataSize(emul) >> eew).asUInt, // segment unit-stride 543 "b110".U -> (MulDataSize(emul) >> eew).asUInt, // segment strided 544 "b101".U -> segmentIndexFlowNum, // segment indexed-unordered 545 "b111".U -> segmentIndexFlowNum // segment indexed-ordered 546 )))} 547} 548 549object GenRealFlowLog2 extends VLSUConstants { 550 /** 551 * GenRealFlowLog2 = Log2(GenRealFlowNum) 552 * 553 * @param instType As the name implies. 554 * @param emul As the name implies. 555 * @param lmul As the name implies. 556 * @param eew As the name implies. 557 * @param sew As the name implies. 558 * @param isSegment Only modules related to segment need to be set to true. 559 * @return FlowNumLog2 of instruction. 560 */ 561 def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt, isSegment: Boolean = false): UInt = { 562 require(instType.getWidth == 3, "The instType width must be 3, (isSegment, mop)") 563 require(eew.getWidth == 2, "The eew width must be 2.") 564 val emulLog2 = Mux(emul.asSInt >= 0.S, 0.U, emul) 565 val lmulLog2 = Mux(lmul.asSInt >= 0.S, 0.U, lmul) 566 val eewRealFlowLog2 = emulLog2 + log2Up(VLENB).U - eew 567 val sewRealFlowLog2 = lmulLog2 + log2Up(VLENB).U - sew(1, 0) 568 // Because the new segmentunit is needed. But the previous implementation is retained for the time being in case of emergency. 569 val segmentIndexFlowLog2 = if (isSegment) sewRealFlowLog2 else Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2) 570 (LookupTree(instType, List( 571 "b000".U -> eewRealFlowLog2, // unit-stride 572 "b010".U -> eewRealFlowLog2, // strided 573 "b001".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-unordered 574 "b011".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-ordered 575 "b100".U -> eewRealFlowLog2, // segment unit-stride 576 "b110".U -> eewRealFlowLog2, // segment strided 577 "b101".U -> segmentIndexFlowLog2, // segment indexed-unordered 578 "b111".U -> segmentIndexFlowLog2, // segment indexed-ordered 579 ))) 580 } 581} 582 583/** 584 * GenElemIdx generals an element index within an instruction, given a certain uopIdx and a known flowIdx 585 * inside the uop. 586 */ 587object GenElemIdx extends VLSUConstants { 588 def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt, 589 uopIdx: UInt, flowIdx: UInt): UInt = { 590 require(eew.getWidth == 2, "The eew width must be 2.") 591 val isIndexed = instType(0).asBool 592 val eewUopFlowsLog2 = Mux(emul.asSInt > 0.S, 0.U, emul) + log2Up(VLENB).U - eew 593 val sewUopFlowsLog2 = Mux(lmul.asSInt > 0.S, 0.U, lmul) + log2Up(VLENB).U - sew(1, 0) 594 val uopFlowsLog2 = Mux( 595 isIndexed, 596 Mux(emul.asSInt > lmul.asSInt, eewUopFlowsLog2, sewUopFlowsLog2), 597 eewUopFlowsLog2 598 ) 599 LookupTree(uopFlowsLog2, List( 600 0.U -> uopIdx, 601 1.U -> uopIdx ## flowIdx(0), 602 2.U -> uopIdx ## flowIdx(1, 0), 603 3.U -> uopIdx ## flowIdx(2, 0), 604 4.U -> uopIdx ## flowIdx(3, 0) 605 )) 606 } 607} 608 609/** 610 * GenVLMAX calculates VLMAX, which equals MUL * ew 611 */ 612object GenVLMAXLog2 extends VLSUConstants { 613 def apply(lmul: UInt, sew: UInt): UInt = lmul + log2Up(VLENB).U - sew 614} 615object GenVLMAX { 616 def apply(lmul: UInt, sew: UInt): UInt = 1.U << GenVLMAXLog2(lmul, sew) 617} 618/** 619 * generate mask base on vlmax 620 * example: vlmax = b100, max = b011 621 * */ 622object GenVlMaxMask{ 623 def apply(vlmax: UInt, length: Int): UInt = (vlmax - 1.U)(length-1, 0) 624} 625 626object GenUSWholeRegVL extends VLSUConstants { 627 def apply(nfields: UInt, eew: UInt): UInt = { 628 require(eew.getWidth == 2, "The eew width must be 2.") 629 LookupTree(eew, List( 630 "b00".U -> (nfields << (log2Up(VLENB) - 0)), 631 "b01".U -> (nfields << (log2Up(VLENB) - 1)), 632 "b10".U -> (nfields << (log2Up(VLENB) - 2)), 633 "b11".U -> (nfields << (log2Up(VLENB) - 3)) 634 )) 635 } 636} 637object GenUSWholeEmul extends VLSUConstants{ 638 def apply(nf: UInt): UInt={ 639 LookupTree(nf,List( 640 "b000".U -> "b000".U(mulBits.W), 641 "b001".U -> "b001".U(mulBits.W), 642 "b011".U -> "b010".U(mulBits.W), 643 "b111".U -> "b011".U(mulBits.W) 644 )) 645 } 646} 647 648 649object GenUSMaskRegVL extends VLSUConstants { 650 def apply(vl: UInt): UInt = { 651 Mux(vl(2,0) === 0.U , (vl >> 3.U), ((vl >> 3.U) + 1.U)) 652 } 653} 654 655object GenUopByteMask { 656 def apply(flowMask: UInt, alignedType: UInt): UInt = { 657 LookupTree(alignedType, List( 658 "b000".U -> flowMask, 659 "b001".U -> FillInterleaved(2, flowMask), 660 "b010".U -> FillInterleaved(4, flowMask), 661 "b011".U -> FillInterleaved(8, flowMask), 662 "b100".U -> FillInterleaved(16, flowMask) 663 )) 664 } 665} 666 667object GenVdIdxInField extends VLSUConstants { 668 def apply(instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = { 669 val vdIdx = Wire(UInt(log2Up(maxMUL).W)) 670 when (instType(1,0) === "b00".U || instType(1,0) === "b10".U || lmul.asSInt > emul.asSInt) { 671 // Unit-stride or Strided, or indexed with lmul >= emul 672 vdIdx := uopIdx 673 }.otherwise { 674 // Indexed with lmul <= emul 675 val multiple = emul - lmul 676 val uopIdxWidth = uopIdx.getWidth 677 vdIdx := LookupTree(multiple, List( 678 0.U -> uopIdx, 679 1.U -> (uopIdx >> 1), 680 2.U -> (uopIdx >> 2), 681 3.U -> (uopIdx >> 3) 682 )) 683 } 684 vdIdx 685 } 686} 687/** 688* Use start and vl to generate flow activative mask 689* mod = true fill 0 690* mod = false fill 1 691*/ 692object GenFlowMask extends VLSUConstants { 693 def apply(elementMask: UInt, start: UInt, vl: UInt , mod: Boolean): UInt = { 694 val startMask = ~UIntToMask(start, VLEN) 695 val vlMask = UIntToMask(vl, VLEN) 696 val maskVlStart = vlMask & startMask 697 if(mod){ 698 elementMask & maskVlStart 699 } 700 else{ 701 (~elementMask).asUInt & maskVlStart 702 } 703 } 704} 705 706object genVWmask128 { 707 def apply(addr: UInt, sizeEncode: UInt): UInt = { 708 (LookupTree(sizeEncode, List( 709 "b000".U -> 0x1.U, //0001 << addr(2:0) 710 "b001".U -> 0x3.U, //0011 711 "b010".U -> 0xf.U, //1111 712 "b011".U -> 0xff.U, //11111111 713 "b100".U -> 0xffff.U //1111111111111111 714 )) << addr(3, 0)).asUInt 715 } 716} 717/* 718* only use in max length is 128 719*/ 720object genVWdata { 721 def apply(data: UInt, sizeEncode: UInt): UInt = { 722 LookupTree(sizeEncode, List( 723 "b000".U -> Fill(16, data(7, 0)), 724 "b001".U -> Fill(8, data(15, 0)), 725 "b010".U -> Fill(4, data(31, 0)), 726 "b011".U -> Fill(2, data(63,0)), 727 "b100".U -> data(127,0) 728 )) 729 } 730} 731 732object genUSSplitAddr{ 733 def apply(addr: UInt, index: UInt): UInt = { 734 val tmpAddr = Cat(addr(38, 4), 0.U(4.W)) 735 val nextCacheline = tmpAddr + 16.U 736 LookupTree(index, List( 737 0.U -> tmpAddr, 738 1.U -> nextCacheline 739 )) 740 } 741} 742 743object genUSSplitMask{ 744 def apply(mask: UInt, index: UInt): UInt = { 745 require(mask.getWidth == 32) // need to be 32-bits 746 LookupTree(index, List( 747 0.U -> mask(15, 0), 748 1.U -> mask(31, 16), 749 )) 750 } 751} 752 753object genUSSplitData{ 754 def apply(data: UInt, index: UInt, addrOffset: UInt): UInt = { 755 val tmpData = WireInit(0.U(256.W)) 756 val lookupTable = (0 until 16).map{case i => 757 if(i == 0){ 758 i.U -> Cat(0.U(128.W), data) 759 }else{ 760 i.U -> Cat(0.U(((16-i)*8).W), data, 0.U((i*8).W)) 761 } 762 } 763 tmpData := LookupTree(addrOffset, lookupTable).asUInt 764 765 LookupTree(index, List( 766 0.U -> tmpData(127, 0), 767 1.U -> tmpData(255, 128) 768 )) 769 } 770} 771 772object genVSData extends VLSUConstants { 773 def apply(data: UInt, elemIdx: UInt, alignedType: UInt): UInt = { 774 LookupTree(alignedType, List( 775 "b000".U -> ZeroExt(LookupTree(elemIdx(3, 0), List.tabulate(VLEN/8)(i => i.U -> getByte(data, i))), VLEN), 776 "b001".U -> ZeroExt(LookupTree(elemIdx(2, 0), List.tabulate(VLEN/16)(i => i.U -> getHalfWord(data, i))), VLEN), 777 "b010".U -> ZeroExt(LookupTree(elemIdx(1, 0), List.tabulate(VLEN/32)(i => i.U -> getWord(data, i))), VLEN), 778 "b011".U -> ZeroExt(LookupTree(elemIdx(0), List.tabulate(VLEN/64)(i => i.U -> getDoubleWord(data, i))), VLEN), 779 "b100".U -> data // if have wider element, it will broken 780 )) 781 } 782} 783 784// TODO: more elegant 785object genVStride extends VLSUConstants { 786 def apply(uopIdx: UInt, stride: UInt): UInt = { 787 LookupTree(uopIdx, List( 788 0.U -> 0.U, 789 1.U -> stride, 790 2.U -> (stride << 1), 791 3.U -> ((stride << 1).asUInt + stride), 792 4.U -> (stride << 2), 793 5.U -> ((stride << 2).asUInt + stride), 794 6.U -> ((stride << 2).asUInt + (stride << 1)), 795 7.U -> ((stride << 2).asUInt + (stride << 1) + stride) 796 )) 797 } 798} 799/** 800 * generate uopOffset, not used in segment instruction 801 * */ 802object genVUopOffset extends VLSUConstants { 803 def apply(instType: UInt, isfof: Bool, uopidx: UInt, nf: UInt, eew: UInt, stride: UInt, alignedType: UInt): UInt = { 804 val uopInsidefield = (uopidx >> nf).asUInt // when nf == 0, is uopidx 805 806 val fofVUopOffset = (LookupTree(instType,List( 807 "b000".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew) ) , // unit-stride fof 808 "b100".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew) ) , // segment unit-stride fof 809 ))).asUInt 810 811 val otherVUopOffset = (LookupTree(instType,List( 812 "b000".U -> ( uopInsidefield << alignedType ) , // unit-stride 813 "b010".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew) ) , // strided 814 "b001".U -> ( 0.U ) , // indexed-unordered 815 "b011".U -> ( 0.U ) , // indexed-ordered 816 "b100".U -> ( uopInsidefield << alignedType ) , // segment unit-stride 817 "b110".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew) ) , // segment strided 818 "b101".U -> ( 0.U ) , // segment indexed-unordered 819 "b111".U -> ( 0.U ) // segment indexed-ordered 820 ))).asUInt 821 822 Mux(isfof, fofVUopOffset, otherVUopOffset) 823 } 824} 825 826 827 828object genVFirstUnmask extends VLSUConstants { 829 /** 830 * Find the lowest unmasked number of bits. 831 * example: 832 * mask = 16'b1111_1111_1110_0000 833 * return 5 834 * @param mask 16bits of mask. 835 * @return lowest unmasked number of bits. 836 */ 837 def apply(mask: UInt): UInt = { 838 require(mask.getWidth == 16, "The mask width must be 16") 839 val select = (0 until 16).zip(mask.asBools).map{case (i, v) => 840 (v, i.U) 841 } 842 PriorityMuxDefault(select, 0.U) 843 } 844 845 def apply(mask: UInt, regOffset: UInt): UInt = { 846 require(mask.getWidth == 16, "The mask width must be 16") 847 val realMask = (mask >> regOffset).asUInt 848 val select = (0 until 16).zip(realMask.asBools).map{case (i, v) => 849 (v, i.U) 850 } 851 PriorityMuxDefault(select, 0.U) 852 } 853} 854 855