/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.mem._
import xiangshan.backend.fu.FuType
import freechips.rocketchip.diplomacy.BufferParams
import xiangshan.cache.mmu._
import xiangshan.cache._
import xiangshan.cache.wpu.ReplayCarry
import xiangshan.backend.fu.util.SdtrigExt
import xiangshan.ExceptionNO._
import xiangshan.backend.fu.vector.Bundles.VConfig
import xiangshan.backend.fu.vector.Utils.VecDataToMaskDataVec

class VSegmentBundle(implicit p: Parameters) extends VLSUBundle
{
  val vaddr = UInt(VAddrBits.W)
  val uop = new DynInst
  val paddr = UInt(PAddrBits.W)
  val mask = UInt(VLEN.W)
  val valid = Bool()
  val alignedType = UInt(alignTypeBits.W)
  val vl = UInt(elemIdxBits.W)
  val vlmaxInVd = UInt(elemIdxBits.W)
  val vlmaxMaskInVd = UInt(elemIdxBits.W)
  // for exception
  val vstart = UInt(elemIdxBits.W)
  val exceptionvaddr = UInt(VAddrBits.W)
  val exception_va = Bool()
  val exception_pa = Bool()
}

class VSegmentUnit (implicit p: Parameters) extends VLSUModule
  with HasDCacheParameters
  with MemoryOpConstants
  with SdtrigExt
  with HasLoadHelper
{
  val io = IO(new VSegmentUnitIO)

  val maxSize = VSegmentBufferSize

  class VSegUPtr(implicit p: Parameters) extends CircularQueuePtr[VSegUPtr](maxSize){
  }

  object VSegUPtr {
    def apply(f: Bool, v: UInt)(implicit p: Parameters): VSegUPtr = {
      val ptr = Wire(new VSegUPtr)
      ptr.flag := f
      ptr.value := v
      ptr
    }
  }

  // buffer uop
  val instMicroOp = Reg(new VSegmentBundle)
  val data = Reg(Vec(maxSize, UInt(VLEN.W)))
  val pdest = Reg(Vec(maxSize, UInt(PhyRegIdxWidth.W)))
  val uopIdx = Reg(Vec(maxSize, UopIdx()))
  val stride = Reg(Vec(maxSize, UInt(VLEN.W)))
  val allocated = RegInit(VecInit(Seq.fill(maxSize)(false.B)))
  val enqPtr = RegInit(0.U.asTypeOf(new VSegUPtr))
  val deqPtr = RegInit(0.U.asTypeOf(new VSegUPtr))
  val stridePtr = WireInit(0.U.asTypeOf(new VSegUPtr)) // for selecting stride/index

  val segmentIdx = RegInit(0.U(elemIdxBits.W))
  val fieldIdx = RegInit(0.U(fieldBits.W))
  val segmentOffset = RegInit(0.U(VAddrBits.W))
  val splitPtr = RegInit(0.U.asTypeOf(new VSegUPtr)) // for selecting load/store data
  val splitPtrNext = WireInit(0.U.asTypeOf(new VSegUPtr))

  val exception_va = WireInit(false.B)
  val exception_pa = WireInit(false.B)

  val maxSegIdx = instMicroOp.vl - 1.U
  val maxNfields = instMicroOp.uop.vpu.nf

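  // Sanity checks below: segmentIdx should stay within [0, vl - 1] and fieldIdx within [0, nf],
  // since vpu.nf holds NFIELDS - 1 as encoded in the instruction (so maxNfields above is the
  // largest valid field index, not the field count).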
  XSError(segmentIdx > maxSegIdx, s"segmentIdx > vl, something is wrong!\n")
  XSError(fieldIdx > maxNfields, s"fieldIdx > nfields, something is wrong!\n")

  // MicroOp
  val baseVaddr = instMicroOp.vaddr
  val alignedType = instMicroOp.alignedType
  val fuType = instMicroOp.uop.fuType
  val mask = instMicroOp.mask
  val exceptionVec = instMicroOp.uop.exceptionVec
  val issueEew = instMicroOp.uop.vpu.veew
  val issueLmul = instMicroOp.uop.vpu.vtype.vlmul
  val issueSew = instMicroOp.uop.vpu.vtype.vsew
  val issueEmul = EewLog2(issueEew) - issueSew + issueLmul
  val elemIdxInVd = segmentIdx & instMicroOp.vlmaxMaskInVd
  val issueInstType = Cat(true.B, instMicroOp.uop.fuOpType(6, 5)) // always a segment instruction
  val issueVLMAXLog2 = GenVLMAXLog2(
    Mux(issueLmul.asSInt > 0.S, 0.U, issueLmul),
    Mux(isIndexed(issueInstType), issueSew(1, 0), issueEew(1, 0))
  ) // log2 of the max element number in a vd
  val issueVlMax = instMicroOp.vlmaxInVd // max elementIdx in a vd
  val issueMaxIdxInIndex = GenVLMAX(Mux(issueEmul.asSInt > 0.S, 0.U, issueEmul), issueEew(1, 0)) // max element number in the index register
  val issueMaxIdxInIndexMask = GenVlMaxMask(issueMaxIdxInIndex, elemIdxBits)
  val issueMaxIdxInIndexLog2 = GenVLMAXLog2(Mux(issueEmul.asSInt > 0.S, 0.U, issueEmul), issueEew(1, 0))
  val issueIndexIdx = segmentIdx & issueMaxIdxInIndexMask
  val segmentActive = (mask & UIntToOH(segmentIdx)).orR

  // Segment instruction's FSM
  /*
  * s_idle: wait for a request
  * s_flush_sbuffer_req: request an sbuffer flush
  * s_wait_flush_sbuffer_resp: wait until the sbuffer is empty
  * s_tlb_req: send a request to the tlb
  * s_wait_tlb_resp: wait for the tlb resp
  * s_pm: check pmp
  * s_cache_req: send a request to the dcache
  * s_cache_resp: wait for the dcache resp
  * s_latch_and_merge_data: merge load data into vd
  * s_send_data: send store data to the sbuffer
  * s_finish: writeback the uop
  * */
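  /*
  * Overall flow (one pass per field access):
  *   s_idle -> s_flush_sbuffer_req [-> s_wait_flush_sbuffer_resp] -> s_tlb_req -> s_wait_tlb_resp -> s_pm
  *     -> load : s_cache_req -> s_cache_resp -> s_latch_and_merge_data
  *     -> store: s_send_data
  *   then back to s_tlb_req for the next field/segment, or to s_finish after the last field
  *   of the last segment (or when s_pm detects an exception). Inactive segments skip
  *   translation and memory access.
  * */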
  val s_idle :: s_flush_sbuffer_req :: s_wait_flush_sbuffer_resp :: s_tlb_req :: s_wait_tlb_resp :: s_pm :: s_cache_req :: s_cache_resp :: s_latch_and_merge_data :: s_send_data :: s_finish :: Nil = Enum(11)
  val state = RegInit(s_idle)
  val stateNext = WireInit(s_idle)
  val sbufferEmpty = io.flush_sbuffer.empty

  /**
   * state update
   */
  state := stateNext

  /**
   * state transfer
   */
  when(state === s_idle){
    stateNext := Mux(isAfter(enqPtr, deqPtr), s_flush_sbuffer_req, s_idle)
  }.elsewhen(state === s_flush_sbuffer_req){
    stateNext := Mux(sbufferEmpty, s_tlb_req, s_wait_flush_sbuffer_resp) // if the sbuffer is already empty, query the tlb directly

  }.elsewhen(state === s_wait_flush_sbuffer_resp){
    stateNext := Mux(sbufferEmpty, s_tlb_req, s_wait_flush_sbuffer_resp)

  }.elsewhen(state === s_tlb_req){
    stateNext := Mux(segmentActive, s_wait_tlb_resp, Mux(FuType.isVLoad(instMicroOp.uop.fuType), s_latch_and_merge_data, s_send_data))

  }.elsewhen(state === s_wait_tlb_resp){
    stateNext := Mux(!io.dtlb.resp.bits.miss && io.dtlb.resp.fire, s_pm, s_tlb_req)

  }.elsewhen(state === s_pm){
    /* a vector store sends its data to the sbuffer, so it does not need to query the dcache */
    stateNext := Mux(exception_pa || exception_va,
                     s_finish,
                     Mux(FuType.isVLoad(instMicroOp.uop.fuType), s_cache_req, s_send_data))

  }.elsewhen(state === s_cache_req){
    stateNext := Mux(io.rdcache.req.fire, s_cache_resp, s_cache_req)

  }.elsewhen(state === s_cache_resp){
    when(io.rdcache.resp.fire) {
      when(io.rdcache.resp.bits.miss) {
        stateNext := s_cache_req
      }.otherwise {
        stateNext := Mux(FuType.isVLoad(instMicroOp.uop.fuType), s_latch_and_merge_data, s_send_data)
      }
    }.otherwise{
      stateNext := s_cache_resp
    }
  /* if the segment is inactive, there is no need to access all of its fields */
  }.elsewhen(state === s_latch_and_merge_data) {
    when((segmentIdx === maxSegIdx) && (fieldIdx === maxNfields) ||
         ((segmentIdx === maxSegIdx) && !segmentActive)) {

      stateNext := s_finish // segment instruction finishes
    }.otherwise {
      stateNext := s_tlb_req // need to continue
    }
  /* if the segment is inactive, there is no need to access all of its fields */
  }.elsewhen(state === s_send_data) { // wait until the sbuffer accepts the data
    when(!io.sbuffer.fire && segmentActive) {
      stateNext := s_send_data
    }.elsewhen(((segmentIdx === maxSegIdx) && (fieldIdx === maxNfields)) ||
               ((segmentIdx === maxSegIdx) && !segmentActive)) {

      stateNext := s_finish // segment instruction finishes
    }.otherwise {
      stateNext := s_tlb_req // need to continue
    }
  }.elsewhen(state === s_finish){ // writeback uop
    stateNext := Mux(distanceBetween(enqPtr, deqPtr) === 0.U, s_idle, s_finish)

  }.otherwise{
    stateNext := s_idle
    XSError(true.B, s"Unknown state!\n")
  }

  /*************************************************************************
   * enqueue logic
   *************************************************************************/
  io.in.ready := true.B
  val fuOpType = io.in.bits.uop.fuOpType
  val vtype = io.in.bits.uop.vpu.vtype
  val mop = fuOpType(6, 5)
  val instType = Cat(true.B, mop)
  val eew = io.in.bits.uop.vpu.veew
  val sew = vtype.vsew
  val lmul = vtype.vlmul
  val vl = instMicroOp.vl
  val vm = instMicroOp.uop.vpu.vm
  val vstart = instMicroOp.uop.vpu.vstart
  val srcMask = GenFlowMask(Mux(vm, Fill(VLEN, 1.U(1.W)), io.in.bits.src_mask), vstart, vl, true)
  // when the first uop enqueues, latch the microOp of the segment instruction
  when(io.in.fire && !instMicroOp.valid){
    val vlmaxInVd = GenVLMAX(Mux(lmul.asSInt > 0.S, 0.U, lmul), Mux(isIndexed(instType), sew(1, 0), eew(1, 0))) // element number in a vd
    instMicroOp.vaddr := io.in.bits.src_rs1(VAddrBits - 1, 0)
    instMicroOp.valid := true.B // this is the first uop
    instMicroOp.alignedType := Mux(isIndexed(instType), sew(1, 0), eew(1, 0))
    instMicroOp.uop := io.in.bits.uop
    instMicroOp.mask := srcMask
    instMicroOp.vstart := 0.U
    instMicroOp.vlmaxInVd := vlmaxInVd
    instMicroOp.vlmaxMaskInVd := GenVlMaxMask(vlmaxInVd, elemIdxBits) // for merging data
    instMicroOp.vl := io.in.bits.src_vl.asTypeOf(VConfig()).vl
    segmentOffset := 0.U
  }
  // latch data
  when(io.in.fire){
    data(enqPtr.value) := io.in.bits.src_vs3
    stride(enqPtr.value) := io.in.bits.src_stride
    uopIdx(enqPtr.value) := io.in.bits.uop.vpu.vuopIdx
    pdest(enqPtr.value) := io.in.bits.uop.pdest
  }

  // update enqPtr, only one enqueue port
  when(io.in.fire){
    enqPtr := enqPtr + 1.U
  }

  /*************************************************************************
   * output logic
   *************************************************************************/

  val indexStride = IndexAddr( // index offset for indexed instructions
    index = stride(stridePtr.value),
    flow_inner_idx = issueIndexIdx,
    eew = issueEew
  )
  val realSegmentOffset = Mux(isIndexed(issueInstType),
                              indexStride,
                              segmentOffset)
  val vaddr = baseVaddr + (fieldIdx << alignedType).asUInt + realSegmentOffset
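  // Example (illustration only): for a unit-stride vlseg3e8.v (NFIELDS = 3, EEW = 8 bits),
  // field f of segment s is accessed at base + f * 1 + s * 3: (fieldIdx << alignedType) is
  // the field offset inside a segment, and segmentOffset advances by (nf + 1) << eew = 3
  // bytes per segment (see the segmentOffset update below). For indexed accesses, the
  // per-segment offset comes from the index register via IndexAddr instead.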
  /**
   * tlb req and tlb resp
   */

  // query DTLB IO Assign
  io.dtlb.req := DontCare
  io.dtlb.resp.ready := true.B
  io.dtlb.req.valid := state === s_tlb_req && segmentActive
  io.dtlb.req.bits.cmd := Mux(FuType.isVLoad(fuType), TlbCmd.read, TlbCmd.write)
  io.dtlb.req.bits.vaddr := vaddr
  io.dtlb.req.bits.size := instMicroOp.alignedType(2, 0)
  io.dtlb.req.bits.memidx.is_ld := FuType.isVLoad(fuType)
  io.dtlb.req.bits.memidx.is_st := FuType.isVStore(fuType)
  io.dtlb.req.bits.debug.robIdx := instMicroOp.uop.robIdx
  io.dtlb.req.bits.no_translate := false.B
  io.dtlb.req.bits.debug.pc := instMicroOp.uop.pc
  io.dtlb.req.bits.debug.isFirstIssue := DontCare
  io.dtlb.req_kill := false.B

  // tlb resp
  when(io.dtlb.resp.fire && state === s_wait_tlb_resp){
    exceptionVec(storePageFault) := io.dtlb.resp.bits.excp(0).pf.st
    exceptionVec(loadPageFault) := io.dtlb.resp.bits.excp(0).pf.ld
    exceptionVec(storeAccessFault) := io.dtlb.resp.bits.excp(0).af.st
    exceptionVec(loadAccessFault) := io.dtlb.resp.bits.excp(0).af.ld
    when(!io.dtlb.resp.bits.miss){
      instMicroOp.paddr := io.dtlb.resp.bits.paddr(0)
    }
  }
  // pmp
  // NOTE: only load/store exceptions are handled here; other exceptions are not sent to this unit
  val pmp = WireInit(io.pmpResp)
  when(state === s_pm){
    exception_va := exceptionVec(storePageFault) || exceptionVec(loadPageFault) ||
      exceptionVec(storeAccessFault) || exceptionVec(loadAccessFault)
    exception_pa := pmp.st || pmp.ld

    instMicroOp.exception_pa := exception_pa
    instMicroOp.exception_va := exception_va
    // update access-fault bits
    exceptionVec(loadAccessFault) := exceptionVec(loadAccessFault) || pmp.ld
    exceptionVec(storeAccessFault) := exceptionVec(storeAccessFault) || pmp.st

    when(exception_va || exception_pa){
      instMicroOp.exceptionvaddr := vaddr
      instMicroOp.vl := segmentIdx // for exception
      instMicroOp.vstart := segmentIdx // for exception
    }
  }

  /**
   * flush sbuffer IO Assign
   */
  io.flush_sbuffer.valid := !sbufferEmpty && (state === s_flush_sbuffer_req)


  /**
   * merge data for load
   */
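  // cacheData right-shifts the 128-bit dcache data (data_delayed) by the byte offset
  // vaddr(3, 0) within its 64-bit half; rdataVecHelper then picks one element of
  // alignedType width, and mergeDataWithElemIdx writes that element into slot
  // elemIdxInVd of the buffered vd data.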
  val cacheData = LookupTree(vaddr(3, 0), List(
    "b0000".U -> io.rdcache.resp.bits.data_delayed(63, 0),
    "b0001".U -> io.rdcache.resp.bits.data_delayed(63, 8),
    "b0010".U -> io.rdcache.resp.bits.data_delayed(63, 16),
    "b0011".U -> io.rdcache.resp.bits.data_delayed(63, 24),
    "b0100".U -> io.rdcache.resp.bits.data_delayed(63, 32),
    "b0101".U -> io.rdcache.resp.bits.data_delayed(63, 40),
    "b0110".U -> io.rdcache.resp.bits.data_delayed(63, 48),
    "b0111".U -> io.rdcache.resp.bits.data_delayed(63, 56),
    "b1000".U -> io.rdcache.resp.bits.data_delayed(127, 64),
    "b1001".U -> io.rdcache.resp.bits.data_delayed(127, 72),
    "b1010".U -> io.rdcache.resp.bits.data_delayed(127, 80),
    "b1011".U -> io.rdcache.resp.bits.data_delayed(127, 88),
    "b1100".U -> io.rdcache.resp.bits.data_delayed(127, 96),
    "b1101".U -> io.rdcache.resp.bits.data_delayed(127, 104),
    "b1110".U -> io.rdcache.resp.bits.data_delayed(127, 112),
    "b1111".U -> io.rdcache.resp.bits.data_delayed(127, 120)
  ))
  val pickData = rdataVecHelper(alignedType(1, 0), cacheData)
  val mergedData = mergeDataWithElemIdx(
    oldData = data(splitPtr.value),
    newData = Seq(pickData),
    alignedType = alignedType(1, 0),
    elemIdx = Seq(elemIdxInVd),
    valids = Seq(true.B)
  )
  when(state === s_latch_and_merge_data && segmentActive){
    data(splitPtr.value) := mergedData
  }
  /**
   * split data for store
   * */
  val splitData = genVSData(
    data = data(splitPtr.value),
    elemIdx = elemIdxInVd,
    alignedType = alignedType
  )
  val flowData = genVWdata(splitData, alignedType) // TODO: connect vstd, pass vector data
  val wmask = genVWmask(vaddr, alignedType(1, 0)) & Fill(VLENB, segmentActive)

  /**
   * rdcache req; a store does not need to query the dcache, because its elements are written to the sbuffer
   */
  io.rdcache.req := DontCare
  io.rdcache.req.valid := state === s_cache_req && FuType.isVLoad(fuType)
  io.rdcache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.rdcache.req.bits.vaddr := vaddr
  io.rdcache.req.bits.mask := mask
  io.rdcache.req.bits.data := flowData
  io.rdcache.pf_source := LOAD_SOURCE.U
  io.rdcache.req.bits.id := DontCare
  io.rdcache.resp.ready := true.B
  io.rdcache.s1_paddr_dup_lsu := instMicroOp.paddr
  io.rdcache.s1_paddr_dup_dcache := instMicroOp.paddr
  io.rdcache.s1_kill := false.B
  io.rdcache.s2_kill := false.B
  if (env.FPGAPlatform){
    io.rdcache.s0_pc := DontCare
    io.rdcache.s1_pc := DontCare
    io.rdcache.s2_pc := DontCare
  } else {
    io.rdcache.s0_pc := instMicroOp.uop.pc
    io.rdcache.s1_pc := instMicroOp.uop.pc
    io.rdcache.s2_pc := instMicroOp.uop.pc
  }
  io.rdcache.replacementUpdated := false.B
  io.rdcache.is128Req := false.B


  /**
   * write data to sbuffer
   * */

  io.sbuffer.bits := DontCare
  io.sbuffer.valid := state === s_send_data && segmentActive
  io.sbuffer.bits.vecValid := state === s_send_data && segmentActive
  io.sbuffer.bits.mask := wmask
  io.sbuffer.bits.data := flowData
  io.sbuffer.bits.vaddr := vaddr
  io.sbuffer.bits.cmd := MemoryOpConstants.M_XWR
  io.sbuffer.bits.id := DontCare
  io.sbuffer.bits.addr := instMicroOp.paddr

  /**
   * update ptr
   * */
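  // splitPtr selects the buffer entry (the vd register group) holding the field currently
  // being accessed: it advances by splitPtrOffset registers per field, and jumps back to
  // the entry holding the next segment's first field once a segment finishes. stridePtr
  // selects the stride/index source entry; deqPtr tracks writeback.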
  private val fieldActiveWriteFinish = io.sbuffer.fire && segmentActive // the data write finishes and the segment is active
  XSError(io.sbuffer.fire && !segmentActive, "Attempted to write an inactive segment to the sbuffer, something is wrong!\n")

  private val segmentInactiveFinish = ((state === s_latch_and_merge_data) || (state === s_send_data)) && !segmentActive

  val splitPtrOffset = Mux(lmul.asSInt < 0.S, 1.U, (1.U << lmul).asUInt)
  splitPtrNext :=
    Mux(fieldIdx === maxNfields || !segmentActive, // if the segment is active, finish all of its fields first; otherwise jump to the next segment
        (deqPtr + ((segmentIdx +& 1.U) >> issueVLMAXLog2).asUInt), // segment finishes
        (splitPtr + splitPtrOffset)) // next field
  dontTouch(issueVLMAXLog2)
  dontTouch(splitPtrNext)
  dontTouch(stridePtr)

  // update splitPtr
  when(state === s_latch_and_merge_data || (state === s_send_data && (fieldActiveWriteFinish || !segmentActive))){
    splitPtr := splitPtrNext
  }.elsewhen(io.in.fire && !instMicroOp.valid){
    splitPtr := deqPtr // initialize splitPtr
  }

  // update stridePtr, only used for indexed accesses
  val strideOffset = Mux(isIndexed(issueInstType), segmentIdx >> issueMaxIdxInIndexLog2, 0.U)
  stridePtr := deqPtr + strideOffset

  // update fieldIdx
  when(io.in.fire && !instMicroOp.valid){ // init
    fieldIdx := 0.U
  }.elsewhen(state === s_latch_and_merge_data && segmentActive ||
            (state === s_send_data && fieldActiveWriteFinish)){ // only if the segment is active

    /* advance to the next field; wrap to 0 when the segment completes */
    fieldIdx := Mux(fieldIdx === maxNfields, 0.U, fieldIdx + 1.U)
  }.elsewhen(segmentInactiveFinish){ // the segment is inactive, go to the next segment
    fieldIdx := 0.U
  }
  // update segmentIdx
  when(io.in.fire && !instMicroOp.valid){
    segmentIdx := 0.U
  }.elsewhen(fieldIdx === maxNfields && (state === s_latch_and_merge_data || (state === s_send_data && fieldActiveWriteFinish)) &&
             segmentIdx =/= maxSegIdx){ // next segment, only if the segment is active

    segmentIdx := segmentIdx + 1.U
  }.elsewhen(segmentInactiveFinish && segmentIdx =/= maxSegIdx){ // if the segment is inactive, go to the next segment
    segmentIdx := segmentIdx + 1.U
  }

  // update segmentOffset
  /* increase segmentOffset when the last field of a segment completes, whether the segment is active or inactive */
  when((fieldIdx === maxNfields && (state === s_latch_and_merge_data || (state === s_send_data && fieldActiveWriteFinish))) ||
       segmentInactiveFinish){

    segmentOffset := segmentOffset + Mux(isUnitStride(issueInstType), (maxNfields +& 1.U) << issueEew(1, 0), stride(stridePtr.value))
  }

  // update deqPtr
  when(io.uopwriteback.fire){
    deqPtr := deqPtr + 1.U
  }

  /*************************************************************************
   * dequeue logic
   *************************************************************************/
  val vdIdxInField = GenUopIdxInField(Mux(isIndexed(instType), issueLmul, issueEmul), uopIdx(deqPtr.value))
  /* select the mask of vd, may be removed in the future */
  val realEw = Mux(isIndexed(issueInstType), issueSew(1, 0), issueEew(1, 0))
  val maskDataVec: Vec[UInt] = VecDataToMaskDataVec(instMicroOp.mask, realEw)
  val maskUsed = maskDataVec(vdIdxInField)

  when(stateNext === s_idle){
    instMicroOp.valid := false.B
  }
  io.uopwriteback.valid := (state === s_finish) && distanceBetween(enqPtr, deqPtr) =/= 0.U
  io.uopwriteback.bits.uop := instMicroOp.uop
  io.uopwriteback.bits.mask.get := instMicroOp.mask
  io.uopwriteback.bits.data := data(deqPtr.value)
  io.uopwriteback.bits.vdIdx.get := vdIdxInField
  io.uopwriteback.bits.uop.vpu.vl := instMicroOp.vl
  io.uopwriteback.bits.uop.vpu.vstart := instMicroOp.vstart
  io.uopwriteback.bits.uop.vpu.vmask := maskUsed
  io.uopwriteback.bits.uop.pdest := pdest(deqPtr.value)
  io.uopwriteback.bits.debug := DontCare
  io.uopwriteback.bits.vdIdxInField.get := vdIdxInField

  // to RS
  io.feedback.valid := state === s_finish && distanceBetween(enqPtr, deqPtr) =/= 0.U
  io.feedback.bits.hit := true.B
  io.feedback.bits.robIdx := instMicroOp.uop.robIdx
  io.feedback.bits.sourceType := DontCare
  io.feedback.bits.flushState := DontCare
  io.feedback.bits.dataInvalidSqIdx := DontCare
  io.feedback.bits.uopIdx.get := uopIdx(deqPtr.value)

  // exception
  io.exceptionAddr := DontCare // TODO: fix it when exceptions are handled
}