package xiangshan.backend.issue

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utils.{HasPerfEvents, OptionWrapper}
import xiangshan._
import xiangshan.backend.Bundles._
import xiangshan.backend.datapath.DataConfig._
import xiangshan.backend.datapath.WbConfig._
import xiangshan.backend.fu.FuType
import xiangshan.backend.regfile.RfWritePortWithConfig
import xiangshan.backend.rename.BusyTable
import xiangshan.mem.{LsqEnqCtrl, LsqEnqIO, MemWaitUpdateReq, SqPtr, LqPtr}
import xiangshan.backend.datapath.WbConfig.V0WB
import xiangshan.backend.regfile.VlPregParams
import xiangshan.backend.regcache.RegCacheTagTable

sealed trait SchedulerType

case class IntScheduler() extends SchedulerType
case class FpScheduler() extends SchedulerType
case class MemScheduler() extends SchedulerType
case class VfScheduler() extends SchedulerType
case class NoScheduler() extends SchedulerType

class Scheduler(val params: SchdBlockParams)(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false

  val numIntStateWrite = backendParams.numPregWb(IntData())
  val numFpStateWrite = backendParams.numPregWb(FpData())
  val numVfStateWrite = backendParams.numPregWb(VecData())
  val numV0StateWrite = backendParams.numPregWb(V0Data())
  val numVlStateWrite = backendParams.numPregWb(VlData())

  val dispatch2Iq = LazyModule(new Dispatch2Iq(params))
  val issueQueue = params.issueBlockParams.map(x => LazyModule(new IssueQueue(x).suggestName(x.getIQName)))

  lazy val module: SchedulerImpBase = params.schdType match {
    case IntScheduler() => new SchedulerArithImp(this)(params, p)
    case FpScheduler()  => new SchedulerArithImp(this)(params, p)
    case MemScheduler() => new SchedulerMemImp(this)(params, p)
    case VfScheduler()  => new SchedulerArithImp(this)(params, p)
    case _ => null
  }
}

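// Usage sketch (illustrative only): in the backend top level, each scheduler block is
// expected to be created as a LazyModule from its SchdBlockParams. The name
// `intSchdParams` below is an assumption for the example, not part of this file.
//
//   val intScheduler = LazyModule(new Scheduler(intSchdParams))
//   // after diplomacy elaboration, `intScheduler.module` resolves to SchedulerArithImp,
//   // since IntScheduler() maps to SchedulerArithImp in the match above.
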
class SchedulerIO()(implicit params: SchdBlockParams, p: Parameters) extends XSBundle {
  // params alias
  private val LoadQueueSize = VirtualLoadQueueSize

  val fromTop = new Bundle {
    val hartId = Input(UInt(8.W))
  }
  val fromWbFuBusyTable = new Bundle {
    val fuBusyTableRead = MixedVec(params.issueBlockParams.map(x => Input(x.genWbFuBusyTableReadBundle)))
  }
  val wbFuBusyTable = MixedVec(params.issueBlockParams.map(x => Output(x.genWbFuBusyTableWriteBundle)))
  val intIQValidNumVec = Output(MixedVec(backendParams.genIntIQValidNumBundle))
  val fpIQValidNumVec = Output(MixedVec(backendParams.genFpIQValidNumBundle))

  val fromCtrlBlock = new Bundle {
    val flush = Flipped(ValidIO(new Redirect))
  }
  val fromDispatch = new Bundle {
    val allocPregs = Vec(RenameWidth, Input(new ResetPregStateReq))
    val uops = Vec(params.numUopIn, Flipped(DecoupledIO(new DynInst)))
  }
  val intWriteBack = MixedVec(Vec(backendParams.numPregWb(IntData()),
    new RfWritePortWithConfig(backendParams.intPregParams.dataCfg, backendParams.intPregParams.addrWidth)))
  val fpWriteBack = MixedVec(Vec(backendParams.numPregWb(FpData()),
    new RfWritePortWithConfig(backendParams.fpPregParams.dataCfg, backendParams.fpPregParams.addrWidth)))
  val vfWriteBack = MixedVec(Vec(backendParams.numPregWb(VecData()),
    new RfWritePortWithConfig(backendParams.vfPregParams.dataCfg, backendParams.vfPregParams.addrWidth)))
  val v0WriteBack = MixedVec(Vec(backendParams.numPregWb(V0Data()),
    new RfWritePortWithConfig(backendParams.v0PregParams.dataCfg, backendParams.v0PregParams.addrWidth)))
  val vlWriteBack = MixedVec(Vec(backendParams.numPregWb(VlData()),
    new RfWritePortWithConfig(backendParams.vlPregParams.dataCfg, backendParams.vlPregParams.addrWidth)))
  val toDataPathAfterDelay: MixedVec[MixedVec[DecoupledIO[IssueQueueIssueBundle]]] = MixedVec(params.issueBlockParams.map(_.genIssueDecoupledBundle))

  val vlWriteBackInfo = new Bundle {
    val vlIsZero = Input(Bool())
    val vlIsVlmax = Input(Bool())
  }

  val fromSchedulers = new Bundle {
    val wakeupVec: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = Flipped(params.genIQWakeUpInValidBundle)
  }

  val toSchedulers = new Bundle {
    val wakeupVec: MixedVec[ValidIO[IssueQueueIQWakeUpBundle]] = params.genIQWakeUpOutValidBundle
  }

  val fromDataPath = new Bundle {
    val resp: MixedVec[MixedVec[OGRespBundle]] = MixedVec(params.issueBlockParams.map(x => Flipped(x.genOGRespBundle)))
    val og0Cancel = Input(ExuVec())
    // TODO: remove this once og1 no longer produces a cancel signal
    val og1Cancel = Input(ExuVec())
    // replacement RegCache index forwarded to the wakeup queue
    val replaceRCIdx = OptionWrapper(params.needWriteRegCache, Vec(params.numWriteRegCache, Input(UInt(RegCacheIdxWidth.W))))
    // kept for compatibility with old code
    def apply(i: Int)(j: Int) = resp(i)(j)
  }

  val loadFinalIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.LdExuCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))
  val memAddrIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.LdExuCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))
  val vecLoadIssueResp = MixedVec(params.issueBlockParams.map(x => MixedVec(Vec(x.VlduCnt, Flipped(ValidIO(new IssueQueueDeqRespBundle()(p, x)))))))

  val ldCancel = Vec(backendParams.LduCnt + backendParams.HyuCnt, Flipped(new LoadCancelIO))

  val memIO = if (params.isMemSchd) Some(new Bundle {
    val lsqEnqIO = Flipped(new LsqEnqIO)
  }) else None
  val fromMem = if (params.isMemSchd) Some(new Bundle {
    val ldaFeedback = Flipped(Vec(params.LduCnt, new MemRSFeedbackIO))
    val staFeedback = Flipped(Vec(params.StaCnt, new MemRSFeedbackIO))
    val hyuFeedback = Flipped(Vec(params.HyuCnt, new MemRSFeedbackIO))
    val vstuFeedback = Flipped(Vec(params.VstuCnt, new MemRSFeedbackIO(isVector = true)))
    val vlduFeedback = Flipped(Vec(params.VlduCnt, new MemRSFeedbackIO(isVector = true)))
    val stIssuePtr = Input(new SqPtr())
    val lcommit = Input(UInt(log2Up(CommitWidth + 1).W))
    val scommit = Input(UInt(log2Ceil(EnsbufferWidth + 1).W)) // connected to `memBlock.io.sqDeq` instead of ROB
    val wakeup = Vec(params.LdExuCnt, Flipped(Valid(new DynInst)))
    val lqDeqPtr = Input(new LqPtr)
    val sqDeqPtr = Input(new SqPtr)
    // from lsq
    val lqCancelCnt = Input(UInt(log2Up(LoadQueueSize + 1).W))
    val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))
    val memWaitUpdateReq = Flipped(new MemWaitUpdateReq)
  }) else None
  val toMem = if (params.isMemSchd) Some(new Bundle {
    val loadFastMatch = Output(Vec(params.LduCnt, new IssueQueueLoadBundle))
  }) else None
  val fromOg2 = if (params.isVfSchd) Some(MixedVec(params.issueBlockParams.map(x => Flipped(x.genOG2RespBundle)))) else None
}

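// SchedulerImpBase carries the logic shared by all scheduler flavours: the
// Dispatch2Iq and IssueQueue module handles, the per-register-file BusyTables
// and the RegCacheTagTable, plus the wakeup/cancel wiring between issue queues,
// write-back ports and the data path. The subclasses below add only the
// flavour-specific enqueue routing and memory-related glue.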
abstract class SchedulerImpBase(wrapper: Scheduler)(implicit params: SchdBlockParams, p: Parameters)
  extends LazyModuleImp(wrapper)
  with HasXSParameter
{
  val io = IO(new SchedulerIO())

  // alias
  private val iqWakeUpInMap: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] =
    io.fromSchedulers.wakeupVec.map(x => (x.bits.exuIdx, x)).toMap
  private val schdType = params.schdType

  // Modules
  val dispatch2Iq: Dispatch2IqImp = wrapper.dispatch2Iq.module
  val issueQueues: Seq[IssueQueueImp] = wrapper.issueQueue.map(_.module)
  io.intIQValidNumVec := 0.U.asTypeOf(io.intIQValidNumVec)
  io.fpIQValidNumVec := 0.U.asTypeOf(io.fpIQValidNumVec)
  if (params.isIntSchd) {
    dispatch2Iq.io.intIQValidNumVec.get := io.intIQValidNumVec
    io.intIQValidNumVec := MixedVecInit(issueQueues.map(_.io.validCntDeqVec))
  }
  else if (params.isFpSchd) {
    dispatch2Iq.io.fpIQValidNumVec.get := io.fpIQValidNumVec
    io.fpIQValidNumVec := MixedVecInit(issueQueues.map(_.io.validCntDeqVec))
  }

  // valid count
  dispatch2Iq.io.iqValidCnt := issueQueues.filter(_.params.StdCnt == 0).map(_.io.status.validCnt)

  // BusyTable Modules
  val intBusyTable = schdType match {
    case IntScheduler() | MemScheduler() => Some(Module(new BusyTable(dispatch2Iq.numIntStateRead, wrapper.numIntStateWrite, IntPhyRegs, IntWB())))
    case _ => None
  }
  val fpBusyTable = schdType match {
    case FpScheduler() | MemScheduler() => Some(Module(new BusyTable(dispatch2Iq.numFpStateRead, wrapper.numFpStateWrite, FpPhyRegs, FpWB())))
    case _ => None
  }
  val vfBusyTable = schdType match {
    case VfScheduler() | MemScheduler() => Some(Module(new BusyTable(dispatch2Iq.numVfStateRead, wrapper.numVfStateWrite, VfPhyRegs, VfWB())))
    case _ => None
  }
  val v0BusyTable = schdType match {
    case VfScheduler() | MemScheduler() => Some(Module(new BusyTable(dispatch2Iq.numV0StateRead, wrapper.numV0StateWrite, V0PhyRegs, V0WB())))
    case _ => None
  }
  val vlBusyTable = schdType match {
    case VfScheduler() | MemScheduler() => Some(Module(new BusyTable(dispatch2Iq.numVlStateRead, wrapper.numVlStateWrite, VlPhyRegs, VlWB())))
    case _ => None
  }

  // RegCacheTagTable Module
  val rcTagTable = schdType match {
    case IntScheduler() | MemScheduler() => Some(Module(new RegCacheTagTable(dispatch2Iq.numRCTagTableStateRead)))
    case _ => None
  }

  dispatch2Iq.io match { case dp2iq =>
    dp2iq.redirect <> io.fromCtrlBlock.flush
    dp2iq.in <> io.fromDispatch.uops
    dp2iq.readIntState.foreach(_ <> intBusyTable.get.io.read)
    dp2iq.readFpState.foreach(_ <> fpBusyTable.get.io.read)
    dp2iq.readVfState.foreach(_ <> vfBusyTable.get.io.read)
    dp2iq.readV0State.foreach(_ <> v0BusyTable.get.io.read)
    dp2iq.readVlState.foreach(_ <> vlBusyTable.get.io.read)
    dp2iq.readRCTagTableState.foreach(_ <> rcTagTable.get.io.readPorts)
  }

  intBusyTable match {
    case Some(bt) =>
      bt.io.allocPregs.zip(io.fromDispatch.allocPregs).foreach { case (btAllocPregs, dpAllocPregs) =>
        btAllocPregs.valid := dpAllocPregs.isInt
        btAllocPregs.bits := dpAllocPregs.preg
      }
      bt.io.wbPregs.zipWithIndex.foreach { case (wb, i) =>
        wb.valid := io.intWriteBack(i).wen && io.intWriteBack(i).intWen
        wb.bits := io.intWriteBack(i).addr
      }
      bt.io.wakeUp := io.fromSchedulers.wakeupVec
      bt.io.og0Cancel := io.fromDataPath.og0Cancel
      bt.io.ldCancel := io.ldCancel
    case None =>
  }

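  // The fp/vf/v0/vl busy tables below are wired with the same pattern as the int
  // busy table above: allocated pregs are marked via allocPregs, the corresponding
  // write-back ports drive wbPregs, and wakeUp/og0Cancel/ldCancel come from the
  // shared scheduler-level signals.
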
  fpBusyTable match {
    case Some(bt) =>
      bt.io.allocPregs.zip(io.fromDispatch.allocPregs).foreach { case (btAllocPregs, dpAllocPregs) =>
        btAllocPregs.valid := dpAllocPregs.isFp
        btAllocPregs.bits := dpAllocPregs.preg
      }
      bt.io.wbPregs.zipWithIndex.foreach { case (wb, i) =>
        wb.valid := io.fpWriteBack(i).wen && io.fpWriteBack(i).fpWen
        wb.bits := io.fpWriteBack(i).addr
      }
      bt.io.wakeUp := io.fromSchedulers.wakeupVec
      bt.io.og0Cancel := io.fromDataPath.og0Cancel
      bt.io.ldCancel := io.ldCancel
    case None =>
  }

  vfBusyTable match {
    case Some(bt) =>
      bt.io.allocPregs.zip(io.fromDispatch.allocPregs).foreach { case (btAllocPregs, dpAllocPregs) =>
        btAllocPregs.valid := dpAllocPregs.isVec
        btAllocPregs.bits := dpAllocPregs.preg
      }
      bt.io.wbPregs.zipWithIndex.foreach { case (wb, i) =>
        wb.valid := io.vfWriteBack(i).wen && io.vfWriteBack(i).vecWen
        wb.bits := io.vfWriteBack(i).addr
      }
      bt.io.wakeUp := io.fromSchedulers.wakeupVec
      bt.io.og0Cancel := io.fromDataPath.og0Cancel
      bt.io.ldCancel := io.ldCancel
    case None =>
  }

  v0BusyTable match {
    case Some(bt) =>
      bt.io.allocPregs.zip(io.fromDispatch.allocPregs).foreach { case (btAllocPregs, dpAllocPregs) =>
        btAllocPregs.valid := dpAllocPregs.isV0
        btAllocPregs.bits := dpAllocPregs.preg
      }
      bt.io.wbPregs.zipWithIndex.foreach { case (wb, i) =>
        wb.valid := io.v0WriteBack(i).wen && io.v0WriteBack(i).v0Wen
        wb.bits := io.v0WriteBack(i).addr
      }
      bt.io.wakeUp := io.fromSchedulers.wakeupVec
      bt.io.og0Cancel := io.fromDataPath.og0Cancel
      bt.io.ldCancel := io.ldCancel
    case None =>
  }

  vlBusyTable match {
    case Some(bt) =>
      bt.io.allocPregs.zip(io.fromDispatch.allocPregs).foreach { case (btAllocPregs, dpAllocPregs) =>
        btAllocPregs.valid := dpAllocPregs.isVl
        btAllocPregs.bits := dpAllocPregs.preg
      }
      bt.io.wbPregs.zipWithIndex.foreach { case (wb, i) =>
        wb.valid := io.vlWriteBack(i).wen && io.vlWriteBack(i).vlWen
        wb.bits := io.vlWriteBack(i).addr
      }
      bt.io.wakeUp := io.fromSchedulers.wakeupVec
      bt.io.og0Cancel := io.fromDataPath.og0Cancel
      bt.io.ldCancel := io.ldCancel
    case None =>
  }

  rcTagTable match {
    case Some(rct) =>
      rct.io.allocPregs.zip(io.fromDispatch.allocPregs).foreach { case (btAllocPregs, dpAllocPregs) =>
        btAllocPregs.valid := dpAllocPregs.isInt
        btAllocPregs.bits := dpAllocPregs.preg
      }
      rct.io.wakeupFromIQ := io.fromSchedulers.wakeupVec
      rct.io.og0Cancel := io.fromDataPath.og0Cancel
      rct.io.ldCancel := io.ldCancel
    case None =>
  }

  val wakeupFromIntWBVec = Wire(params.genIntWBWakeUpSinkValidBundle)
  val wakeupFromFpWBVec = Wire(params.genFpWBWakeUpSinkValidBundle)
  val wakeupFromVfWBVec = Wire(params.genVfWBWakeUpSinkValidBundle)
  val wakeupFromV0WBVec = Wire(params.genV0WBWakeUpSinkValidBundle)
  val wakeupFromVlWBVec = Wire(params.genVlWBWakeUpSinkValidBundle)

  wakeupFromIntWBVec.zip(io.intWriteBack).foreach { case (sink, source) =>
    sink.valid := source.wen
    sink.bits.rfWen := source.intWen
    sink.bits.fpWen := source.fpWen
    sink.bits.vecWen := source.vecWen
    sink.bits.v0Wen := source.v0Wen
    sink.bits.vlWen := source.vlWen
    sink.bits.pdest := source.addr
  }

  wakeupFromFpWBVec.zip(io.fpWriteBack).foreach { case (sink, source) =>
    sink.valid := source.wen
    sink.bits.rfWen := source.intWen
    sink.bits.fpWen := source.fpWen
    sink.bits.vecWen := source.vecWen
    sink.bits.v0Wen := source.v0Wen
    sink.bits.vlWen := source.vlWen
    sink.bits.pdest := source.addr
  }

  wakeupFromVfWBVec.zip(io.vfWriteBack).foreach { case (sink, source) =>
    sink.valid := source.wen
    sink.bits.rfWen := source.intWen
    sink.bits.fpWen := source.fpWen
    sink.bits.vecWen := source.vecWen
    sink.bits.v0Wen := source.v0Wen
    sink.bits.vlWen := source.vlWen
    sink.bits.pdest := source.addr
  }

  wakeupFromV0WBVec.zip(io.v0WriteBack).foreach { case (sink, source) =>
    sink.valid := source.wen
    sink.bits.rfWen := source.intWen
    sink.bits.fpWen := source.fpWen
    sink.bits.vecWen := source.vecWen
    sink.bits.v0Wen := source.v0Wen
    sink.bits.vlWen := source.vlWen
    sink.bits.pdest := source.addr
  }

  wakeupFromVlWBVec.zip(io.vlWriteBack).foreach { case (sink, source) =>
    sink.valid := source.wen
    sink.bits.rfWen := source.intWen
    sink.bits.fpWen := source.fpWen
    sink.bits.vecWen := source.vecWen
    sink.bits.v0Wen := source.v0Wen
    sink.bits.vlWen := source.vlWen
    sink.bits.pdest := source.addr
  }

  // Connect bundles having the same wakeup source
  issueQueues.zipWithIndex.foreach { case (iq, i) =>
    iq.io.wakeupFromIQ.foreach { wakeUp =>
      val wakeUpIn = iqWakeUpInMap(wakeUp.bits.exuIdx)
      val exuIdx = wakeUp.bits.exuIdx
      println(s"[Backend] Connect wakeup exuIdx ${exuIdx}")
      connectSamePort(wakeUp, wakeUpIn)
      backendParams.connectWakeup(exuIdx)
      if (backendParams.isCopyPdest(exuIdx)) {
        println(s"[Backend] exuIdx ${exuIdx} use pdestCopy ${backendParams.getCopyPdestIndex(exuIdx)}")
        wakeUp.bits.pdest := wakeUpIn.bits.pdestCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.rfWenCopy.nonEmpty) wakeUp.bits.rfWen := wakeUpIn.bits.rfWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.fpWenCopy.nonEmpty) wakeUp.bits.fpWen := wakeUpIn.bits.fpWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.vecWenCopy.nonEmpty) wakeUp.bits.vecWen := wakeUpIn.bits.vecWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.v0WenCopy.nonEmpty) wakeUp.bits.v0Wen := wakeUpIn.bits.v0WenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.vlWenCopy.nonEmpty) wakeUp.bits.vlWen := wakeUpIn.bits.vlWenCopy.get(backendParams.getCopyPdestIndex(exuIdx))
        if (wakeUpIn.bits.loadDependencyCopy.nonEmpty) wakeUp.bits.loadDependency := wakeUpIn.bits.loadDependencyCopy.get(backendParams.getCopyPdestIndex(exuIdx))
      }
      if (iq.params.numIntSrc == 0) wakeUp.bits.rfWen := false.B
      if (iq.params.numFpSrc == 0) wakeUp.bits.fpWen := false.B
      if (iq.params.numVfSrc == 0) wakeUp.bits.vecWen := false.B
      if (iq.params.numV0Src == 0) wakeUp.bits.v0Wen := false.B
      if (iq.params.numVlSrc == 0) wakeUp.bits.vlWen := false.B
    }
    iq.io.og0Cancel := io.fromDataPath.og0Cancel
    iq.io.og1Cancel := io.fromDataPath.og1Cancel
    if (iq.params.needLoadDependency)
      iq.io.ldCancel := io.ldCancel
    else
      iq.io.ldCancel := 0.U.asTypeOf(io.ldCancel)
  }

  // connect the vl writeback information to the issue queues
  issueQueues.zipWithIndex.foreach { case (iq, i) =>
    iq.io.vlIsVlmax := io.vlWriteBackInfo.vlIsVlmax
    iq.io.vlIsZero := io.vlWriteBackInfo.vlIsZero
  }

  private val iqWakeUpOutMap: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] =
    issueQueues.flatMap(_.io.wakeupToIQ)
      .map(x => (x.bits.exuIdx, x))
      .toMap

  // Connect bundles having the same wakeup source
  io.toSchedulers.wakeupVec.foreach { wakeUp =>
    wakeUp := iqWakeUpOutMap(wakeUp.bits.exuIdx)
  }

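  // Wakeup producers and consumers are matched by exuIdx rather than by position:
  // iqWakeUpInMap/iqWakeUpOutMap map each EXU index to its wakeup bundle, so every
  // port above is driven by looking up the bundle of the EXU it expects.
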
  io.toDataPathAfterDelay.zipWithIndex.foreach { case (toDpDy, i) =>
    toDpDy <> issueQueues(i).io.deqDelay
  }

  // Response
  issueQueues.zipWithIndex.foreach { case (iq, i) =>
    iq.io.og0Resp.zipWithIndex.foreach { case (og0Resp, j) =>
      og0Resp := io.fromDataPath(i)(j).og0resp
    }
    iq.io.og1Resp.zipWithIndex.foreach { case (og1Resp, j) =>
      og1Resp := io.fromDataPath(i)(j).og1resp
    }
    iq.io.finalIssueResp.foreach(_.zipWithIndex.foreach { case (finalIssueResp, j) =>
      if (io.loadFinalIssueResp(i).isDefinedAt(j)) {
        finalIssueResp := io.loadFinalIssueResp(i)(j)
      } else {
        finalIssueResp := 0.U.asTypeOf(finalIssueResp)
      }
    })
    iq.io.memAddrIssueResp.foreach(_.zipWithIndex.foreach { case (memAddrIssueResp, j) =>
      if (io.memAddrIssueResp(i).isDefinedAt(j)) {
        memAddrIssueResp := io.memAddrIssueResp(i)(j)
      } else {
        memAddrIssueResp := 0.U.asTypeOf(memAddrIssueResp)
      }
    })
    iq.io.vecLoadIssueResp.foreach(_.zipWithIndex.foreach { case (resp, deqIdx) =>
      resp := io.vecLoadIssueResp(i)(deqIdx)
    })
    if (params.isVfSchd) {
      iq.io.og2Resp.get.zipWithIndex.foreach { case (og2Resp, exuIdx) =>
        og2Resp := io.fromOg2.get(i)(exuIdx)
      }
    }
    iq.io.wbBusyTableRead := io.fromWbFuBusyTable.fuBusyTableRead(i)
    io.wbFuBusyTable(i) := iq.io.wbBusyTableWrite
    iq.io.replaceRCIdx.foreach(x => x := 0.U.asTypeOf(x))
  }

  // Connect each replacement RCIdx to its IQ
  if (params.needWriteRegCache) {
    val iqReplaceRCIdxVec = issueQueues.filter(_.params.needWriteRegCache).flatMap { iq =>
      iq.params.allExuParams.zip(iq.io.replaceRCIdx.get).filter(_._1.needWriteRegCache).map(_._2)
    }
    iqReplaceRCIdxVec.zip(io.fromDataPath.replaceRCIdx.get).foreach { case (iq, in) =>
      iq := in
    }

    println(s"[Scheduler] numWriteRegCache: ${params.numWriteRegCache}")
    println(s"[Scheduler] iqReplaceRCIdxVec: ${iqReplaceRCIdxVec.size}")
  }

  // perfEvent
  val lastCycleDp2IqOutFireVec = RegNext(VecInit(dispatch2Iq.io.out.flatten.map(_.fire)))
  val lastCycleIqEnqFireVec = RegNext(VecInit(issueQueues.map(_.io.enq.map(_.fire)).flatten))
  val lastCycleIqFullVec = RegNext(VecInit(issueQueues.map(_.io.enq.head.ready)))

  val issueQueueFullVecPerf = issueQueues.zip(lastCycleIqFullVec).map { case (iq, full) => (iq.params.getIQName + s"_full", full) }
  val basePerfEvents = Seq(
    ("dispatch2Iq_out_fire_cnt", PopCount(lastCycleDp2IqOutFireVec)),
    ("issueQueue_enq_fire_cnt", PopCount(lastCycleIqEnqFireVec))
  ) ++ issueQueueFullVecPerf

  println(s"[Scheduler] io.fromSchedulers.wakeupVec: ${io.fromSchedulers.wakeupVec.map(x => backendParams.getExuName(x.bits.exuIdx))}")
  println(s"[Scheduler] iqWakeUpInKeys: ${iqWakeUpInMap.keys}")

  println(s"[Scheduler] iqWakeUpOutKeys: ${iqWakeUpOutMap.keys}")
  println(s"[Scheduler] io.toSchedulers.wakeupVec: ${io.toSchedulers.wakeupVec.map(x => backendParams.getExuName(x.bits.exuIdx))}")
}

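// SchedulerArithImp is shared by the integer, floating-point and vector (Vf)
// schedulers; the only flavour-specific part is which write-back wakeup vectors
// feed each issue queue, selected by schdType below.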
class SchedulerArithImp(override val wrapper: Scheduler)(implicit params: SchdBlockParams, p: Parameters)
  extends SchedulerImpBase(wrapper)
  with HasXSParameter
  with HasPerfEvents
{
  // dontTouch(io.vfWbFuBusyTable)
  println(s"[SchedulerArithImp] " +
    s"has intBusyTable: ${intBusyTable.nonEmpty}, " +
    s"has vfBusyTable: ${vfBusyTable.nonEmpty}")

  issueQueues.zipWithIndex.foreach { case (iq, i) =>
    iq.io.flush <> io.fromCtrlBlock.flush
    iq.io.enq <> dispatch2Iq.io.out(i)
    if (!iq.params.needLoadDependency) {
      iq.io.enq.map(x => x.bits.srcLoadDependency := 0.U.asTypeOf(x.bits.srcLoadDependency))
    }
    val intWBIQ = params.schdType match {
      case IntScheduler() => wakeupFromIntWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1)
      case FpScheduler() => wakeupFromFpWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1)
      case VfScheduler() => (wakeupFromVfWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromV0WBVec.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1) ++
        wakeupFromVlWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1))
      case _ => null
    }
    iq.io.wakeupFromWB.zip(intWBIQ).foreach { case (sink, source) => sink := source }
  }

  val perfEvents = basePerfEvents
  generatePerfEvent()
}

// FIXME: Vector mem instructions may not be handled properly!
class SchedulerMemImp(override val wrapper: Scheduler)(implicit params: SchdBlockParams, p: Parameters)
  extends SchedulerImpBase(wrapper)
  with HasXSParameter
  with HasPerfEvents
{
  println(s"[SchedulerMemImp] " +
    s"has intBusyTable: ${intBusyTable.nonEmpty}, " +
    s"has vfBusyTable: ${vfBusyTable.nonEmpty}")

  val memAddrIQs = issueQueues.filter(_.params.isMemAddrIQ)
  val stAddrIQs = issueQueues.filter(iq => iq.params.StaCnt > 0) // included in memAddrIQs
  val ldAddrIQs = issueQueues.filter(iq => iq.params.LduCnt > 0)
  val stDataIQs = issueQueues.filter(iq => iq.params.StdCnt > 0)
  val vecMemIQs = issueQueues.filter(_.params.isVecMemIQ)
  val (hyuIQs, hyuIQIdxs) = issueQueues.zipWithIndex.filter(_._1.params.HyuCnt > 0).unzip

  println(s"[SchedulerMemImp] memAddrIQs.size: ${memAddrIQs.size}, enq.size: ${memAddrIQs.map(_.io.enq.size).sum}")
  println(s"[SchedulerMemImp] stAddrIQs.size: ${stAddrIQs.size}, enq.size: ${stAddrIQs.map(_.io.enq.size).sum}")
  println(s"[SchedulerMemImp] ldAddrIQs.size: ${ldAddrIQs.size}, enq.size: ${ldAddrIQs.map(_.io.enq.size).sum}")
  println(s"[SchedulerMemImp] stDataIQs.size: ${stDataIQs.size}, enq.size: ${stDataIQs.map(_.io.enq.size).sum}")
  println(s"[SchedulerMemImp] hyuIQs.size: ${hyuIQs.size}, enq.size: ${hyuIQs.map(_.io.enq.size).sum}")
  require(memAddrIQs.nonEmpty && stDataIQs.nonEmpty)

  io.toMem.get.loadFastMatch := 0.U.asTypeOf(io.toMem.get.loadFastMatch) // TODO: is this still needed?

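  // Load wakeups produced by the memory block are forwarded one-to-one to the
  // load-capable issue queues (LdExuCnt > 0); the require below checks that the
  // number of wakeup ports matches on both sides.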
  private val loadWakeUp = issueQueues.filter(_.params.LdExuCnt > 0).map(_.asInstanceOf[IssueQueueMemAddrImp].io.memIO.get.loadWakeUp).flatten
  require(loadWakeUp.length == io.fromMem.get.wakeup.length)
  loadWakeUp.zip(io.fromMem.get.wakeup).foreach(x => x._1 := x._2)

  memAddrIQs.zipWithIndex.foreach { case (iq, i) =>
    iq.io.flush <> io.fromCtrlBlock.flush
    iq.io.enq <> dispatch2Iq.io.out(i)
    if (!iq.params.needLoadDependency) {
      iq.io.enq.map(x => x.bits.srcLoadDependency := 0.U.asTypeOf(x.bits.srcLoadDependency))
    }
    iq.io.wakeupFromWB.zip(
      wakeupFromIntWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
      wakeupFromFpWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
      wakeupFromVfWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1) ++
      wakeupFromV0WBVec.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1) ++
      wakeupFromVlWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1)
    ).foreach { case (sink, source) => sink := source }
  }

  ldAddrIQs.zipWithIndex.foreach {
    case (imp: IssueQueueMemAddrImp, i) =>
      imp.io.memIO.get.feedbackIO.head := 0.U.asTypeOf(imp.io.memIO.get.feedbackIO.head)
      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
    case _ =>
  }

  stAddrIQs.zipWithIndex.foreach {
    case (imp: IssueQueueMemAddrImp, i) =>
      imp.io.memIO.get.feedbackIO.head := io.fromMem.get.staFeedback(i)
      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
    case _ =>
  }

  hyuIQs.zip(hyuIQIdxs).foreach {
    case (imp: IssueQueueMemAddrImp, idx) =>
      imp.io.memIO.get.feedbackIO.head := io.fromMem.get.hyuFeedback.head
      imp.io.memIO.get.feedbackIO(1) := 0.U.asTypeOf(imp.io.memIO.get.feedbackIO(1))
      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
      // TODO: refactor dirty code
      imp.io.deqDelay(1).ready := false.B
      io.toDataPathAfterDelay(idx)(1).valid := false.B
      io.toDataPathAfterDelay(idx)(1).bits := 0.U.asTypeOf(io.toDataPathAfterDelay(idx)(1).bits)
    case _ =>
  }

  private val staIdxSeq = stAddrIQs.map(iq => iq.params.idxInSchBlk)
  private val hyaIdxSeq = hyuIQs.map(iq => iq.params.idxInSchBlk)

  println(s"[SchedulerMemImp] sta iq idx in memSchdBlock: $staIdxSeq")
  println(s"[SchedulerMemImp] hya iq idx in memSchdBlock: $hyaIdxSeq")

  private val staEnqs = stAddrIQs.map(_.io.enq).flatten
  private val stdEnqs = stDataIQs.map(_.io.enq).flatten.take(staEnqs.size)
  private val hyaEnqs = hyuIQs.map(_.io.enq).flatten
  private val hydEnqs = stDataIQs.map(_.io.enq).flatten.drop(staEnqs.size)

  require(staEnqs.size == stdEnqs.size, s"number of enq ports of store address IQs(${staEnqs.size}) " +
    s"should be equal to number of enq ports of store data IQs(${stdEnqs.size})")

  require(hyaEnqs.size == hydEnqs.size, s"number of enq ports of hybrid address IQs(${hyaEnqs.size}) " +
    s"should be equal to number of enq ports of hybrid data IQs(${hydEnqs.size})")

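  // Store address and store data IQs share the dispatch ports selected below: a
  // dispatch slot fires only when both the address IQ and the paired data IQ can
  // accept the uop, and the data IQ entry is further gated on the fuType being a
  // store or atomic (stu/mou). The hybrid (hya/hyd) pairs are handled the same way.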
  val d2IqStaOut = dispatch2Iq.io.out.zipWithIndex.filter(staIdxSeq contains _._2).unzip._1.flatten
  d2IqStaOut.zip(staEnqs).zip(stdEnqs).foreach { case ((dp, staIQ), stdIQ) =>
    val isAllReady = staIQ.ready && stdIQ.ready
    dp.ready := isAllReady
    staIQ.valid := dp.valid && isAllReady
    stdIQ.valid := dp.valid && isAllReady && FuType.FuTypeOrR(dp.bits.fuType, FuType.stu, FuType.mou)
  }

  val d2IqHyaOut = dispatch2Iq.io.out.zipWithIndex.filter(hyaIdxSeq contains _._2).unzip._1.flatten
  d2IqHyaOut.zip(hyaEnqs).zip(hydEnqs).foreach { case ((dp, hyaIQ), hydIQ) =>
    val isAllReady = hyaIQ.ready && hydIQ.ready
    dp.ready := isAllReady
    hyaIQ.valid := dp.valid && isAllReady
    hydIQ.valid := dp.valid && isAllReady && FuType.FuTypeOrR(dp.bits.fuType, FuType.stu, FuType.mou)
  }

  stDataIQs.zipWithIndex.foreach { case (iq, i) =>
    iq.io.flush <> io.fromCtrlBlock.flush
    iq.io.wakeupFromWB.zip(
      wakeupFromIntWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
      wakeupFromFpWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
      wakeupFromVfWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
      wakeupFromV0WBVec.zipWithIndex.filter(x => iq.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
      wakeupFromVlWBVec.zipWithIndex.filter(x => iq.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq
    ).foreach { case (sink, source) => sink := source }
  }

  (stdEnqs ++ hydEnqs).zip(staEnqs ++ hyaEnqs).zipWithIndex.foreach { case ((stdIQEnq, staIQEnq), i) =>
    stdIQEnq.bits := staIQEnq.bits
    // Store data reuses store addr src(1) in dispatch2iq
    // [dispatch2iq] --src*------src*(0)--> [staIQ|hyaIQ]
    //                       \
    //                        ---src*(1)--> [stdIQ]
    // Since src(1) of the sta IQ is easier to get, staIQEnq.bits.src*(1) is assigned to
    // stdIQEnq.bits.src*(0) here, instead of taking dispatch2Iq.io.out(x).bits.src*(1) directly.
    val stdIdx = 1
    stdIQEnq.bits.srcState(0) := staIQEnq.bits.srcState(stdIdx)
    stdIQEnq.bits.srcLoadDependency(0) := staIQEnq.bits.srcLoadDependency(stdIdx)
    stdIQEnq.bits.srcType(0) := staIQEnq.bits.srcType(stdIdx)
    stdIQEnq.bits.psrc(0) := staIQEnq.bits.psrc(stdIdx)
    stdIQEnq.bits.sqIdx := staIQEnq.bits.sqIdx
    stdIQEnq.bits.useRegCache(0) := staIQEnq.bits.useRegCache(stdIdx)
    stdIQEnq.bits.regCacheIdx(0) := staIQEnq.bits.regCacheIdx(stdIdx)
  }

  vecMemIQs.foreach {
    case imp: IssueQueueVecMemImp =>
      imp.io.memIO.get.sqDeqPtr.foreach(_ := io.fromMem.get.sqDeqPtr)
      imp.io.memIO.get.lqDeqPtr.foreach(_ := io.fromMem.get.lqDeqPtr)
      // not used
      //imp.io.memIO.get.feedbackIO.head := io.fromMem.get.vstuFeedback.head // only vector store replay
      // maybe not used
      imp.io.memIO.get.checkWait.stIssuePtr := io.fromMem.get.stIssuePtr
      imp.io.memIO.get.checkWait.memWaitUpdateReq := io.fromMem.get.memWaitUpdateReq
      imp.io.wakeupFromWB.zip(
        wakeupFromIntWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromIntWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromFpWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromFpWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromVfWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromVfWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromV0WBVec.zipWithIndex.filter(x => imp.params.needWakeupFromV0WBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq ++
        wakeupFromVlWBVec.zipWithIndex.filter(x => imp.params.needWakeupFromVlWBPort.keys.toSeq.contains(x._2)).map(_._1).toSeq
      ).foreach { case (sink, source) => sink := source }

    case _ =>
  }
  val vecMemFeedbackIO: Seq[MemRSFeedbackIO] = vecMemIQs.map {
    case imp: IssueQueueVecMemImp =>
      imp.io.memIO.get.feedbackIO
  }.flatten
  assert(vecMemFeedbackIO.size == io.fromMem.get.vstuFeedback.size, "vecMemFeedback size doesn't match!")
  vecMemFeedbackIO.zip(io.fromMem.get.vstuFeedback).foreach {
    case (sink, source) =>
      sink := source
  }

  val lsqEnqCtrl = Module(new LsqEnqCtrl)

  lsqEnqCtrl.io.redirect <> io.fromCtrlBlock.flush
  lsqEnqCtrl.io.enq <> dispatch2Iq.io.enqLsqIO.get
  lsqEnqCtrl.io.lcommit := io.fromMem.get.lcommit
  lsqEnqCtrl.io.scommit := io.fromMem.get.scommit
  lsqEnqCtrl.io.lqCancelCnt := io.fromMem.get.lqCancelCnt
  lsqEnqCtrl.io.sqCancelCnt := io.fromMem.get.sqCancelCnt
  dispatch2Iq.io.lqFreeCount.get := lsqEnqCtrl.io.lqFreeCount
  dispatch2Iq.io.sqFreeCount.get := lsqEnqCtrl.io.sqFreeCount
  io.memIO.get.lsqEnqIO <> lsqEnqCtrl.io.enqLsq

  dontTouch(io.vecLoadIssueResp)

  val intBusyTablePerf = intBusyTable.get
  val fpBusyTablePerf = fpBusyTable.get
  val vecBusyTablePerf = vfBusyTable.get
  val v0BusyTablePerf = v0BusyTable.get
  val vlBusyTablePerf = vlBusyTable.get

  val perfEvents = basePerfEvents ++ Seq(intBusyTablePerf, fpBusyTablePerf, vecBusyTablePerf, v0BusyTablePerf, vlBusyTablePerf).flatten(_.getPerfEvents)
  generatePerfEvent()
}