/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend.icache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.IdRange
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink._
import xiangshan._
import huancun.{AliasKey, DirtyKey}
import xiangshan.cache._
import utils._

abstract class ICacheMissUnitModule(implicit p: Parameters) extends XSModule
  with HasICacheParameters

abstract class ICacheMissUnitBundle(implicit p: Parameters) extends XSBundle
  with HasICacheParameters

class ICacheMissReq(implicit p: Parameters) extends ICacheBundle
{
  val paddr   = UInt(PAddrBits.W)
  val vaddr   = UInt(VAddrBits.W)
  val waymask = UInt(nWays.W)
  val coh     = new ClientMetadata

  def getVirSetIdx = get_idx(vaddr)
  def getPhyTag    = get_phy_tag(paddr)
}

class ICacheMissResp(implicit p: Parameters) extends ICacheBundle
{
  val data    = UInt(blockBits.W)
  val corrupt = Bool()
}

class ICacheMissBundle(implicit p: Parameters) extends ICacheBundle
{
  val req   = Vec(2, Flipped(DecoupledIO(new ICacheMissReq)))
  val resp  = Vec(2, ValidIO(new ICacheMissResp))
  val flush = Input(Bool())
}

class ICacheMissEntry(edge: TLEdgeOut, id: Int)(implicit p: Parameters) extends ICacheMissUnitModule
  with MemoryOpConstants
{
  val io = IO(new Bundle {
    val id = Input(UInt(log2Ceil(PortNumber).W))

    val req  = Flipped(DecoupledIO(new ICacheMissReq))
    val resp = ValidIO(new ICacheMissResp)

    // TileLink channels
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant   = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish  = DecoupledIO(new TLBundleE(edge.bundle))

    val meta_write = DecoupledIO(new ICacheMetaWriteBundle)
    val data_write = DecoupledIO(new ICacheDataWriteBundle)

    val release_req  = DecoupledIO(new ReplacePipeReq)
    val release_resp = Flipped(ValidIO(UInt(ReplaceIdWid.W)))
    val victimInfor  = Output(new ICacheVictimInfor())

    val toPrefetch = ValidIO(UInt(PAddrBits.W))
  })

  /** default values for control signals */
  io.resp             := DontCare
  io.mem_acquire.bits := DontCare
  io.mem_grant.ready  := true.B
  io.meta_write.bits  := DontCare
  io.data_write.bits  := DontCare

  val s_idle :: s_send_mem_acquire :: s_wait_mem_grant :: s_write_back :: s_send_grant_ack :: s_send_replace :: s_wait_replace :: s_wait_resp :: Nil = Enum(8)
  val state = RegInit(s_idle)
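  // A miss is handled by walking through the states below:
  //   s_idle             : wait for a new miss request
  //   s_send_mem_acquire : issue AcquireBlock on the TileLink A channel
  //   s_wait_mem_grant   : collect refill beats from the D channel
  //   s_send_grant_ack   : send GrantAck on the E channel
  //   s_send_replace     : ask the replace pipe to evict the victim way
  //   s_wait_replace     : wait for the matching release response
  //   s_write_back       : write refilled data and new meta into the SRAM arrays
  //   s_wait_resp        : return the cacheline to the IFU, then go back to s_idle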
  /** control logic */
  // request register
  val req         = Reg(new ICacheMissReq)
  val req_idx     = req.getVirSetIdx // virtual set index
  val req_tag     = req.getPhyTag    // physical tag
  val req_waymask = req.waymask
  val release_id  = Cat(MainPipeKey.U, id.U)
  val req_corrupt = RegInit(false.B)

  io.victimInfor.valid := state === s_send_replace || state === s_wait_replace || state === s_wait_resp
  io.victimInfor.vidx  := req_idx

  val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant)

  // cacheline registers
  val readBeatCnt = Reg(UInt(log2Up(refillCycles).W))
  val respDataReg = Reg(Vec(refillCycles, UInt(beatBits.W)))

  io.release_req.bits.paddr     := req.paddr
  io.release_req.bits.vaddr     := req.vaddr
  io.release_req.bits.voluntary := true.B
  io.release_req.bits.waymask   := req.waymask
  io.release_req.bits.needData  := false.B
  io.release_req.bits.id        := release_id
  io.release_req.bits.param     := DontCare // voluntary release does not use the TileLink param

  io.req.ready         := (state === s_idle)
  io.mem_acquire.valid := (state === s_send_mem_acquire)
  io.release_req.valid := (state === s_send_replace)

  io.toPrefetch.valid := (state =/= s_idle)
  io.toPrefetch.bits  := addrAlign(req.paddr, blockBytes, PAddrBits)

  val grantack    = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire())
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))
  val is_dirty    = RegInit(false.B)
  val is_grant    = RegEnable(edge.isRequest(io.mem_grant.bits), io.mem_grant.fire())

  // state transitions
  switch(state) {
    is(s_idle) {
      when(io.req.fire()) {
        readBeatCnt := 0.U
        state       := s_send_mem_acquire
        req         := io.req.bits
      }
    }

    // memory request
    is(s_send_mem_acquire) {
      when(io.mem_acquire.fire()) {
        state := s_wait_mem_grant
      }
    }

    is(s_wait_mem_grant) {
      when(edge.hasData(io.mem_grant.bits)) {
        when(io.mem_grant.fire()) {
          readBeatCnt := readBeatCnt + 1.U
          respDataReg(readBeatCnt) := io.mem_grant.bits.data
          req_corrupt := io.mem_grant.bits.corrupt
          grant_param := io.mem_grant.bits.param
          is_dirty    := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
          when(readBeatCnt === (refillCycles - 1).U) {
            assert(refill_done, "refill not done!")
            state := s_send_grant_ack
          }
        }
      }
    }

    is(s_send_grant_ack) {
      when(io.mem_finish.fire()) {
        state := s_send_replace
      }
    }

    is(s_send_replace) {
      when(io.release_req.fire()) {
        state := s_wait_replace
      }
    }

    is(s_wait_replace) {
      when(io.release_resp.valid && io.release_resp.bits === release_id) {
        state := s_write_back
      }
    }

    is(s_write_back) {
      state := Mux(io.meta_write.fire() && io.data_write.fire(), s_wait_resp, s_write_back)
    }

    is(s_wait_resp) {
      io.resp.bits.data    := respDataReg.asUInt
      io.resp.bits.corrupt := req_corrupt
      when(io.resp.fire()) {
        state := s_idle
      }
    }
  }

  /** build the AcquireBlock request for refill */
  val missCoh    = ClientMetadata(Nothing)
  val grow_param = missCoh.onAccess(M_XRD)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource      = io.id,
    toAddress       = addrAlign(req.paddr, blockBytes, PAddrBits),
    lgSize          = (log2Up(cacheParams.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := acquireBlock
  // resolve cache alias in L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
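  // Note: with nSets <= 256 (checked below) and the usual 64-byte blocks, the
  // set index lives in vaddr(13, 6), so vaddr(13, 12) are exactly the index
  // bits above the 4KB page offset. Passing them through AliasKey lets L2
  // detect and resolve virtual-alias conflicts for this virtually-indexed icache.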
  require(nSets <= 256) // icache size should not exceed 128KB

  /** Grant ACK */
  io.mem_finish.valid := (state === s_send_grant_ack) && is_grant
  io.mem_finish.bits  := grantack

  // response to IFU
  io.resp.valid := state === s_wait_resp

  /** update coherence metadata */
  def missCohGen(param: UInt, dirty: Bool): UInt = {
    MuxLookup(Cat(param, dirty), Nothing, Seq(
      Cat(toB, false.B) -> Branch,
      Cat(toB, true.B)  -> Branch,
      Cat(toT, false.B) -> Trunk,
      Cat(toT, true.B)  -> Dirty))
  }

  val miss_new_coh = ClientMetadata(missCohGen(grant_param, is_dirty))

  io.meta_write.valid := (state === s_write_back)
  io.meta_write.bits.generate(tag = req_tag, coh = miss_new_coh, idx = req_idx, waymask = req_waymask, bankIdx = req_idx(0))

  io.data_write.valid := (state === s_write_back)
  io.data_write.bits.generate(data = respDataReg.asUInt, idx = req_idx, waymask = req_waymask, bankIdx = req_idx(0))

  XSPerfAccumulate(
    "entryPenalty" + Integer.toString(id, 10),
    BoolStopWatch(
      start = io.req.fire(),
      stop  = io.resp.valid,
      startHighPriority = true)
  )
  XSPerfAccumulate("entryReq" + Integer.toString(id, 10), io.req.fire())
}

class ICacheMissUnit(edge: TLEdgeOut)(implicit p: Parameters) extends ICacheMissUnitModule
{
  val io = IO(new Bundle {
    val req  = Vec(2, Flipped(DecoupledIO(new ICacheMissReq)))
    val resp = Vec(2, ValidIO(new ICacheMissResp))

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant   = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish  = DecoupledIO(new TLBundleE(edge.bundle))

    val meta_write = DecoupledIO(new ICacheMetaWriteBundle)
    val data_write = DecoupledIO(new ICacheDataWriteBundle)

    val release_req  = DecoupledIO(new ReplacePipeReq)
    val release_resp = Flipped(ValidIO(UInt(ReplaceIdWid.W)))

    val victimInfor = Vec(PortNumber, Output(new ICacheVictimInfor()))

    val prefetch_req   = Flipped(DecoupledIO(new PIQReq))
    val prefetch_check = Vec(PortNumber, ValidIO(UInt(PAddrBits.W)))
  })

  val meta_write_arb = Module(new Arbiter(new ICacheMetaWriteBundle, PortNumber))
  val refill_arb     = Module(new Arbiter(new ICacheDataWriteBundle, PortNumber))
  val release_arb    = Module(new Arbiter(new ReplacePipeReq, PortNumber))

  // mem_grant is accepted by default; a matching entry takes over the
  // handshake in the per-entry when() blocks below
  io.mem_grant.ready := true.B

  val entries = (0 until PortNumber) map { i =>
    val entry = Module(new ICacheMissEntry(edge, i))

    entry.io.id := i.U

    // entry req
    entry.io.req.valid := io.req(i).valid
    entry.io.req.bits  := io.req(i).bits
    io.req(i).ready    := entry.io.req.ready

    // entry resp
    meta_write_arb.io.in(i) <> entry.io.meta_write
    refill_arb.io.in(i)     <> entry.io.data_write
    release_arb.io.in(i)    <> entry.io.release_req

    entry.io.mem_grant.valid := false.B
    entry.io.mem_grant.bits  := DontCare
    when(io.mem_grant.bits.source === i.U) {
      entry.io.mem_grant <> io.mem_grant
    }

    io.resp(i) <> entry.io.resp

    io.victimInfor(i)    := entry.io.victimInfor
    io.prefetch_check(i) <> entry.io.toPrefetch

    entry.io.release_resp <> io.release_resp

    XSPerfAccumulate(
      "entryPenalty" + Integer.toString(i, 10),
      BoolStopWatch(
        start = entry.io.req.fire(),
        stop  = entry.io.resp.fire(),
        startHighPriority = true)
    )
    XSPerfAccumulate("entryReq" + Integer.toString(i, 10), entry.io.req.fire())

    entry
  }
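  // The D channel is demultiplexed by source id: each miss entry acquires with
  // fromSource = its own id (0 until PortNumber), so at most one when() block
  // above captures io.mem_grant in a given cycle; unmatched ids fall through
  // to the prefetch entries below.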
  val alloc = Wire(UInt(log2Ceil(nPrefetchEntries).W))

  val prefEntries = (PortNumber until PortNumber + nPrefetchEntries) map { i =>
    val prefetchEntry = Module(new IPrefetchEntry(edge, PortNumber))

    prefetchEntry.io.mem_hint_ack.valid := false.B
    prefetchEntry.io.mem_hint_ack.bits  := DontCare

    when(io.mem_grant.bits.source === PortNumber.U) {
      prefetchEntry.io.mem_hint_ack <> io.mem_grant
    }

    prefetchEntry.io.req.valid := io.prefetch_req.valid && ((i - PortNumber).U === alloc)
    prefetchEntry.io.req.bits  := io.prefetch_req.bits

    prefetchEntry.io.id := i.U

    prefetchEntry
  }

  alloc := PriorityEncoder(prefEntries.map(_.io.req.ready))
  io.prefetch_req.ready := ParallelOR(prefEntries.map(_.io.req.ready))

  // TLArbiter.lowest grants the lowest-index source first, so miss entries
  // take priority over prefetch hints on the A channel
  val tl_a_channel = entries.map(_.io.mem_acquire) ++ prefEntries.map(_.io.mem_hint)
  TLArbiter.lowest(edge, io.mem_acquire, tl_a_channel: _*)

  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish): _*)

  io.meta_write  <> meta_write_arb.io.out
  io.data_write  <> refill_arb.io.out
  io.release_req <> release_arb.io.out

  (0 until nWays).map { w =>
    XSPerfAccumulate("line_0_refill_way_" + Integer.toString(w, 10),
      entries(0).io.meta_write.valid && OHToUInt(entries(0).io.meta_write.bits.waymask) === w.U)
    XSPerfAccumulate("line_1_refill_way_" + Integer.toString(w, 10),
      entries(1).io.meta_write.valid && OHToUInt(entries(1).io.meta_write.bits.waymask) === w.U)
  }
}
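// A minimal integration sketch (illustrative only; `bus`, `mainPipe`,
// `metaArray` and `dataArray` are assumed names from the enclosing ICache
// top level, not defined in this file):
//
//   val missUnit = Module(new ICacheMissUnit(edge))
//   missUnit.io.req(i)    <> mainPipe miss port i        // hypothetical wiring
//   bus.a                 <> missUnit.io.mem_acquire     // TileLink A channel
//   missUnit.io.mem_grant <> bus.d                       // TileLink D channel
//   bus.e                 <> missUnit.io.mem_finish      // TileLink E channel
//   metaArray.io.write    <> missUnit.io.meta_write      // hypothetical array ports
//   dataArray.io.write    <> missUnit.io.data_write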