package xiangshan.mem.prefetch

import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util.ValidPseudoLRU
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.cache.HasDCacheParameters
import xiangshan.cache.mmu._
import xiangshan.mem.{L1PrefetchReq, LdPrefetchTrainBundle}
import xiangshan.mem.trace._
import xiangshan.mem.L1PrefetchSource

trait HasL1PrefetchHelper extends HasCircularQueuePtrHelper with HasDCacheParameters {
  // region related
  val REGION_SIZE = 1024
  val PAGE_OFFSET = 12
  val BLOCK_OFFSET = log2Up(dcacheParameters.blockBytes)
  val BIT_VEC_WITDH = REGION_SIZE / dcacheParameters.blockBytes
  val REGION_BITS = log2Up(BIT_VEC_WITDH)
  val REGION_TAG_OFFSET = BLOCK_OFFSET + REGION_BITS
  val REGION_TAG_BITS = VAddrBits - BLOCK_OFFSET - REGION_BITS

  // hash related
  val VADDR_HASH_WIDTH = 5
  val BLK_ADDR_RAW_WIDTH = 10
  val HASH_TAG_WIDTH = VADDR_HASH_WIDTH + BLK_ADDR_RAW_WIDTH

  // capacity related
  val MLP_SIZE = 32
  val MLP_L1_SIZE = 16
  val MLP_L2L3_SIZE = MLP_SIZE - MLP_L1_SIZE

  // prefetch sink related
  val SINK_BITS = 2
  def SINK_L1 = "b00".U
  def SINK_L2 = "b01".U
  def SINK_L3 = "b10".U

  // vaddr: | region tag | region bits | block offset |
  def get_region_tag(vaddr: UInt) = {
    require(vaddr.getWidth == VAddrBits)
    vaddr(vaddr.getWidth - 1, REGION_TAG_OFFSET)
  }

  def get_region_bits(vaddr: UInt) = {
    require(vaddr.getWidth == VAddrBits)
    vaddr(REGION_TAG_OFFSET - 1, BLOCK_OFFSET)
  }

  def block_addr(x: UInt): UInt = {
    x(x.getWidth - 1, BLOCK_OFFSET)
  }

  // fold the low 3 * VADDR_HASH_WIDTH bits into a VADDR_HASH_WIDTH-bit hash by xor
  def vaddr_hash(x: UInt): UInt = {
    val width = VADDR_HASH_WIDTH
    val low = x(width - 1, 0)
    val mid = x(2 * width - 1, width)
    val high = x(3 * width - 1, 2 * width)
    low ^ mid ^ high
  }

  def pc_hash_tag(x: UInt): UInt = {
    val low = x(BLK_ADDR_RAW_WIDTH - 1, 0)
    val high = x(BLK_ADDR_RAW_WIDTH - 1 + 3 * VADDR_HASH_WIDTH, BLK_ADDR_RAW_WIDTH)
    val high_hash = vaddr_hash(high)
    Cat(high_hash, low)
  }

  def block_hash_tag(x: UInt): UInt = {
    val blk_addr = block_addr(x)
    val low = blk_addr(BLK_ADDR_RAW_WIDTH - 1, 0)
    val high = blk_addr(BLK_ADDR_RAW_WIDTH - 1 + 3 * VADDR_HASH_WIDTH, BLK_ADDR_RAW_WIDTH)
    val high_hash = vaddr_hash(high)
    Cat(high_hash, low)
  }

  def region_hash_tag(region_tag: UInt): UInt = {
    val low = region_tag(BLK_ADDR_RAW_WIDTH - 1, 0)
    val high = region_tag(BLK_ADDR_RAW_WIDTH - 1 + 3 * VADDR_HASH_WIDTH, BLK_ADDR_RAW_WIDTH)
    val high_hash = vaddr_hash(high)
    Cat(high_hash, low)
  }

  def region_to_block_addr(region_tag: UInt, region_bits: UInt): UInt = {
    Cat(region_tag, region_bits)
  }

  def get_candidate_oh(x: UInt): UInt = {
    require(x.getWidth == PAddrBits)
    UIntToOH(x(REGION_BITS + BLOCK_OFFSET - 1, BLOCK_OFFSET))
  }

  def toBinary(n: Int): String = n match {
    case 0 | 1 => s"$n"
    case _ => s"${toBinary(n / 2)}${n % 2}"
  }
}

trait HasTrainFilterHelper extends HasCircularQueuePtrHelper {
  def reorder[T <: LdPrefetchTrainBundle](source: Vec[ValidIO[T]]): Vec[ValidIO[T]] = {
    if (source.length == 1) {
      source
    } else if (source.length == 2) {
      val source_v = source.map(_.valid)
      val res = Wire(source.cloneType)
      // source 1 is older than source 0 (only when source 0/1 are both valid)
      val source_1_older = Mux(Cat(source_v).andR,
        isBefore(source(1).bits.uop.robIdx, source(0).bits.uop.robIdx),
        false.B
      )
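      // compare-and-swap: if source 1 carries the older uop (smaller robIdx),
      // swap the two entries so that res(0) always holds the older request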
      when(source_1_older) {
        res(0) := source(1)
        res(1) := source(0)
      }.otherwise {
        res := source
      }

      res
    } else if (source.length == 3) {
      // TODO: generalize
      // NOTE: these must be Wires: the three pairwise reorders below form a
      // combinational sorting network; Regs here would read stale values
      val res_0_1 = Wire(source.cloneType)
      val res_1_2 = Wire(source.cloneType)
      val res = Wire(source.cloneType)

      val tmp = reorder(VecInit(source.slice(0, 2)))
      res_0_1(0) := tmp(0)
      res_0_1(1) := tmp(1)
      res_0_1(2) := source(2)
      val tmp_1 = reorder(VecInit(res_0_1.slice(1, 3)))
      res_1_2(0) := res_0_1(0)
      res_1_2(1) := tmp_1(0)
      res_1_2(2) := tmp_1(1)
      val tmp_2 = reorder(VecInit(res_1_2.slice(0, 2)))
      res(0) := tmp_2(0)
      res(1) := tmp_2(1)
      res(2) := res_1_2(2)

      res
    } else {
      require(false, "for now, 4 or more sources are not supported")
      source
    }
  }
}

// get prefetch train reqs from `backendParams.LduCnt` load pipelines (up to `backendParams.LduCnt`/cycle)
// filter by cache line address, send out train reqs (up to 1 req/cycle)
class TrainFilter(size: Int, name: String)(implicit p: Parameters) extends XSModule with HasL1PrefetchHelper with HasTrainFilterHelper {
  val io = IO(new Bundle() {
    val enable = Input(Bool())
    val flush = Input(Bool())
    // train input, only from load for now
    val ld_in = Flipped(Vec(backendParams.LduCnt, ValidIO(new LdPrefetchTrainBundle())))
    // filter out
    val train_req = DecoupledIO(new PrefetchReqBundle())
  })

  class Ptr(implicit p: Parameters) extends CircularQueuePtr[Ptr](p => size) {}
  object Ptr {
    def apply(f: Bool, v: UInt)(implicit p: Parameters): Ptr = {
      val ptr = Wire(new Ptr)
      ptr.flag := f
      ptr.value := v
      ptr
    }
  }

  val entries = RegInit(VecInit(Seq.fill(size)(0.U.asTypeOf(new PrefetchReqBundle()))))
  val valids = RegInit(VecInit(Seq.fill(size)(false.B)))

  // enq
  val enqLen = backendParams.LduCnt
  val enqPtrExt = RegInit(VecInit((0 until enqLen).map(_.U.asTypeOf(new Ptr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new Ptr))

  val deqPtr = WireInit(deqPtrExt.value)

  require(size >= enqLen)

  val ld_in_reordered = reorder(io.ld_in)
  val reqs_l = ld_in_reordered.map(_.bits.asPrefetchReqBundle())
  val reqs_vl = ld_in_reordered.map(_.valid)
  val needAlloc = Wire(Vec(enqLen, Bool()))
  val canAlloc = Wire(Vec(enqLen, Bool()))

  for (i <- 0 until enqLen) {
    val req = reqs_l(i)
    val req_v = reqs_vl(i)
    val index = PopCount(needAlloc.take(i))
    val allocPtr = enqPtrExt(index)
    val entry_match = Cat(entries.zip(valids).map {
      case (e, v) => v && block_hash_tag(e.vaddr) === block_hash_tag(req.vaddr)
    }).orR
    val prev_enq_match = if (i == 0) false.B else Cat(reqs_l.zip(reqs_vl).take(i).map {
      case (pre, pre_v) => pre_v && block_hash_tag(pre.vaddr) === block_hash_tag(req.vaddr)
    }).orR

    needAlloc(i) := req_v && !entry_match && !prev_enq_match
    canAlloc(i) := needAlloc(i) && allocPtr >= deqPtrExt && io.enable

    when(canAlloc(i)) {
      valids(allocPtr.value) := true.B
      entries(allocPtr.value) := req
    }
  }
  val allocNum = PopCount(canAlloc)

  enqPtrExt.foreach { case x => when(canAlloc.asUInt.orR) { x := x + allocNum } }

  // deq
  io.train_req.valid := false.B
  io.train_req.bits := DontCare
  valids.zip(entries).zipWithIndex.foreach {
    case ((valid, entry), i) => {
      when(deqPtr === i.U) {
        io.train_req.valid := valid && io.enable
        io.train_req.bits := entry
      }
    }
  }
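  // on a successful handshake, release the head entry and advance the deq pointer;
  // a (registered) flush clears all valid bits and rewinds both pointers to the reset state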
  when(io.train_req.fire) {
    valids(deqPtr) := false.B
    deqPtrExt := deqPtrExt + 1.U
  }

  when(RegNext(io.flush)) {
    valids.foreach { case valid => valid := false.B }
    (0 until enqLen).map { case i => enqPtrExt(i) := i.U.asTypeOf(new Ptr) }
    deqPtrExt := 0.U.asTypeOf(new Ptr)
  }

  XSPerfAccumulate(s"${name}_train_filter_full", PopCount(valids) === size.U)
  XSPerfAccumulate(s"${name}_train_filter_half", PopCount(valids) >= (size / 2).U)
  XSPerfAccumulate(s"${name}_train_filter_empty", PopCount(valids) === 0.U)

  val raw_enq_pattern = Cat(reqs_vl)
  val filtered_enq_pattern = Cat(needAlloc)
  val actual_enq_pattern = Cat(canAlloc)
  XSPerfAccumulate(s"${name}_train_filter_enq", allocNum > 0.U)
  XSPerfAccumulate(s"${name}_train_filter_deq", io.train_req.fire)
  for (i <- 0 until (1 << enqLen)) {
    XSPerfAccumulate(s"${name}_train_filter_raw_enq_pattern_${toBinary(i)}", raw_enq_pattern === i.U)
    XSPerfAccumulate(s"${name}_train_filter_filtered_enq_pattern_${toBinary(i)}", filtered_enq_pattern === i.U)
    XSPerfAccumulate(s"${name}_train_filter_actual_enq_pattern_${toBinary(i)}", actual_enq_pattern === i.U)
  }
}

class MLPReqFilterBundle(implicit p: Parameters) extends XSBundle with HasL1PrefetchHelper {
  val valid = Bool()
  val tag = UInt(HASH_TAG_WIDTH.W)
  val region = UInt(REGION_TAG_BITS.W)
  val bit_vec = UInt(BIT_VEC_WITDH.W)
  // NOTE: the L1 sink does not check sent_vec, so that more prefetch reqs can be sent to L1 DCache
  val sent_vec = UInt(BIT_VEC_WITDH.W)
  val sink = UInt(SINK_BITS.W)
  val alias = UInt(2.W)
  val is_vaddr = Bool()
  val source = new L1PrefetchSource()
  val debug_va_region = UInt(REGION_TAG_BITS.W)

  def reset(index: Int) = {
    valid := false.B
    tag := region_hash_tag(index.U)
    region := index.U
    bit_vec := 0.U
    sent_vec := 0.U
    sink := SINK_L1
    alias := 0.U
    is_vaddr := false.B
    source.value := L1_HW_PREFETCH_NULL
    debug_va_region := 0.U
  }

  def tag_match(new_tag: UInt): Bool = {
    require(new_tag.getWidth == HASH_TAG_WIDTH)
    (tag === new_tag) && valid
  }

  def update(update_bit_vec: UInt, update_sink: UInt) = {
    bit_vec := bit_vec | update_bit_vec
    when(update_sink < sink) {
      bit_vec := (bit_vec & ~sent_vec) | update_bit_vec
      sink := update_sink
    }

    assert(PopCount(update_bit_vec) >= 1.U, "update vector should contain at least one valid bit")
  }

  def can_send_pf(): Bool = {
    Mux(
      sink === SINK_L1,
      !is_vaddr && bit_vec.orR,
      !is_vaddr && (bit_vec & ~sent_vec).orR
    ) && valid
  }

  def may_be_replace(): Bool = {
    // either invalid or has sent out all reqs
    !valid || RegNext(PopCount(sent_vec) === BIT_VEC_WITDH.U)
  }

  def get_pf_addr(): UInt = {
    require(PAddrBits <= VAddrBits)
    require((region.getWidth + REGION_BITS + BLOCK_OFFSET) == VAddrBits)

    val candidate = Mux(
      sink === SINK_L1,
      PriorityEncoder(bit_vec).asTypeOf(UInt(REGION_BITS.W)),
      PriorityEncoder(bit_vec & ~sent_vec).asTypeOf(UInt(REGION_BITS.W))
    )
    Cat(region, candidate, 0.U(BLOCK_OFFSET.W))
  }

  def get_pf_debug_vaddr(): UInt = {
    val candidate = Mux(
      sink === SINK_L1,
      PriorityEncoder(bit_vec).asTypeOf(UInt(REGION_BITS.W)),
      PriorityEncoder(bit_vec & ~sent_vec).asTypeOf(UInt(REGION_BITS.W))
    )
    Cat(debug_va_region, candidate, 0.U(BLOCK_OFFSET.W))
  }
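  // the TLB is probed with the region-aligned virtual address (low REGION_TAG_OFFSET bits
  // zeroed); a region (1KB) never crosses a 4K page, so one translation covers every block
  // tracked by this entry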
  def get_tlb_va(): UInt = {
    require((region.getWidth + REGION_TAG_OFFSET) == VAddrBits)
    Cat(region, 0.U(REGION_TAG_OFFSET.W))
  }

  def fromStreamPrefetchReqBundle(x: StreamPrefetchReqBundle): MLPReqFilterBundle = {
    require(PAGE_OFFSET >= REGION_TAG_OFFSET, "if region is larger than a 4K page, the alias bits may be incorrect")

    val res = Wire(new MLPReqFilterBundle)
    res.valid := true.B
    res.tag := region_hash_tag(x.region)
    res.region := x.region
    res.bit_vec := x.bit_vec
    res.sent_vec := 0.U
    res.sink := x.sink
    res.is_vaddr := true.B
    res.source := x.source
    res.alias := x.region(PAGE_OFFSET - REGION_TAG_OFFSET + 1, PAGE_OFFSET - REGION_TAG_OFFSET)
    res.debug_va_region := x.region

    res
  }

  def invalidate() = {
    // disable sending pf req
    when(sink === SINK_L1) {
      bit_vec := 0.U(BIT_VEC_WITDH.W)
    }.otherwise {
      sent_vec := ~(0.U(BIT_VEC_WITDH.W))
    }
    // disable sending tlb req
    is_vaddr := false.B
    valid := false.B
  }
}

// there are 5 independent pipelines inside
// 1. prefetch enqueue
// 2. tlb request
// 3. actual l1 prefetch
// 4. actual l2 prefetch
// 5. actual l3 prefetch
class MutiLevelPrefetchFilter(implicit p: Parameters) extends XSModule with HasL1PrefetchHelper {
  val io = IO(new XSBundle {
    val enable = Input(Bool())
    val flush = Input(Bool())
    val l1_prefetch_req = Flipped(ValidIO(new StreamPrefetchReqBundle))
    val l2_l3_prefetch_req = Flipped(ValidIO(new StreamPrefetchReqBundle))
    val tlb_req = new TlbRequestIO(nRespDups = 2)
    val l1_req = DecoupledIO(new L1PrefetchReq())
    val l2_pf_addr = ValidIO(new L2PrefetchReq())
    val l3_pf_addr = ValidIO(UInt(PAddrBits.W)) // TODO: l3 pf source
    val confidence = Input(UInt(1.W))
    val l2PfqBusy = Input(Bool())
  })

  val l1_array = Reg(Vec(MLP_L1_SIZE, new MLPReqFilterBundle))
  val l2_array = Reg(Vec(MLP_L2L3_SIZE, new MLPReqFilterBundle))
  val l1_replacement = new ValidPseudoLRU(MLP_L1_SIZE)
  val l2_replacement = new ValidPseudoLRU(MLP_L2L3_SIZE)
  val tlb_req_arb = Module(new RRArbiterInit(new TlbReq, MLP_SIZE))
  val l1_pf_req_arb = Module(new RRArbiterInit(new Bundle {
    val req = new L1PrefetchReq
    val debug_vaddr = UInt(VAddrBits.W)
  }, MLP_L1_SIZE))
  val l2_pf_req_arb = Module(new RRArbiterInit(new Bundle {
    val req = new L2PrefetchReq
    val debug_vaddr = UInt(VAddrBits.W)
  }, MLP_L2L3_SIZE))
  val l3_pf_req_arb = Module(new RRArbiterInit(UInt(PAddrBits.W), MLP_L2L3_SIZE))

  val l1_opt_replace_vec = VecInit(l1_array.map(s => s.may_be_replace()))
  val l2_opt_replace_vec = VecInit(l2_array.map(s => s.may_be_replace()))
  // if we have something to replace, then choose it, otherwise follow the plru manner
  val l1_real_replace_vec = Mux(Cat(l1_opt_replace_vec).orR, l1_opt_replace_vec, VecInit(Seq.fill(MLP_L1_SIZE)(true.B)))
  val l2_real_replace_vec = Mux(Cat(l2_opt_replace_vec).orR, l2_opt_replace_vec, VecInit(Seq.fill(MLP_L2L3_SIZE)(true.B)))

  // l1 pf req enq
  // s0: hash tag match
  val s0_l1_can_accept = Wire(Bool())
  val s0_l1_valid = io.l1_prefetch_req.valid && s0_l1_can_accept
  val s0_l1_region = io.l1_prefetch_req.bits.region
  val s0_l1_region_hash = region_hash_tag(s0_l1_region)
  val s0_l1_match_vec = l1_array.map(_.tag_match(s0_l1_region_hash))
  val s0_l1_hit = VecInit(s0_l1_match_vec).asUInt.orR
  val s0_l1_index = Wire(UInt(log2Up(MLP_L1_SIZE).W))
  val s0_l1_prefetch_req = (new MLPReqFilterBundle).fromStreamPrefetchReqBundle(io.l1_prefetch_req.bits)
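  // entry selection: on a tag hit, reuse the matching entry; on a miss, take the victim
  // chosen by the pseudo-LRU, preferring replaceable entries (invalid or fully sent) when any exist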
  s0_l1_index := Mux(s0_l1_hit, OHToUInt(VecInit(s0_l1_match_vec).asUInt), l1_replacement.way(l1_real_replace_vec.reverse)._2)

  when(s0_l1_valid) {
    l1_replacement.access(s0_l1_index)
  }

  assert(!s0_l1_valid || PopCount(VecInit(s0_l1_match_vec)) <= 1.U, "req region should match no more than 1 entry")

  XSPerfAccumulate("s0_l1_enq_fire", s0_l1_valid)
  XSPerfAccumulate("s0_l1_enq_valid", io.l1_prefetch_req.valid)
  XSPerfAccumulate("s0_l1_cannot_enq", io.l1_prefetch_req.valid && !s0_l1_can_accept)

  // s1: alloc or update
  val s1_l1_valid = RegNext(s0_l1_valid)
  val s1_l1_region = RegEnable(s0_l1_region, s0_l1_valid)
  val s1_l1_region_hash = RegEnable(s0_l1_region_hash, s0_l1_valid)
  val s1_l1_hit = RegEnable(s0_l1_hit, s0_l1_valid)
  val s1_l1_index = RegEnable(s0_l1_index, s0_l1_valid)
  val s1_l1_prefetch_req = RegEnable(s0_l1_prefetch_req, s0_l1_valid)
  val s1_l1_alloc = s1_l1_valid && !s1_l1_hit
  val s1_l1_update = s1_l1_valid && s1_l1_hit
  s0_l1_can_accept := !(s1_l1_valid && s1_l1_alloc && (s0_l1_region_hash === s1_l1_region_hash))

  when(s1_l1_alloc) {
    l1_array(s1_l1_index) := s1_l1_prefetch_req
  }.elsewhen(s1_l1_update) {
    l1_array(s1_l1_index).update(
      update_bit_vec = s1_l1_prefetch_req.bit_vec,
      update_sink = s1_l1_prefetch_req.sink
    )
  }

  XSPerfAccumulate("s1_l1_enq_valid", s1_l1_valid)
  XSPerfAccumulate("s1_l1_enq_alloc", s1_l1_alloc)
  XSPerfAccumulate("s1_l1_enq_update", s1_l1_update)
  XSPerfAccumulate("l1_hash_conflict", s0_l1_valid && RegNext(s1_l1_valid) && (s0_l1_region =/= RegNext(s1_l1_region)) && (s0_l1_region_hash === RegNext(s1_l1_region_hash)))
  XSPerfAccumulate("s1_l1_enq_evict_useful_entry", s1_l1_alloc && l1_array(s1_l1_index).can_send_pf())

  // l2 l3 pf req enq
  // s0: hash tag match
  val s0_l2_can_accept = Wire(Bool())
  val s0_l2_valid = io.l2_l3_prefetch_req.valid && s0_l2_can_accept
  val s0_l2_region = io.l2_l3_prefetch_req.bits.region
  val s0_l2_region_hash = region_hash_tag(s0_l2_region)
  val s0_l2_match_vec = l2_array.map(_.tag_match(s0_l2_region_hash))
  val s0_l2_hit = VecInit(s0_l2_match_vec).asUInt.orR
  val s0_l2_index = Wire(UInt(log2Up(MLP_L2L3_SIZE).W))
  val s0_l2_prefetch_req = (new MLPReqFilterBundle).fromStreamPrefetchReqBundle(io.l2_l3_prefetch_req.bits)

  s0_l2_index := Mux(s0_l2_hit, OHToUInt(VecInit(s0_l2_match_vec).asUInt), l2_replacement.way(l2_real_replace_vec.reverse)._2)

  when(s0_l2_valid) {
    l2_replacement.access(s0_l2_index)
  }

  assert(!s0_l2_valid || PopCount(VecInit(s0_l2_match_vec)) <= 1.U, "req region should match no more than 1 entry")

  XSPerfAccumulate("s0_l2_enq_fire", s0_l2_valid)
  XSPerfAccumulate("s0_l2_enq_valid", io.l2_l3_prefetch_req.valid)
  XSPerfAccumulate("s0_l2_cannot_enq", io.l2_l3_prefetch_req.valid && !s0_l2_can_accept)

  // s1: alloc or update
  val s1_l2_valid = RegNext(s0_l2_valid)
  val s1_l2_region = RegEnable(s0_l2_region, s0_l2_valid)
  val s1_l2_region_hash = RegEnable(s0_l2_region_hash, s0_l2_valid)
  val s1_l2_hit = RegEnable(s0_l2_hit, s0_l2_valid)
  val s1_l2_index = RegEnable(s0_l2_index, s0_l2_valid)
  val s1_l2_prefetch_req = RegEnable(s0_l2_prefetch_req, s0_l2_valid)
  val s1_l2_alloc = s1_l2_valid && !s1_l2_hit
  val s1_l2_update = s1_l2_valid && s1_l2_hit
  s0_l2_can_accept := !(s1_l2_valid && s1_l2_alloc && (s0_l2_region_hash === s1_l2_region_hash))
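  // s1 writeback: an alloc overwrites the chosen way with the new request, while an update
  // ORs the incoming bit_vec into the existing entry (possibly retargeting a lower sink)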
  when(s1_l2_alloc) {
    l2_array(s1_l2_index) := s1_l2_prefetch_req
  }.elsewhen(s1_l2_update) {
    l2_array(s1_l2_index).update(
      update_bit_vec = s1_l2_prefetch_req.bit_vec,
      update_sink = s1_l2_prefetch_req.sink
    )
  }

  XSPerfAccumulate("s1_l2_enq_valid", s1_l2_valid)
  XSPerfAccumulate("s1_l2_enq_alloc", s1_l2_alloc)
  XSPerfAccumulate("s1_l2_enq_update", s1_l2_update)
  XSPerfAccumulate("l2_hash_conflict", s0_l2_valid && RegNext(s1_l2_valid) && (s0_l2_region =/= RegNext(s1_l2_region)) && (s0_l2_region_hash === RegNext(s1_l2_region_hash)))
  XSPerfAccumulate("s1_l2_enq_evict_useful_entry", s1_l2_alloc && l2_array(s1_l2_index).can_send_pf())

  // stream pf debug db here
  // Hit:
  //   for now, only pending = (region_bits & ~filter_bits) are the pending requests
  //   if a PfGen comes, the newly added requests are new_req = PfGen.region_bits & ~(pending)
  // Alloc:
  //   new_req = PfGen.region_bits
  val stream_pf_trace_debug_table = ChiselDB.createTable("StreamPFTrace" + p(XSCoreParamsKey).HartId.toString, new StreamPFTraceInEntry, basicDB = false)
  for (i <- 0 until BIT_VEC_WITDH) {
    // l1 enq log
    val hit_entry = l1_array(s0_l1_index)
    val new_req = Mux(
      s0_l1_hit,
      io.l1_prefetch_req.bits.bit_vec & ~(hit_entry.bit_vec),
      io.l1_prefetch_req.bits.bit_vec
    )
    val log_enable = s0_l1_valid && new_req(i) && (io.l1_prefetch_req.bits.source.value === L1_HW_PREFETCH_STREAM)
    val log_data = Wire(new StreamPFTraceInEntry)

    log_data.TriggerPC := io.l1_prefetch_req.bits.trigger_pc
    log_data.TriggerVaddr := io.l1_prefetch_req.bits.trigger_va
    log_data.PFVaddr := Cat(s0_l1_region, i.U(REGION_BITS.W), 0.U(log2Up(dcacheParameters.blockBytes).W))
    log_data.PFSink := s0_l1_prefetch_req.sink

    stream_pf_trace_debug_table.log(
      data = log_data,
      en = log_enable,
      site = "StreamPFTrace",
      clock = clock,
      reset = reset
    )
  }
  for (i <- 0 until BIT_VEC_WITDH) {
    // l2 l3 enq log
    val hit_entry = l2_array(s0_l2_index)
    val new_req = Mux(
      s0_l2_hit,
      io.l2_l3_prefetch_req.bits.bit_vec & ~(hit_entry.bit_vec),
      io.l2_l3_prefetch_req.bits.bit_vec
    )
    val log_enable = s0_l2_valid && new_req(i) && (io.l2_l3_prefetch_req.bits.source.value === L1_HW_PREFETCH_STREAM)
    val log_data = Wire(new StreamPFTraceInEntry)

    log_data.TriggerPC := io.l2_l3_prefetch_req.bits.trigger_pc
    log_data.TriggerVaddr := io.l2_l3_prefetch_req.bits.trigger_va
    log_data.PFVaddr := Cat(s0_l2_region, i.U(REGION_BITS.W), 0.U(log2Up(dcacheParameters.blockBytes).W))
    log_data.PFSink := s0_l2_prefetch_req.sink

    stream_pf_trace_debug_table.log(
      data = log_data,
      en = log_enable,
      site = "StreamPFTrace",
      clock = clock,
      reset = reset
    )
  }

  // tlb req
  // s0: arb all tlb reqs
  val s0_tlb_fire_vec = VecInit((0 until MLP_SIZE).map { case i => tlb_req_arb.io.in(i).fire })
  val s1_tlb_fire_vec = GatedValidRegNext(s0_tlb_fire_vec)
  val s2_tlb_fire_vec = GatedValidRegNext(s1_tlb_fire_vec)
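  // each entry still holding a virtual region (is_vaddr) competes for a TLB slot;
  // a request is suppressed while a previous one for the same entry is in flight (s1/s2),
  // or when the enq pipeline is overwriting that entry this cycle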
  for (i <- 0 until MLP_SIZE) {
    val l1_evict = s1_l1_alloc && (s1_l1_index === i.U)
    val l2_evict = s1_l2_alloc && ((s1_l2_index + MLP_L1_SIZE.U) === i.U)
    if (i < MLP_L1_SIZE) {
      tlb_req_arb.io.in(i).valid := l1_array(i).is_vaddr && !s1_tlb_fire_vec(i) && !s2_tlb_fire_vec(i) && !l1_evict
      tlb_req_arb.io.in(i).bits.vaddr := l1_array(i).get_tlb_va()
    } else {
      tlb_req_arb.io.in(i).valid := l2_array(i - MLP_L1_SIZE).is_vaddr && !s1_tlb_fire_vec(i) && !s2_tlb_fire_vec(i) && !l2_evict
      tlb_req_arb.io.in(i).bits.vaddr := l2_array(i - MLP_L1_SIZE).get_tlb_va()
    }
    tlb_req_arb.io.in(i).bits.cmd := TlbCmd.read
    tlb_req_arb.io.in(i).bits.size := 3.U
    tlb_req_arb.io.in(i).bits.kill := false.B
    tlb_req_arb.io.in(i).bits.no_translate := false.B
    tlb_req_arb.io.in(i).bits.memidx := DontCare
    tlb_req_arb.io.in(i).bits.debug := DontCare
    tlb_req_arb.io.in(i).bits.hlvx := DontCare
    tlb_req_arb.io.in(i).bits.hyperinst := DontCare
  }

  assert(PopCount(s0_tlb_fire_vec) <= 1.U, "s0_tlb_fire_vec should be one-hot or empty")

  // s1: send out the req
  val s1_tlb_req_valid = GatedValidRegNext(tlb_req_arb.io.out.valid)
  val s1_tlb_req_bits = RegEnable(tlb_req_arb.io.out.bits, tlb_req_arb.io.out.valid)
  val s1_tlb_req_index = RegEnable(OHToUInt(s0_tlb_fire_vec.asUInt), tlb_req_arb.io.out.valid)
  val s1_l1_tlb_evict = s1_l1_alloc && (s1_l1_index === s1_tlb_req_index)
  val s1_l2_tlb_evict = s1_l2_alloc && ((s1_l2_index + MLP_L1_SIZE.U) === s1_tlb_req_index)
  val s1_tlb_evict = s1_l1_tlb_evict || s1_l2_tlb_evict
  io.tlb_req.req.valid := s1_tlb_req_valid && !s1_tlb_evict
  io.tlb_req.req.bits := s1_tlb_req_bits
  io.tlb_req.req_kill := false.B
  tlb_req_arb.io.out.ready := true.B

  XSPerfAccumulate("s1_tlb_req_sent", io.tlb_req.req.valid)
  XSPerfAccumulate("s1_tlb_req_evict", s1_tlb_req_valid && s1_tlb_evict)

  // s2: get response from tlb
  val s2_tlb_resp = io.tlb_req.resp
  val s2_tlb_update_index = RegEnable(s1_tlb_req_index, s1_tlb_req_valid)
  val s2_l1_tlb_evict = s1_l1_alloc && (s1_l1_index === s2_tlb_update_index)
  val s2_l2_tlb_evict = s1_l2_alloc && ((s1_l2_index + MLP_L1_SIZE.U) === s2_tlb_update_index)
  val s2_tlb_evict = s2_l1_tlb_evict || s2_l2_tlb_evict
  when(s2_tlb_resp.valid && !s2_tlb_evict) {
    when(s2_tlb_update_index < MLP_L1_SIZE.U) {
      l1_array(s2_tlb_update_index).is_vaddr := s2_tlb_resp.bits.miss

      when(!s2_tlb_resp.bits.miss) {
        l1_array(s2_tlb_update_index).region := Cat(0.U((VAddrBits - PAddrBits).W), s2_tlb_resp.bits.paddr.head(s2_tlb_resp.bits.paddr.head.getWidth - 1, REGION_TAG_OFFSET))
        when(s2_tlb_resp.bits.excp.head.pf.ld || s2_tlb_resp.bits.excp.head.af.ld) {
          l1_array(s2_tlb_update_index).invalidate()
        }
      }
    }.otherwise {
      val inner_index = s2_tlb_update_index - MLP_L1_SIZE.U
      l2_array(inner_index).is_vaddr := s2_tlb_resp.bits.miss

      when(!s2_tlb_resp.bits.miss) {
        l2_array(inner_index).region := Cat(0.U((VAddrBits - PAddrBits).W), s2_tlb_resp.bits.paddr.head(s2_tlb_resp.bits.paddr.head.getWidth - 1, REGION_TAG_OFFSET))
        when(s2_tlb_resp.bits.excp.head.pf.ld || s2_tlb_resp.bits.excp.head.af.ld) {
          l2_array(inner_index).invalidate()
        }
      }
    }
  }
  s2_tlb_resp.ready := true.B

  XSPerfAccumulate("s2_tlb_resp_valid", s2_tlb_resp.valid)
  XSPerfAccumulate("s2_tlb_resp_evict", s2_tlb_resp.valid && s2_tlb_evict)
  XSPerfAccumulate("s2_tlb_resp_miss", s2_tlb_resp.valid && !s2_tlb_evict && s2_tlb_resp.bits.miss)
  XSPerfAccumulate("s2_tlb_resp_updated", s2_tlb_resp.valid && !s2_tlb_evict && !s2_tlb_resp.bits.miss)
  XSPerfAccumulate("s2_tlb_resp_page_fault", s2_tlb_resp.valid && !s2_tlb_evict && !s2_tlb_resp.bits.miss && s2_tlb_resp.bits.excp.head.pf.ld)
  XSPerfAccumulate("s2_tlb_resp_access_fault", s2_tlb_resp.valid && !s2_tlb_evict && !s2_tlb_resp.bits.miss && s2_tlb_resp.bits.excp.head.af.ld)
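  // the l1 pf pipeline is decoupled from the enq pipeline: s0 arbitrates over all ready
  // entries and marks the chosen block in sent_vec; s1 holds the request until the dcache
  // is ready, stalling while the enq pipeline touches the same entry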
  // l1 pf
  // s0: generate prefetch req paddr per entry, arb them
  val s0_pf_fire_vec = VecInit((0 until MLP_L1_SIZE).map { case i => l1_pf_req_arb.io.in(i).fire })
  val s1_pf_fire_vec = GatedValidRegNext(s0_pf_fire_vec)

  val s0_pf_fire = l1_pf_req_arb.io.out.fire
  val s0_pf_index = l1_pf_req_arb.io.chosen
  val s0_pf_candidate_oh = get_candidate_oh(l1_pf_req_arb.io.out.bits.req.paddr)

  for (i <- 0 until MLP_L1_SIZE) {
    val evict = s1_l1_alloc && (s1_l1_index === i.U)
    l1_pf_req_arb.io.in(i).valid := l1_array(i).can_send_pf() && !evict
    l1_pf_req_arb.io.in(i).bits.req.paddr := l1_array(i).get_pf_addr()
    l1_pf_req_arb.io.in(i).bits.req.alias := l1_array(i).alias
    l1_pf_req_arb.io.in(i).bits.req.confidence := io.confidence
    l1_pf_req_arb.io.in(i).bits.req.is_store := false.B
    l1_pf_req_arb.io.in(i).bits.req.pf_source := l1_array(i).source
    l1_pf_req_arb.io.in(i).bits.debug_vaddr := l1_array(i).get_pf_debug_vaddr()
  }

  when(s0_pf_fire) {
    l1_array(s0_pf_index).sent_vec := l1_array(s0_pf_index).sent_vec | s0_pf_candidate_oh
  }

  assert(PopCount(s0_pf_fire_vec) <= 1.U, "s0_pf_fire_vec should be one-hot or empty")

  // s1: send out to dcache
  val s1_pf_valid = Reg(Bool())
  val s1_pf_bits = RegEnable(l1_pf_req_arb.io.out.bits, l1_pf_req_arb.io.out.fire)
  val s1_pf_index = RegEnable(s0_pf_index, l1_pf_req_arb.io.out.fire)
  val s1_pf_candidate_oh = RegEnable(s0_pf_candidate_oh, l1_pf_req_arb.io.out.fire)
  val s1_pf_evict = s1_l1_alloc && (s1_l1_index === s1_pf_index)
  val s1_pf_update = s1_l1_update && (s1_l1_index === s1_pf_index)
  val s1_pf_can_go = io.l1_req.ready && !s1_pf_evict && !s1_pf_update
  val s1_pf_fire = s1_pf_valid && s1_pf_can_go

  when(s1_pf_can_go) {
    s1_pf_valid := false.B
  }

  when(l1_pf_req_arb.io.out.fire) {
    s1_pf_valid := true.B
  }

  when(s1_pf_fire) {
    l1_array(s1_pf_index).bit_vec := l1_array(s1_pf_index).bit_vec & ~s1_pf_candidate_oh
  }

  io.l1_req.valid := s1_pf_valid && !s1_pf_evict && !s1_pf_update && (s1_pf_bits.req.paddr >= 0x80000000L.U) && io.enable
  io.l1_req.bits := s1_pf_bits.req

  l1_pf_req_arb.io.out.ready := s1_pf_can_go || !s1_pf_valid

  assert(!((s1_l1_alloc || s1_l1_update) && s1_pf_fire && (s1_l1_index === s1_pf_index)), "pf pipeline & enq pipeline bit_vec hazard!")

  XSPerfAccumulate("s1_pf_valid", s1_pf_valid)
  XSPerfAccumulate("s1_pf_block_by_pipe_unready", s1_pf_valid && !io.l1_req.ready)
  XSPerfAccumulate("s1_pf_block_by_enq_alloc_hazard", s1_pf_valid && s1_pf_evict)
  XSPerfAccumulate("s1_pf_block_by_enq_update_hazard", s1_pf_valid && s1_pf_update)
  XSPerfAccumulate("s1_pf_fire", s1_pf_fire)

  // l2 pf
  // s0: generate prefetch req paddr per entry, arb them, send out
  io.l2_pf_addr.valid := l2_pf_req_arb.io.out.valid
  io.l2_pf_addr.bits := l2_pf_req_arb.io.out.bits.req

  l2_pf_req_arb.io.out.ready := true.B
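  // unlike the l1 path, l2 prefetch reqs are accepted unconditionally (out.ready is tied
  // high), so the chosen entry's sent_vec is marked in the same cycle the req goes out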
  for (i <- 0 until MLP_L2L3_SIZE) {
    val evict = s1_l2_alloc && (s1_l2_index === i.U)
    l2_pf_req_arb.io.in(i).valid := l2_array(i).can_send_pf() && (l2_array(i).sink === SINK_L2) && !evict
    l2_pf_req_arb.io.in(i).bits.req.addr := l2_array(i).get_pf_addr()
    l2_pf_req_arb.io.in(i).bits.req.source := MuxLookup(l2_array(i).source.value, MemReqSource.Prefetch2L2Unknown.id.U)(Seq(
      L1_HW_PREFETCH_STRIDE -> MemReqSource.Prefetch2L2Stride.id.U,
      L1_HW_PREFETCH_STREAM -> MemReqSource.Prefetch2L2Stream.id.U
    ))
    l2_pf_req_arb.io.in(i).bits.debug_vaddr := l2_array(i).get_pf_debug_vaddr()
  }

  when(l2_pf_req_arb.io.out.valid) {
    l2_array(l2_pf_req_arb.io.chosen).sent_vec := l2_array(l2_pf_req_arb.io.chosen).sent_vec | get_candidate_oh(l2_pf_req_arb.io.out.bits.req.addr)
  }

  val stream_out_debug_table = ChiselDB.createTable("StreamPFTraceOut" + p(XSCoreParamsKey).HartId.toString, new StreamPFTraceOutEntry, basicDB = false)
  val l1_debug_data = Wire(new StreamPFTraceOutEntry)
  val l2_debug_data = Wire(new StreamPFTraceOutEntry)
  l1_debug_data.PFVaddr := l1_pf_req_arb.io.out.bits.debug_vaddr
  l1_debug_data.PFSink := SINK_L1
  l2_debug_data.PFVaddr := l2_pf_req_arb.io.out.bits.debug_vaddr
  l2_debug_data.PFSink := SINK_L2

  stream_out_debug_table.log(
    data = l1_debug_data,
    en = l1_pf_req_arb.io.out.fire && (l1_pf_req_arb.io.out.bits.req.pf_source.value === L1_HW_PREFETCH_STREAM),
    site = "StreamPFTraceOut",
    clock = clock,
    reset = reset
  )
  stream_out_debug_table.log(
    data = l2_debug_data,
    en = l2_pf_req_arb.io.out.fire && (l2_pf_req_arb.io.out.bits.req.source === MemReqSource.Prefetch2L2Stream.id.U),
    site = "StreamPFTraceOut",
    clock = clock,
    reset = reset
  )

  // last level cache pf
  // s0: generate prefetch req paddr per entry, arb them, send out
  io.l3_pf_addr.valid := l3_pf_req_arb.io.out.valid
  io.l3_pf_addr.bits := l3_pf_req_arb.io.out.bits

  l3_pf_req_arb.io.out.ready := true.B

  for (i <- 0 until MLP_L2L3_SIZE) {
    val evict = s1_l2_alloc && (s1_l2_index === i.U)
    l3_pf_req_arb.io.in(i).valid := l2_array(i).can_send_pf() && (l2_array(i).sink === SINK_L3) && !evict
    l3_pf_req_arb.io.in(i).bits := l2_array(i).get_pf_addr()
  }

  when(l3_pf_req_arb.io.out.valid) {
    l2_array(l3_pf_req_arb.io.chosen).sent_vec := l2_array(l3_pf_req_arb.io.chosen).sent_vec | get_candidate_oh(l3_pf_req_arb.io.out.bits)
  }

  // reset meta to avoid multi-hit problem
  for (i <- 0 until MLP_SIZE) {
    if (i < MLP_L1_SIZE) {
      when(reset.asBool || RegNext(io.flush)) {
        l1_array(i).reset(i)
      }
    } else {
      when(reset.asBool || RegNext(io.flush)) {
        l2_array(i - MLP_L1_SIZE).reset(i - MLP_L1_SIZE)
      }
    }
  }

  XSPerfAccumulate("l2_prefetch_queue_busy", io.l2PfqBusy)
  XSPerfHistogram("filter_active", PopCount(VecInit(l1_array.map(_.can_send_pf()) ++ l2_array.map(_.can_send_pf())).asUInt), true.B, 0, MLP_SIZE, 1)
  XSPerfHistogram("l1_filter_active", PopCount(VecInit(l1_array.map(_.can_send_pf())).asUInt), true.B, 0, MLP_L1_SIZE, 1)
  XSPerfHistogram("l2_filter_active", PopCount(VecInit(l2_array.map(x => x.can_send_pf() && (x.sink === SINK_L2))).asUInt), true.B, 0, MLP_L2L3_SIZE, 1)
  XSPerfHistogram("l3_filter_active", PopCount(VecInit(l2_array.map(x => x.can_send_pf() && (x.sink === SINK_L3))).asUInt), true.B, 0, MLP_L2L3_SIZE, 1)
}
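// L1Prefetcher glues the pieces together: per-source TrainFilters feed the stream
// (StreamBitVectorArray) and stride (StrideMetaArray) engines, whose prefetch requests
// are merged (stream first) into the MutiLevelPrefetchFilter, which translates, filters,
// and dispatches them to the L1/L2/L3 request ports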
class L1Prefetcher(implicit p: Parameters) extends BasePrefecher with HasStreamPrefetchHelper with HasStridePrefetchHelper {
  val pf_ctrl = IO(Input(new PrefetchControlBundle))
  val stride_train = IO(Flipped(Vec(backendParams.LduCnt + backendParams.HyuCnt, ValidIO(new LdPrefetchTrainBundle()))))
  val l2PfqBusy = IO(Input(Bool()))

  val stride_train_filter = Module(new TrainFilter(STRIDE_FILTER_SIZE, "stride"))
  val stride_meta_array = Module(new StrideMetaArray)
  val stream_train_filter = Module(new TrainFilter(STREAM_FILTER_SIZE, "stream"))
  val stream_bit_vec_array = Module(new StreamBitVectorArray)
  val pf_queue_filter = Module(new MutiLevelPrefetchFilter)

  // for now, if the stream is disabled, the train and prefetch processes continue, but no reqs are sent out
  val enable = io.enable
  val flush = pf_ctrl.flush

  stream_train_filter.io.ld_in.zipWithIndex.foreach {
    case (ld_in, i) => {
      ld_in.valid := io.ld_in(i).valid && enable
      ld_in.bits := io.ld_in(i).bits
    }
  }
  stream_train_filter.io.enable := enable
  stream_train_filter.io.flush := flush

  stride_train_filter.io.ld_in.zipWithIndex.foreach {
    case (ld_in, i) => {
      ld_in.valid := stride_train(i).valid && enable
      ld_in.bits := stride_train(i).bits
    }
  }
  stride_train_filter.io.enable := enable
  stride_train_filter.io.flush := flush

  stream_bit_vec_array.io.enable := enable
  stream_bit_vec_array.io.flush := flush
  stream_bit_vec_array.io.dynamic_depth := pf_ctrl.dynamic_depth
  stream_bit_vec_array.io.train_req <> stream_train_filter.io.train_req

  stride_meta_array.io.enable := enable
  stride_meta_array.io.flush := flush
  stride_meta_array.io.dynamic_depth := 0.U
  stride_meta_array.io.train_req <> stride_train_filter.io.train_req
  stride_meta_array.io.stream_lookup_req <> stream_bit_vec_array.io.stream_lookup_req
  stride_meta_array.io.stream_lookup_resp <> stream_bit_vec_array.io.stream_lookup_resp

  // stream has higher priority than stride
  pf_queue_filter.io.l1_prefetch_req.valid := stream_bit_vec_array.io.l1_prefetch_req.valid || stride_meta_array.io.l1_prefetch_req.valid
  pf_queue_filter.io.l1_prefetch_req.bits := Mux(
    stream_bit_vec_array.io.l1_prefetch_req.valid,
    stream_bit_vec_array.io.l1_prefetch_req.bits,
    stride_meta_array.io.l1_prefetch_req.bits
  )

  pf_queue_filter.io.l2_l3_prefetch_req.valid := stream_bit_vec_array.io.l2_l3_prefetch_req.valid || stride_meta_array.io.l2_l3_prefetch_req.valid
  pf_queue_filter.io.l2_l3_prefetch_req.bits := Mux(
    stream_bit_vec_array.io.l2_l3_prefetch_req.valid,
    stream_bit_vec_array.io.l2_l3_prefetch_req.bits,
    stride_meta_array.io.l2_l3_prefetch_req.bits
  )

  io.l1_req.valid := pf_queue_filter.io.l1_req.valid && enable && pf_ctrl.enable
  io.l1_req.bits := pf_queue_filter.io.l1_req.bits

  pf_queue_filter.io.l1_req.ready := Mux(pf_ctrl.enable, io.l1_req.ready, true.B)
  pf_queue_filter.io.tlb_req <> io.tlb_req
  pf_queue_filter.io.enable := enable
  pf_queue_filter.io.flush := flush
  pf_queue_filter.io.confidence := pf_ctrl.confidence
  pf_queue_filter.io.l2PfqBusy := l2PfqBusy

  io.l2_req.valid := pf_queue_filter.io.l2_pf_addr.valid && pf_queue_filter.io.l2_pf_addr.bits.addr > 0x80000000L.U && enable && pf_ctrl.enable
  io.l2_req.bits := pf_queue_filter.io.l2_pf_addr.bits

  io.l3_req.valid := pf_queue_filter.io.l3_pf_addr.valid && pf_queue_filter.io.l3_pf_addr.bits > 0x80000000L.U && enable && pf_ctrl.enable
  io.l3_req.bits := pf_queue_filter.io.l3_pf_addr.bits
}