package xiangshan.mem.prefetch

import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.cache.HasDCacheParameters
import xiangshan.cache.mmu._
import xiangshan.mem.{L1PrefetchReq, LdPrefetchTrainBundle}
import xiangshan.mem.trace._
import xiangshan.mem.L1PrefetchSource

trait HasL1PrefetchHelper extends HasCircularQueuePtrHelper with HasDCacheParameters {
  // region related
  val REGION_SIZE = 1024
  val PAGE_OFFSET = 12
  val BLOCK_OFFSET = log2Up(dcacheParameters.blockBytes)
  val BIT_VEC_WITDH = REGION_SIZE / dcacheParameters.blockBytes
  val REGION_BITS = log2Up(BIT_VEC_WITDH)
  val REGION_TAG_OFFSET = BLOCK_OFFSET + REGION_BITS
  val REGION_TAG_BITS = VAddrBits - BLOCK_OFFSET - REGION_BITS

  // hash related
  val VADDR_HASH_WIDTH = 5
  val BLK_ADDR_RAW_WIDTH = 10
  val HASH_TAG_WIDTH = VADDR_HASH_WIDTH + BLK_ADDR_RAW_WIDTH

  // capacity related
  val MLP_SIZE = 32
  val MLP_L1_SIZE = 16
  val MLP_L2L3_SIZE = MLP_SIZE - MLP_L1_SIZE

  // prefetch sink related
  val SINK_BITS = 2
  def SINK_L1 = "b00".U
  def SINK_L2 = "b01".U
  def SINK_L3 = "b10".U

  // vaddr: | region tag | region bits | block offset |
  def get_region_tag(vaddr: UInt) = {
    require(vaddr.getWidth == VAddrBits)
    vaddr(vaddr.getWidth - 1, REGION_TAG_OFFSET)
  }

  def get_region_bits(vaddr: UInt) = {
    require(vaddr.getWidth == VAddrBits)
    vaddr(REGION_TAG_OFFSET - 1, BLOCK_OFFSET)
  }

  def block_addr(x: UInt): UInt = {
    x(x.getWidth - 1, BLOCK_OFFSET)
  }

  // XOR-fold the low 3 * VADDR_HASH_WIDTH bits into a VADDR_HASH_WIDTH-bit hash
  def vaddr_hash(x: UInt): UInt = {
    val width = VADDR_HASH_WIDTH
    val low = x(width - 1, 0)
    val mid = x(2 * width - 1, width)
    val high = x(3 * width - 1, 2 * width)
    low ^ mid ^ high
  }

  def pc_hash_tag(x: UInt): UInt = {
    val low = x(BLK_ADDR_RAW_WIDTH - 1, 0)
    val high = x(BLK_ADDR_RAW_WIDTH - 1 + 3 * VADDR_HASH_WIDTH, BLK_ADDR_RAW_WIDTH)
    val high_hash = vaddr_hash(high)
    Cat(high_hash, low)
  }

  def block_hash_tag(x: UInt): UInt = {
    val blk_addr = block_addr(x)
    val low = blk_addr(BLK_ADDR_RAW_WIDTH - 1, 0)
    val high = blk_addr(BLK_ADDR_RAW_WIDTH - 1 + 3 * VADDR_HASH_WIDTH, BLK_ADDR_RAW_WIDTH)
    val high_hash = vaddr_hash(high)
    Cat(high_hash, low)
  }

  def region_hash_tag(region_tag: UInt): UInt = {
    val low = region_tag(BLK_ADDR_RAW_WIDTH - 1, 0)
    val high = region_tag(BLK_ADDR_RAW_WIDTH - 1 + 3 * VADDR_HASH_WIDTH, BLK_ADDR_RAW_WIDTH)
    val high_hash = vaddr_hash(high)
    Cat(high_hash, low)
  }

  def region_to_block_addr(region_tag: UInt, region_bits: UInt): UInt = {
    Cat(region_tag, region_bits)
  }

  def get_candidate_oh(x: UInt): UInt = {
    require(x.getWidth == PAddrBits)
    UIntToOH(x(REGION_BITS + BLOCK_OFFSET - 1, BLOCK_OFFSET))
  }

  def toBinary(n: Int): String = n match {
    case 0 | 1 => s"$n"
    case _ => s"${toBinary(n / 2)}${n % 2}"
  }
}
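
// Worked example of the constants above (a sketch, assuming the common configuration
// of 64B dcache blocks and VAddrBits = 48; the real values depend on dcacheParameters
// and the platform):
//   BLOCK_OFFSET      = log2Up(64)  = 6
//   BIT_VEC_WITDH     = 1024 / 64   = 16   // blocks per 1KB region
//   REGION_BITS       = log2Up(16)  = 4
//   REGION_TAG_OFFSET = 6 + 4       = 10
// So for vaddr = 0x0123_4567:
//   get_region_bits(vaddr) = vaddr(9, 6)  = 0x5   // block index inside the region
//   get_region_tag(vaddr)  = vaddr(47, 10)        // identifies the 1KB region
// and get_candidate_oh(paddr) one-hots paddr(9, 6) into a BIT_VEC_WITDH-wide mask.
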
trait HasTrainFilterHelper extends HasCircularQueuePtrHelper {
  def reorder[T <: LdPrefetchTrainBundle](source: Vec[ValidIO[T]]): Vec[ValidIO[T]] = {
    if(source.length == 1) {
      source
    }else if(source.length == 2) {
      val source_v = source.map(_.valid)
      val res = Wire(source.cloneType)
      // source 1 is older than source 0 (only when source0/1 are both valid)
      val source_1_older = Mux(Cat(source_v).andR,
        isBefore(source(1).bits.uop.robIdx, source(0).bits.uop.robIdx),
        false.B
      )
      when(source_1_older) {
        res(0) := source(1)
        res(1) := source(0)
      }.otherwise {
        res := source
      }

      res
    }else if(source.length == 3) {
      // TODO: generalize
      // combinational 3-input sorting network built from the 2-input case:
      // compare-exchange (0,1), then (1,2), then (0,1) again
      val res_0_1 = Wire(source.cloneType)
      val res_1_2 = Wire(source.cloneType)
      val res = Wire(source.cloneType)

      val tmp = reorder(VecInit(source.slice(0, 2)))
      res_0_1(0) := tmp(0)
      res_0_1(1) := tmp(1)
      res_0_1(2) := source(2)
      val tmp_1 = reorder(VecInit(res_0_1.slice(1, 3)))
      res_1_2(0) := res_0_1(0)
      res_1_2(1) := tmp_1(0)
      res_1_2(2) := tmp_1(1)
      val tmp_2 = reorder(VecInit(res_1_2.slice(0, 2)))
      res(0) := tmp_2(0)
      res(1) := tmp_2(1)
      res(2) := res_1_2(2)

      res
    }else {
      require(false, "for now, reordering 4 or more sources is not supported")
      source
    }
  }
}
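
// The three-entry case in reorder above is a bubble-sort network: compare-exchange
// (0,1), then (1,2), then (0,1) again, which sorts any three elements oldest-first.
// The same dataflow in plain Scala (illustration only, with Ints standing in for
// robIdx age):
//   def cmpEx(a: Int, b: Int) = if (b < a) (b, a) else (a, b)
//   val (x0, x1) = cmpEx(in0, in1)
//   val (y1, y2) = cmpEx(x1, in2)
//   val (z0, z1) = cmpEx(x0, y1)
//   // (z0, z1, y2) is now oldest-first
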
// get prefetch train reqs from `backendParams.LduCnt` load pipelines (up to `backendParams.LduCnt`/cycle)
// filter by cache line address, send out train req to stride (up to 1 req/cycle)
class TrainFilter(size: Int, name: String)(implicit p: Parameters) extends XSModule with HasL1PrefetchHelper with HasTrainFilterHelper {
  val io = IO(new Bundle() {
    val enable = Input(Bool())
    val flush = Input(Bool())
    // train input, only from load for now
    val ld_in = Flipped(Vec(backendParams.LduCnt, ValidIO(new LdPrefetchTrainBundle())))
    // filter out
    val train_req = DecoupledIO(new PrefetchReqBundle())
  })

  class Ptr(implicit p: Parameters) extends CircularQueuePtr[Ptr]( p => size ){}
  object Ptr {
    def apply(f: Bool, v: UInt)(implicit p: Parameters): Ptr = {
      val ptr = Wire(new Ptr)
      ptr.flag := f
      ptr.value := v
      ptr
    }
  }

  val entries = Reg(Vec(size, new PrefetchReqBundle))
  val valids = RegInit(VecInit(Seq.fill(size){ (false.B) }))

  // enq
  val enqLen = backendParams.LduCnt
  val enqPtrExt = RegInit(VecInit((0 until enqLen).map(_.U.asTypeOf(new Ptr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new Ptr))

  val deqPtr = WireInit(deqPtrExt.value)

  require(size >= enqLen)

  val ld_in_reordered = reorder(io.ld_in)
  val reqs_l = ld_in_reordered.map(_.bits.asPrefetchReqBundle())
  val reqs_vl = ld_in_reordered.map(_.valid)
  val needAlloc = Wire(Vec(enqLen, Bool()))
  val canAlloc = Wire(Vec(enqLen, Bool()))

  for(i <- (0 until enqLen)) {
    val req = reqs_l(i)
    val req_v = reqs_vl(i)
    val index = PopCount(needAlloc.take(i))
    val allocPtr = enqPtrExt(index)
    val entry_match = Cat(entries.zip(valids).map {
      case(e, v) => v && block_hash_tag(e.vaddr) === block_hash_tag(req.vaddr)
    }).orR
    val prev_enq_match = if(i == 0) false.B else Cat(reqs_l.zip(reqs_vl).take(i).map {
      case(pre, pre_v) => pre_v && block_hash_tag(pre.vaddr) === block_hash_tag(req.vaddr)
    }).orR

    needAlloc(i) := req_v && !entry_match && !prev_enq_match
    canAlloc(i) := needAlloc(i) && allocPtr >= deqPtrExt && io.enable

    when(canAlloc(i)) {
      valids(allocPtr.value) := true.B
      entries(allocPtr.value) := req
    }
  }
  val allocNum = PopCount(canAlloc)

  enqPtrExt.foreach{case x => when(canAlloc.asUInt.orR) {x := x + allocNum} }

  // deq
  io.train_req.valid := false.B
  io.train_req.bits := DontCare
  valids.zip(entries).zipWithIndex.foreach {
    case((valid, entry), i) => {
      when(deqPtr === i.U) {
        io.train_req.valid := valid && io.enable
        io.train_req.bits := entry
      }
    }
  }

  when(io.train_req.fire) {
    valids(deqPtr) := false.B
    deqPtrExt := deqPtrExt + 1.U
  }

  when(RegNext(io.flush)) {
    valids.foreach {case valid => valid := false.B}
    (0 until enqLen).map {case i => enqPtrExt(i) := i.U.asTypeOf(new Ptr)}
    deqPtrExt := 0.U.asTypeOf(new Ptr)
  }

  XSPerfAccumulate(s"${name}_train_filter_full", PopCount(valids) === size.U)
  XSPerfAccumulate(s"${name}_train_filter_half", PopCount(valids) >= (size / 2).U)
  XSPerfAccumulate(s"${name}_train_filter_empty", PopCount(valids) === 0.U)

  val raw_enq_pattern = Cat(reqs_vl)
  val filtered_enq_pattern = Cat(needAlloc)
  val actual_enq_pattern = Cat(canAlloc)
  XSPerfAccumulate(s"${name}_train_filter_enq", allocNum > 0.U)
  XSPerfAccumulate(s"${name}_train_filter_deq", io.train_req.fire)
  for(i <- 0 until (1 << enqLen)) {
    XSPerfAccumulate(s"${name}_train_filter_raw_enq_pattern_${toBinary(i)}", raw_enq_pattern === i.U)
    XSPerfAccumulate(s"${name}_train_filter_filtered_enq_pattern_${toBinary(i)}", filtered_enq_pattern === i.U)
    XSPerfAccumulate(s"${name}_train_filter_actual_enq_pattern_${toBinary(i)}", actual_enq_pattern === i.U)
  }
}
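
// Usage sketch (hypothetical size; the real instances are created in L1Prefetcher
// below with STREAM_FILTER_SIZE / STRIDE_FILTER_SIZE): a TrainFilter deduplicates
// same-cache-line trains by block_hash_tag, both against queued entries and against
// older same-cycle enqueues, so the downstream engine sees a line at most once per
// queue residence:
//   val train_filter = Module(new TrainFilter(size = 6, name = "stream"))
//   train_filter.io.ld_in     <> ... // from the load pipelines
//   train_filter.io.train_req <> ... // to the stream/stride engine
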
class MLPReqFilterBundle(implicit p: Parameters) extends XSBundle with HasL1PrefetchHelper {
  val tag = UInt(HASH_TAG_WIDTH.W)
  val region = UInt(REGION_TAG_BITS.W)
  val bit_vec = UInt(BIT_VEC_WITDH.W)
  // NOTE: l1 will not use sent_vec, for making more prefetch reqs to l1 dcache
  val sent_vec = UInt(BIT_VEC_WITDH.W)
  val sink = UInt(SINK_BITS.W)
  val alias = UInt(2.W)
  val is_vaddr = Bool()
  val source = new L1PrefetchSource()
  val debug_va_region = UInt(REGION_TAG_BITS.W)

  def reset(index: Int) = {
    tag := region_hash_tag(index.U)
    region := index.U
    bit_vec := 0.U
    sent_vec := 0.U
    sink := SINK_L1
    alias := 0.U
    is_vaddr := false.B
    source.value := L1_HW_PREFETCH_NULL
    debug_va_region := 0.U
  }

  def tag_match(valid1: Bool, valid2: Bool, new_tag: UInt): Bool = {
    require(new_tag.getWidth == HASH_TAG_WIDTH)
    (tag === new_tag) && valid1 && valid2
  }

  def update(update_bit_vec: UInt, update_sink: UInt) = {
    bit_vec := bit_vec | update_bit_vec
    when(update_sink < sink) {
      bit_vec := (bit_vec & ~sent_vec) | update_bit_vec
      sink := update_sink
    }

    assert(PopCount(update_bit_vec) >= 1.U, "update vector should contain at least one valid bit")
  }

  def can_send_pf(valid: Bool): Bool = {
    Mux(
      sink === SINK_L1,
      !is_vaddr && bit_vec.orR,
      !is_vaddr && (bit_vec & ~sent_vec).orR
    ) && valid
  }

  def may_be_replace(valid: Bool): Bool = {
    // either invalid or has sent out all reqs
    !valid || RegNext(PopCount(sent_vec) === BIT_VEC_WITDH.U)
  }

  def get_pf_addr(): UInt = {
    require(PAddrBits <= VAddrBits)
    require((region.getWidth + REGION_BITS + BLOCK_OFFSET) == VAddrBits)

    val candidate = Mux(
      sink === SINK_L1,
      PriorityEncoder(bit_vec).asTypeOf(UInt(REGION_BITS.W)),
      PriorityEncoder(bit_vec & ~sent_vec).asTypeOf(UInt(REGION_BITS.W))
    )
    Cat(region, candidate, 0.U(BLOCK_OFFSET.W))
  }

  def get_pf_debug_vaddr(): UInt = {
    val candidate = Mux(
      sink === SINK_L1,
      PriorityEncoder(bit_vec).asTypeOf(UInt(REGION_BITS.W)),
      PriorityEncoder(bit_vec & ~sent_vec).asTypeOf(UInt(REGION_BITS.W))
    )
    Cat(debug_va_region, candidate, 0.U(BLOCK_OFFSET.W))
  }

  def get_tlb_va(): UInt = {
    require((region.getWidth + REGION_TAG_OFFSET) == VAddrBits)
    Cat(region, 0.U(REGION_TAG_OFFSET.W))
  }

  def fromStreamPrefetchReqBundle(x : StreamPrefetchReqBundle): MLPReqFilterBundle = {
    require(PAGE_OFFSET >= REGION_TAG_OFFSET, "region is greater than 4k, alias bit may be incorrect")

    val res = Wire(new MLPReqFilterBundle)
    res.tag := region_hash_tag(x.region)
    res.region := x.region
    res.bit_vec := x.bit_vec
    res.sent_vec := 0.U
    res.sink := x.sink
    res.is_vaddr := true.B
    res.source := x.source
    res.alias := x.region(PAGE_OFFSET - REGION_TAG_OFFSET + 1, PAGE_OFFSET - REGION_TAG_OFFSET)
    res.debug_va_region := x.region

    res
  }

  def invalidate() = {
    // disable sending pf req
    when(sink === SINK_L1) {
      bit_vec := 0.U(BIT_VEC_WITDH.W)
    }.otherwise {
      sent_vec := ~(0.U(BIT_VEC_WITDH.W))
    }
    // disable sending tlb req
    is_vaddr := false.B
  }
}
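
// bit_vec / sent_vec lifecycle, worked through with illustrative 8-bit vectors
// (bit i = block i of the region):
//   alloc : bit_vec = 0b0000_1111, sent_vec = 0b0000_0000   // four pending blocks
//   send  : get_pf_addr() picks block 0 (the priority encoder); the L1 pf pipeline
//           sets sent_vec(0) at issue and clears bit_vec(0) once the req fires,
//           while the L2/L3 pipelines only set sent_vec(0)
//   update: update(0b0011_0000, s) ORs new candidate blocks in; if s is a closer
//           sink (s < sink), already-sent blocks are re-armed via
//           (bit_vec & ~sent_vec) | update_bit_vec and the sink is upgraded
// Note the asymmetry in can_send_pf: SINK_L1 tracks pending work in bit_vec alone
// (see the sent_vec NOTE above), while L2/L3 send each block at most once
// (bit_vec & ~sent_vec).
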
// there are 5 independent pipelines inside
// 1. prefetch enqueue
// 2. tlb request
// 3. actual l1 prefetch
// 4. actual l2 prefetch
// 5. actual l3 prefetch
class MutiLevelPrefetchFilter(implicit p: Parameters) extends XSModule with HasL1PrefetchHelper {
  val io = IO(new XSBundle {
    val enable = Input(Bool())
    val flush = Input(Bool())
    val l1_prefetch_req = Flipped(ValidIO(new StreamPrefetchReqBundle))
    val l2_l3_prefetch_req = Flipped(ValidIO(new StreamPrefetchReqBundle))
    val tlb_req = new TlbRequestIO(nRespDups = 2)
    val pmp_resp = Flipped(new PMPRespBundle())
    val l1_req = DecoupledIO(new L1PrefetchReq())
    val l2_pf_addr = ValidIO(new L2PrefetchReq())
    val l3_pf_addr = ValidIO(UInt(PAddrBits.W)) // TODO: l3 pf source
    val confidence = Input(UInt(1.W))
    val l2PfqBusy = Input(Bool())
  })

  val l1_array = Reg(Vec(MLP_L1_SIZE, new MLPReqFilterBundle))
  val l2_array = Reg(Vec(MLP_L2L3_SIZE, new MLPReqFilterBundle))
  val l1_valids = RegInit(VecInit(Seq.fill(MLP_L1_SIZE)(false.B)))
  val l2_valids = RegInit(VecInit(Seq.fill(MLP_L2L3_SIZE)(false.B)))

  def _invalid(e: MLPReqFilterBundle, v: Bool): Unit = {
    v := false.B
    e.invalidate()
  }

  def invalid_array(i: UInt, isL2: Boolean): Unit = {
    if (isL2) {
      _invalid(l2_array(i), l2_valids(i))
    } else {
      _invalid(l1_array(i), l1_valids(i))
    }
  }

  def _reset(e: MLPReqFilterBundle, v: Bool, idx: Int): Unit = {
    v := false.B
    // only the control (valid) signals need resetting; the entry payload can be left as-is
    // e.reset(idx)
  }

  def reset_array(i: Int, isL2: Boolean): Unit = {
    if(isL2){
      _reset(l2_array(i), l2_valids(i), i)
    }else{
      _reset(l1_array(i), l1_valids(i), i)
    }
  }

  val l1_replacement = new ValidPseudoLRU(MLP_L1_SIZE)
  val l2_replacement = new ValidPseudoLRU(MLP_L2L3_SIZE)
  val tlb_req_arb = Module(new RRArbiterInit(new TlbReq, MLP_SIZE))
  val l1_pf_req_arb = Module(new RRArbiterInit(new Bundle {
    val req = new L1PrefetchReq
    val debug_vaddr = UInt(VAddrBits.W)
  }, MLP_L1_SIZE))
  val l2_pf_req_arb = Module(new RRArbiterInit(new Bundle {
    val req = new L2PrefetchReq
    val debug_vaddr = UInt(VAddrBits.W)
  }, MLP_L2L3_SIZE))
  val l3_pf_req_arb = Module(new RRArbiterInit(UInt(PAddrBits.W), MLP_L2L3_SIZE))

  val l1_opt_replace_vec = VecInit(l1_array.zip(l1_valids).map{case (e, v) => e.may_be_replace(v)})
  val l2_opt_replace_vec = VecInit(l2_array.zip(l2_valids).map{case (e, v) => e.may_be_replace(v)})
  // if we have something to replace, choose it; otherwise follow the PLRU order
  val l1_real_replace_vec = Mux(Cat(l1_opt_replace_vec).orR, l1_opt_replace_vec, VecInit(Seq.fill(MLP_L1_SIZE)(true.B)))
  val l2_real_replace_vec = Mux(Cat(l2_opt_replace_vec).orR, l2_opt_replace_vec, VecInit(Seq.fill(MLP_L2L3_SIZE)(true.B)))

  // l1 pf req enq
  // s0: hash tag match
  val s0_l1_can_accept = Wire(Bool())
  val s0_l1_valid = io.l1_prefetch_req.valid && s0_l1_can_accept
  val s0_l1_region = io.l1_prefetch_req.bits.region
  val s0_l1_region_hash = region_hash_tag(s0_l1_region)
  val s0_l1_match_vec = l1_array.zip(l1_valids).map{ case (e, v) => e.tag_match(v, s0_l1_valid, s0_l1_region_hash)}
  val s0_l1_hit = VecInit(s0_l1_match_vec).asUInt.orR
  val s0_l1_index = Wire(UInt(log2Up(MLP_L1_SIZE).W))
  val s0_l1_prefetch_req = (new MLPReqFilterBundle).fromStreamPrefetchReqBundle(io.l1_prefetch_req.bits)

  s0_l1_index := Mux(s0_l1_hit, OHToUInt(VecInit(s0_l1_match_vec).asUInt), l1_replacement.way(l1_real_replace_vec.reverse)._2)

  when(s0_l1_valid) {
    l1_replacement.access(s0_l1_index)
  }

  assert(!s0_l1_valid || PopCount(VecInit(s0_l1_match_vec)) <= 1.U, "req region should match no more than 1 entry")

  XSPerfAccumulate("s0_l1_enq_fire", s0_l1_valid)
  XSPerfAccumulate("s0_l1_enq_valid", io.l1_prefetch_req.valid)
  XSPerfAccumulate("s0_l1_cannot_enq", io.l1_prefetch_req.valid && !s0_l1_can_accept)

  // s1: alloc or update
  val s1_l1_valid = RegNext(s0_l1_valid)
  val s1_l1_region = RegEnable(s0_l1_region, s0_l1_valid)
  val s1_l1_region_hash = RegEnable(s0_l1_region_hash, s0_l1_valid)
  val s1_l1_hit = RegEnable(s0_l1_hit, s0_l1_valid)
  val s1_l1_index = RegEnable(s0_l1_index, s0_l1_valid)
  val s1_l1_prefetch_req = RegEnable(s0_l1_prefetch_req, s0_l1_valid)
  val s1_l1_alloc = s1_l1_valid && !s1_l1_hit
  val s1_l1_update = s1_l1_valid && s1_l1_hit
  s0_l1_can_accept := !(s1_l1_valid && s1_l1_alloc && (s0_l1_region_hash === s1_l1_region_hash))

  when(s1_l1_alloc) {
    l1_valids(s1_l1_index) := true.B
    l1_array(s1_l1_index) := s1_l1_prefetch_req
  }.elsewhen(s1_l1_update) {
    l1_array(s1_l1_index).update(
      update_bit_vec = s1_l1_prefetch_req.bit_vec,
      update_sink = s1_l1_prefetch_req.sink
    )
  }

  XSPerfAccumulate("s1_l1_enq_valid", s1_l1_valid)
  XSPerfAccumulate("s1_l1_enq_alloc", s1_l1_alloc)
  XSPerfAccumulate("s1_l1_enq_update", s1_l1_update)
  XSPerfAccumulate("l1_hash_conflict", s0_l1_valid && RegNext(s1_l1_valid) && (s0_l1_region =/= RegNext(s1_l1_region)) && (s0_l1_region_hash === RegNext(s1_l1_region_hash)))
  XSPerfAccumulate("s1_l1_enq_evict_useful_entry", s1_l1_alloc && l1_array(s1_l1_index).can_send_pf(l1_valids(s1_l1_index)))
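
  // Why s0_l1_can_accept exists, as a two-cycle sketch: if cycle N allocates an
  // entry for region R in s1 while cycle N+1's s0 request carries the same hash,
  // s0 would miss the not-yet-written entry and allocate a duplicate. Stalling s0
  // exactly when s1 is allocating the same hash keeps the array free of multi-hit,
  // which the "match no more than 1 entry" assert above relies on.
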
  // l2 l3 pf req enq
  // s0: hash tag match
  val s0_l2_can_accept = Wire(Bool())
  val s0_l2_valid = io.l2_l3_prefetch_req.valid && s0_l2_can_accept
  val s0_l2_region = io.l2_l3_prefetch_req.bits.region
  val s0_l2_region_hash = region_hash_tag(s0_l2_region)
  val s0_l2_match_vec = l2_array.zip(l2_valids).map{ case (e, v) => e.tag_match(v, s0_l2_valid, s0_l2_region_hash) }
  val s0_l2_hit = VecInit(s0_l2_match_vec).asUInt.orR
  val s0_l2_index = Wire(UInt(log2Up(MLP_L2L3_SIZE).W))
  val s0_l2_prefetch_req = (new MLPReqFilterBundle).fromStreamPrefetchReqBundle(io.l2_l3_prefetch_req.bits)

  s0_l2_index := Mux(s0_l2_hit, OHToUInt(VecInit(s0_l2_match_vec).asUInt), l2_replacement.way(l2_real_replace_vec.reverse)._2)

  when(s0_l2_valid) {
    l2_replacement.access(s0_l2_index)
  }

  assert(!s0_l2_valid || PopCount(VecInit(s0_l2_match_vec)) <= 1.U, "req region should match no more than 1 entry")

  XSPerfAccumulate("s0_l2_enq_fire", s0_l2_valid)
  XSPerfAccumulate("s0_l2_enq_valid", io.l2_l3_prefetch_req.valid)
  XSPerfAccumulate("s0_l2_cannot_enq", io.l2_l3_prefetch_req.valid && !s0_l2_can_accept)

  // s1: alloc or update
  val s1_l2_valid = RegNext(s0_l2_valid)
  val s1_l2_region = RegEnable(s0_l2_region, s0_l2_valid)
  val s1_l2_region_hash = RegEnable(s0_l2_region_hash, s0_l2_valid)
  val s1_l2_hit = RegEnable(s0_l2_hit, s0_l2_valid)
  val s1_l2_index = RegEnable(s0_l2_index, s0_l2_valid)
  val s1_l2_prefetch_req = RegEnable(s0_l2_prefetch_req, s0_l2_valid)
  val s1_l2_alloc = s1_l2_valid && !s1_l2_hit
  val s1_l2_update = s1_l2_valid && s1_l2_hit
  s0_l2_can_accept := !(s1_l2_valid && s1_l2_alloc && (s0_l2_region_hash === s1_l2_region_hash))

  when(s1_l2_alloc) {
    l2_valids(s1_l2_index) := true.B
    l2_array(s1_l2_index) := s1_l2_prefetch_req
  }.elsewhen(s1_l2_update) {
    l2_array(s1_l2_index).update(
      update_bit_vec = s1_l2_prefetch_req.bit_vec,
      update_sink = s1_l2_prefetch_req.sink
    )
  }

  XSPerfAccumulate("s1_l2_enq_valid", s1_l2_valid)
  XSPerfAccumulate("s1_l2_enq_alloc", s1_l2_alloc)
  XSPerfAccumulate("s1_l2_enq_update", s1_l2_update)
  XSPerfAccumulate("l2_hash_conflict", s0_l2_valid && RegNext(s1_l2_valid) && (s0_l2_region =/= RegNext(s1_l2_region)) && (s0_l2_region_hash === RegNext(s1_l2_region_hash)))
  XSPerfAccumulate("s1_l2_enq_evict_useful_entry", s1_l2_alloc && l2_array(s1_l2_index).can_send_pf(l2_valids(s1_l2_index)))

  // stream pf debug db here
  // Hit:
  //   for now, only pending = (region_bits & ~filter_bits) are treated as the pending
  //   requests; if a PfGen comes, the newly added requests are
  //   new_req = PfGen.region_bits & ~pending
  // Alloc:
  //   new_req = PfGen.region_bits
  val stream_pf_trace_debug_table = ChiselDB.createTable("StreamPFTrace" + p(XSCoreParamsKey).HartId.toString, new StreamPFTraceInEntry, basicDB = false)
  for (i <- 0 until BIT_VEC_WITDH) {
    // l1 enq log
    val hit_entry = l1_array(s0_l1_index)
    val new_req = Mux(
      s0_l1_hit,
      io.l1_prefetch_req.bits.bit_vec & ~(hit_entry.bit_vec),
      io.l1_prefetch_req.bits.bit_vec
    )
    val log_enable = s0_l1_valid && new_req(i) && (io.l1_prefetch_req.bits.source.value === L1_HW_PREFETCH_STREAM)
    val log_data = Wire(new StreamPFTraceInEntry)

    log_data.TriggerPC := io.l1_prefetch_req.bits.trigger_pc
    log_data.TriggerVaddr := io.l1_prefetch_req.bits.trigger_va
    log_data.PFVaddr := Cat(s0_l1_region, i.U(REGION_BITS.W), 0.U(log2Up(dcacheParameters.blockBytes).W))
    log_data.PFSink := s0_l1_prefetch_req.sink

    stream_pf_trace_debug_table.log(
      data = log_data,
      en = log_enable,
      site = "StreamPFTrace",
      clock = clock,
      reset = reset
    )
  }
  for (i <- 0 until BIT_VEC_WITDH) {
    // l2 l3 enq log
    val hit_entry = l2_array(s0_l2_index)
    val new_req = Mux(
      s0_l2_hit,
      io.l2_l3_prefetch_req.bits.bit_vec & ~(hit_entry.bit_vec),
      io.l2_l3_prefetch_req.bits.bit_vec
    )
    val log_enable = s0_l2_valid && new_req(i) && (io.l2_l3_prefetch_req.bits.source.value === L1_HW_PREFETCH_STREAM)
    val log_data = Wire(new StreamPFTraceInEntry)

    log_data.TriggerPC := io.l2_l3_prefetch_req.bits.trigger_pc
    log_data.TriggerVaddr := io.l2_l3_prefetch_req.bits.trigger_va
    log_data.PFVaddr := Cat(s0_l2_region, i.U(REGION_BITS.W), 0.U(log2Up(dcacheParameters.blockBytes).W))
    log_data.PFSink := s0_l2_prefetch_req.sink

    stream_pf_trace_debug_table.log(
      data = log_data,
      en = log_enable,
      site = "StreamPFTrace",
      clock = clock,
      reset = reset
    )
  }
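
  // new_req in the trace loops above, worked through (illustrative 8-bit vectors):
  //   Hit  : entry.bit_vec = 0b0011_1100, PfGen.bit_vec = 0b0111_1000
  //          new_req = 0b0111_1000 & ~0b0011_1100 = 0b0100_0000 // only the new block logs
  //   Alloc: new_req = PfGen.bit_vec                            // every block logs
  // Only requests whose source is L1_HW_PREFETCH_STREAM are traced.
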
  // tlb req
  // s0: arb all tlb reqs
  val s0_tlb_fire_vec = VecInit((0 until MLP_SIZE).map{case i => tlb_req_arb.io.in(i).fire})
  val s1_tlb_fire_vec = GatedValidRegNext(s0_tlb_fire_vec)
  val s2_tlb_fire_vec = GatedValidRegNext(s1_tlb_fire_vec)
  val s3_tlb_fire_vec = GatedValidRegNext(s2_tlb_fire_vec)
  val not_tlbing_vec = VecInit((0 until MLP_SIZE).map{case i =>
    !s1_tlb_fire_vec(i) && !s2_tlb_fire_vec(i) && !s3_tlb_fire_vec(i)
  })

  for(i <- 0 until MLP_SIZE) {
    val l1_evict = s1_l1_alloc && (s1_l1_index === i.U)
    val l2_evict = s1_l2_alloc && ((s1_l2_index + MLP_L1_SIZE.U) === i.U)
    if(i < MLP_L1_SIZE) {
      tlb_req_arb.io.in(i).valid := l1_valids(i) && l1_array(i).is_vaddr && not_tlbing_vec(i) && !l1_evict
      tlb_req_arb.io.in(i).bits.vaddr := l1_array(i).get_tlb_va()
    }else {
      tlb_req_arb.io.in(i).valid := l2_valids(i - MLP_L1_SIZE) && l2_array(i - MLP_L1_SIZE).is_vaddr && not_tlbing_vec(i) && !l2_evict
      tlb_req_arb.io.in(i).bits.vaddr := l2_array(i - MLP_L1_SIZE).get_tlb_va()
    }
    tlb_req_arb.io.in(i).bits.cmd := TlbCmd.read
    tlb_req_arb.io.in(i).bits.isPrefetch := true.B
    tlb_req_arb.io.in(i).bits.size := 3.U
    tlb_req_arb.io.in(i).bits.kill := false.B
    tlb_req_arb.io.in(i).bits.no_translate := false.B
    tlb_req_arb.io.in(i).bits.fullva := 0.U
    tlb_req_arb.io.in(i).bits.checkfullva := false.B
    tlb_req_arb.io.in(i).bits.memidx := DontCare
    tlb_req_arb.io.in(i).bits.debug := DontCare
    tlb_req_arb.io.in(i).bits.hlvx := DontCare
    tlb_req_arb.io.in(i).bits.hyperinst := DontCare
    tlb_req_arb.io.in(i).bits.pmp_addr := DontCare
  }

  assert(PopCount(s0_tlb_fire_vec) <= 1.U, "s0_tlb_fire_vec should be one-hot or empty")

  // s1: send out the req
  val s1_tlb_req_valid = GatedValidRegNext(tlb_req_arb.io.out.valid)
  val s1_tlb_req_bits = RegEnable(tlb_req_arb.io.out.bits, tlb_req_arb.io.out.valid)
  val s1_tlb_req_index = RegEnable(OHToUInt(s0_tlb_fire_vec.asUInt), tlb_req_arb.io.out.valid)
  val s1_l1_tlb_evict = s1_l1_alloc && (s1_l1_index === s1_tlb_req_index)
  val s1_l2_tlb_evict = s1_l2_alloc && ((s1_l2_index + MLP_L1_SIZE.U) === s1_tlb_req_index)
  val s1_tlb_evict = s1_l1_tlb_evict || s1_l2_tlb_evict
  io.tlb_req.req.valid := s1_tlb_req_valid && !s1_tlb_evict
  io.tlb_req.req.bits := s1_tlb_req_bits
  io.tlb_req.req_kill := false.B
  tlb_req_arb.io.out.ready := true.B

  XSPerfAccumulate("s1_tlb_req_sent", io.tlb_req.req.valid)
  XSPerfAccumulate("s1_tlb_req_evict", s1_tlb_req_valid && s1_tlb_evict)

  // s2: get response from tlb
  val s2_tlb_resp_valid = io.tlb_req.resp.valid
  val s2_tlb_resp = io.tlb_req.resp.bits
  val s2_tlb_update_index = RegEnable(s1_tlb_req_index, s1_tlb_req_valid)
  val s2_l1_tlb_evict = s1_l1_alloc && (s1_l1_index === s2_tlb_update_index)
  val s2_l2_tlb_evict = s1_l2_alloc && ((s1_l2_index + MLP_L1_SIZE.U) === s2_tlb_update_index)
  val s2_tlb_evict = s2_l1_tlb_evict || s2_l2_tlb_evict

  // s3: get pmp response from PMPChecker
  val s3_tlb_resp_valid = RegNext(s2_tlb_resp_valid)
  val s3_tlb_resp = RegEnable(s2_tlb_resp, s2_tlb_resp_valid)
  val s3_tlb_update_index = RegEnable(s2_tlb_update_index, s2_tlb_resp_valid)
  val s3_tlb_evict = RegNext(s2_tlb_evict)
  val s3_pmp_resp = io.pmp_resp
  val s3_update_valid = s3_tlb_resp_valid && !s3_tlb_evict && !s3_tlb_resp.miss
  val s3_drop = s3_update_valid && (
    // page/access fault
    s3_tlb_resp.excp.head.pf.ld || s3_tlb_resp.excp.head.gpf.ld || s3_tlb_resp.excp.head.af.ld ||
    // uncache
    s3_pmp_resp.mmio || Pbmt.isUncache(s3_tlb_resp.pbmt.head) ||
    // pmp access fault
    s3_pmp_resp.ld
  )
  when(s3_tlb_resp_valid && !s3_tlb_evict) {
    when(s3_tlb_update_index < MLP_L1_SIZE.U) {
      l1_array(s3_tlb_update_index).is_vaddr := s3_tlb_resp.miss

      when(!s3_tlb_resp.miss) {
        l1_array(s3_tlb_update_index).region := Cat(0.U((VAddrBits - PAddrBits).W), s3_tlb_resp.paddr.head(s3_tlb_resp.paddr.head.getWidth - 1, REGION_TAG_OFFSET))
        when(s3_drop) {
          invalid_array(s3_tlb_update_index, false)
        }
      }
    }.otherwise {
      val inner_index = s3_tlb_update_index - MLP_L1_SIZE.U
      l2_array(inner_index).is_vaddr := s3_tlb_resp.miss

      when(!s3_tlb_resp.miss) {
        l2_array(inner_index).region := Cat(0.U((VAddrBits - PAddrBits).W), s3_tlb_resp.paddr.head(s3_tlb_resp.paddr.head.getWidth - 1, REGION_TAG_OFFSET))
        when(s3_drop) {
          invalid_array(inner_index, true)
        }
      }
    }
  }
  io.tlb_req.resp.ready := true.B

  XSPerfAccumulate("s3_tlb_resp_valid", s3_tlb_resp_valid)
  XSPerfAccumulate("s3_tlb_resp_evict", s3_tlb_resp_valid && s3_tlb_evict)
  XSPerfAccumulate("s3_tlb_resp_miss", s3_tlb_resp_valid && !s3_tlb_evict && s3_tlb_resp.miss)
  XSPerfAccumulate("s3_tlb_resp_updated", s3_update_valid)
  XSPerfAccumulate("s3_tlb_resp_page_fault", s3_update_valid && s3_tlb_resp.excp.head.pf.ld)
  XSPerfAccumulate("s3_tlb_resp_guestpage_fault", s3_update_valid && s3_tlb_resp.excp.head.gpf.ld)
  XSPerfAccumulate("s3_tlb_resp_access_fault", s3_update_valid && s3_tlb_resp.excp.head.af.ld)
  XSPerfAccumulate("s3_tlb_resp_pmp_access_fault", s3_update_valid && s3_pmp_resp.ld)
  XSPerfAccumulate("s3_tlb_resp_uncache", s3_update_valid && (Pbmt.isUncache(s3_tlb_resp.pbmt.head) || s3_pmp_resp.mmio))
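
  // TLB request pacing, as a sketch: an entry that wins arbitration in s0 is kept
  // out of arbitration for the next three cycles (the s1/s2/s3 fire vectors feeding
  // not_tlbing_vec), matching the req -> resp -> pmp latency. Each is_vaddr entry
  // thus has at most one translation in flight, and an entry being re-allocated by
  // the enq pipeline (the evict terms at every stage) never issues or commits a
  // stale translation.
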
  // l1 pf
  // s0: generate prefetch req paddr per entry, arb them
  val s0_pf_fire_vec = VecInit((0 until MLP_L1_SIZE).map{case i => l1_pf_req_arb.io.in(i).fire})
  val s1_pf_fire_vec = GatedValidRegNext(s0_pf_fire_vec)

  val s0_pf_fire = l1_pf_req_arb.io.out.fire
  val s0_pf_index = l1_pf_req_arb.io.chosen
  val s0_pf_candidate_oh = get_candidate_oh(l1_pf_req_arb.io.out.bits.req.paddr)

  for(i <- 0 until MLP_L1_SIZE) {
    val evict = s1_l1_alloc && (s1_l1_index === i.U)
    l1_pf_req_arb.io.in(i).valid := l1_array(i).can_send_pf(l1_valids(i)) && !evict
    l1_pf_req_arb.io.in(i).bits.req.paddr := l1_array(i).get_pf_addr()
    l1_pf_req_arb.io.in(i).bits.req.alias := l1_array(i).alias
    l1_pf_req_arb.io.in(i).bits.req.confidence := io.confidence
    l1_pf_req_arb.io.in(i).bits.req.is_store := false.B
    l1_pf_req_arb.io.in(i).bits.req.pf_source := l1_array(i).source
    l1_pf_req_arb.io.in(i).bits.debug_vaddr := l1_array(i).get_pf_debug_vaddr()
  }

  when(s0_pf_fire) {
    l1_array(s0_pf_index).sent_vec := l1_array(s0_pf_index).sent_vec | s0_pf_candidate_oh
  }

  assert(PopCount(s0_pf_fire_vec) <= 1.U, "s0_pf_fire_vec should be one-hot or empty")

  // s1: send out to dcache
  val s1_pf_valid = Reg(Bool())
  val s1_pf_bits = RegEnable(l1_pf_req_arb.io.out.bits, l1_pf_req_arb.io.out.fire)
  val s1_pf_index = RegEnable(s0_pf_index, l1_pf_req_arb.io.out.fire)
  val s1_pf_candidate_oh = RegEnable(s0_pf_candidate_oh, l1_pf_req_arb.io.out.fire)
  val s1_pf_evict = s1_l1_alloc && (s1_l1_index === s1_pf_index)
  val s1_pf_update = s1_l1_update && (s1_l1_index === s1_pf_index)
  val s1_pf_can_go = io.l1_req.ready && !s1_pf_evict && !s1_pf_update
  val s1_pf_fire = s1_pf_valid && s1_pf_can_go

  when(s1_pf_can_go) {
    s1_pf_valid := false.B
  }

  when(l1_pf_req_arb.io.out.fire) {
    s1_pf_valid := true.B
  }

  when(s1_pf_fire) {
    l1_array(s1_pf_index).bit_vec := l1_array(s1_pf_index).bit_vec & ~s1_pf_candidate_oh
  }

  val in_pmem = PmemRanges.map(_.cover(s1_pf_bits.req.paddr)).reduce(_ || _)
  io.l1_req.valid := s1_pf_valid && !s1_pf_evict && !s1_pf_update && in_pmem && io.enable
  io.l1_req.bits := s1_pf_bits.req

  l1_pf_req_arb.io.out.ready := s1_pf_can_go || !s1_pf_valid

  assert(!((s1_l1_alloc || s1_l1_update) && s1_pf_fire && (s1_l1_index === s1_pf_index)), "pf pipeline & enq pipeline bit_vec hazard!")

  XSPerfAccumulate("s1_pf_valid", s1_pf_valid)
  XSPerfAccumulate("s1_pf_block_by_pipe_unready", s1_pf_valid && !io.l1_req.ready)
  XSPerfAccumulate("s1_pf_block_by_enq_alloc_hazard", s1_pf_valid && s1_pf_evict)
  XSPerfAccumulate("s1_pf_block_by_enq_update_hazard", s1_pf_valid && s1_pf_update)
  XSPerfAccumulate("s1_pf_fire", s1_pf_fire)
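
  // The s1 stage above acts as a one-entry output buffer: l1_pf_req_arb.io.out.ready
  // is s1_pf_can_go || !s1_pf_valid, so a request parks in s1 until the dcache
  // accepts it, and bit_vec is only cleared on the actual fire. A same-index enq
  // alloc/update in flight (s1_pf_evict / s1_pf_update) suppresses the send rather
  // than racing the enq pipeline's write to bit_vec (see the hazard assert above).
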
  // l2 pf
  // s0: generate prefetch req paddr per entry, arb them, send out
  io.l2_pf_addr.valid := l2_pf_req_arb.io.out.valid
  io.l2_pf_addr.bits := l2_pf_req_arb.io.out.bits.req

  l2_pf_req_arb.io.out.ready := true.B

  for(i <- 0 until MLP_L2L3_SIZE) {
    val evict = s1_l2_alloc && (s1_l2_index === i.U)
    l2_pf_req_arb.io.in(i).valid := l2_array(i).can_send_pf(l2_valids(i)) && (l2_array(i).sink === SINK_L2) && !evict
    l2_pf_req_arb.io.in(i).bits.req.addr := l2_array(i).get_pf_addr()
    l2_pf_req_arb.io.in(i).bits.req.source := MuxLookup(l2_array(i).source.value, MemReqSource.Prefetch2L2Unknown.id.U)(Seq(
      L1_HW_PREFETCH_STRIDE -> MemReqSource.Prefetch2L2Stride.id.U,
      L1_HW_PREFETCH_STREAM -> MemReqSource.Prefetch2L2Stream.id.U
    ))
    l2_pf_req_arb.io.in(i).bits.debug_vaddr := l2_array(i).get_pf_debug_vaddr()
  }

  when(l2_pf_req_arb.io.out.valid) {
    l2_array(l2_pf_req_arb.io.chosen).sent_vec := l2_array(l2_pf_req_arb.io.chosen).sent_vec | get_candidate_oh(l2_pf_req_arb.io.out.bits.req.addr)
  }

  val stream_out_debug_table = ChiselDB.createTable("StreamPFTraceOut" + p(XSCoreParamsKey).HartId.toString, new StreamPFTraceOutEntry, basicDB = false)
  val l1_debug_data = Wire(new StreamPFTraceOutEntry)
  val l2_debug_data = Wire(new StreamPFTraceOutEntry)
  l1_debug_data.PFVaddr := l1_pf_req_arb.io.out.bits.debug_vaddr
  l1_debug_data.PFSink := SINK_L1
  l2_debug_data.PFVaddr := l2_pf_req_arb.io.out.bits.debug_vaddr
  l2_debug_data.PFSink := SINK_L2

  stream_out_debug_table.log(
    data = l1_debug_data,
    en = l1_pf_req_arb.io.out.fire && (l1_pf_req_arb.io.out.bits.req.pf_source.value === L1_HW_PREFETCH_STREAM),
    site = "StreamPFTraceOut",
    clock = clock,
    reset = reset
  )
  stream_out_debug_table.log(
    data = l2_debug_data,
    en = l2_pf_req_arb.io.out.fire && (l2_pf_req_arb.io.out.bits.req.source === MemReqSource.Prefetch2L2Stream.id.U),
    site = "StreamPFTraceOut",
    clock = clock,
    reset = reset
  )
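
  // Source tagging above, spelled out: MuxLookup maps a stride-originated entry to
  // MemReqSource.Prefetch2L2Stride and a stream-originated one to Prefetch2L2Stream,
  // with Prefetch2L2Unknown as the fallback, so the L2 can attribute and throttle
  // prefetch traffic per engine.
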
  // last level cache pf
  // s0: generate prefetch req paddr per entry, arb them, send out
  io.l3_pf_addr.valid := l3_pf_req_arb.io.out.valid
  io.l3_pf_addr.bits := l3_pf_req_arb.io.out.bits

  l3_pf_req_arb.io.out.ready := true.B

  for(i <- 0 until MLP_L2L3_SIZE) {
    val evict = s1_l2_alloc && (s1_l2_index === i.U)
    l3_pf_req_arb.io.in(i).valid := l2_array(i).can_send_pf(l2_valids(i)) && (l2_array(i).sink === SINK_L3) && !evict
    l3_pf_req_arb.io.in(i).bits := l2_array(i).get_pf_addr()
  }

  when(l3_pf_req_arb.io.out.valid) {
    l2_array(l3_pf_req_arb.io.chosen).sent_vec := l2_array(l3_pf_req_arb.io.chosen).sent_vec | get_candidate_oh(l3_pf_req_arb.io.out.bits)
  }

  // reset meta to avoid multi-hit problem
  for(i <- 0 until MLP_SIZE) {
    if(i < MLP_L1_SIZE) {
      when(RegNext(io.flush)) {
        reset_array(i, false)
      }
    }else {
      when(RegNext(io.flush)) {
        reset_array(i - MLP_L1_SIZE, true)
      }
    }
  }

  XSPerfAccumulate("l2_prefetch_queue_busy", io.l2PfqBusy)
  XSPerfHistogram("filter_active", PopCount(VecInit(
    l1_array.zip(l1_valids).map{ case (e, v) => e.can_send_pf(v) } ++
    l2_array.zip(l2_valids).map{ case (e, v) => e.can_send_pf(v) }
  ).asUInt), true.B, 0, MLP_SIZE, 1)
  XSPerfHistogram("l1_filter_active", PopCount(VecInit(l1_array.zip(l1_valids).map{ case (e, v) => e.can_send_pf(v)}).asUInt), true.B, 0, MLP_L1_SIZE, 1)
  XSPerfHistogram("l2_filter_active", PopCount(VecInit(l2_array.zip(l2_valids).map{ case (e, v) => e.can_send_pf(v) && (e.sink === SINK_L2)}).asUInt), true.B, 0, MLP_L2L3_SIZE, 1)
  XSPerfHistogram("l3_filter_active", PopCount(VecInit(l2_array.zip(l2_valids).map{ case (e, v) => e.can_send_pf(v) && (e.sink === SINK_L3)}).asUInt), true.B, 0, MLP_L2L3_SIZE, 1)
}
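
// Capacity recap for the filter above (from the trait constants): MLP_SIZE = 32
// entries in total, split into 16 for the L1 sink (l1_array) and 16 shared by the
// L2/L3 sinks (l2_array); each entry covers one 1KB region, i.e. up to 32KB of
// address space tracked when full.
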
class L1Prefetcher(implicit p: Parameters) extends BasePrefecher with HasStreamPrefetchHelper with HasStridePrefetchHelper {
  val pf_ctrl = IO(Input(new PrefetchControlBundle))
  val stride_train = IO(Flipped(Vec(backendParams.LduCnt + backendParams.HyuCnt, ValidIO(new LdPrefetchTrainBundle()))))
  val l2PfqBusy = IO(Input(Bool()))

  val stride_train_filter = Module(new TrainFilter(STRIDE_FILTER_SIZE, "stride"))
  val stride_meta_array = Module(new StrideMetaArray)
  val stream_train_filter = Module(new TrainFilter(STREAM_FILTER_SIZE, "stream"))
  val stream_bit_vec_array = Module(new StreamBitVectorArray)
  val pf_queue_filter = Module(new MutiLevelPrefetchFilter)

  // for now, if the stream is disabled, the train and prefetch process will continue, without sending out any reqs
  val enable = io.enable
  val flush = pf_ctrl.flush

  stream_train_filter.io.ld_in.zipWithIndex.foreach {
    case (ld_in, i) => {
      ld_in.valid := io.ld_in(i).valid && enable
      ld_in.bits := io.ld_in(i).bits
    }
  }
  stream_train_filter.io.enable := enable
  stream_train_filter.io.flush := flush

  stride_train_filter.io.ld_in.zipWithIndex.foreach {
    case (ld_in, i) => {
      ld_in.valid := stride_train(i).valid && enable
      ld_in.bits := stride_train(i).bits
    }
  }
  stride_train_filter.io.enable := enable
  stride_train_filter.io.flush := flush

  stream_bit_vec_array.io.enable := enable
  stream_bit_vec_array.io.flush := flush
  stream_bit_vec_array.io.dynamic_depth := pf_ctrl.dynamic_depth
  stream_bit_vec_array.io.train_req <> stream_train_filter.io.train_req

  stride_meta_array.io.enable := enable
  stride_meta_array.io.flush := flush
  stride_meta_array.io.dynamic_depth := 0.U
  stride_meta_array.io.train_req <> stride_train_filter.io.train_req
  stride_meta_array.io.stream_lookup_req <> stream_bit_vec_array.io.stream_lookup_req
  stride_meta_array.io.stream_lookup_resp <> stream_bit_vec_array.io.stream_lookup_resp

  // stream has higher priority than stride
  pf_queue_filter.io.l1_prefetch_req.valid := stream_bit_vec_array.io.l1_prefetch_req.valid || stride_meta_array.io.l1_prefetch_req.valid
  pf_queue_filter.io.l1_prefetch_req.bits := Mux(
    stream_bit_vec_array.io.l1_prefetch_req.valid,
    stream_bit_vec_array.io.l1_prefetch_req.bits,
    stride_meta_array.io.l1_prefetch_req.bits
  )

  pf_queue_filter.io.l2_l3_prefetch_req.valid := stream_bit_vec_array.io.l2_l3_prefetch_req.valid || stride_meta_array.io.l2_l3_prefetch_req.valid
  pf_queue_filter.io.l2_l3_prefetch_req.bits := Mux(
    stream_bit_vec_array.io.l2_l3_prefetch_req.valid,
    stream_bit_vec_array.io.l2_l3_prefetch_req.bits,
    stride_meta_array.io.l2_l3_prefetch_req.bits
  )

  io.l1_req.valid := pf_queue_filter.io.l1_req.valid && enable && pf_ctrl.enable
  io.l1_req.bits := pf_queue_filter.io.l1_req.bits

  pf_queue_filter.io.l1_req.ready := Mux(pf_ctrl.enable, io.l1_req.ready, true.B)
  pf_queue_filter.io.tlb_req <> io.tlb_req
  pf_queue_filter.io.pmp_resp := io.pmp_resp
  pf_queue_filter.io.enable := enable
  pf_queue_filter.io.flush := flush
  pf_queue_filter.io.confidence := pf_ctrl.confidence
  pf_queue_filter.io.l2PfqBusy := l2PfqBusy

  val l2_in_pmem = PmemRanges.map(_.cover(pf_queue_filter.io.l2_pf_addr.bits.addr)).reduce(_ || _)
  io.l2_req.valid := pf_queue_filter.io.l2_pf_addr.valid && l2_in_pmem && enable && pf_ctrl.enable
  io.l2_req.bits := pf_queue_filter.io.l2_pf_addr.bits

  val l3_in_pmem = PmemRanges.map(_.cover(pf_queue_filter.io.l3_pf_addr.bits)).reduce(_ || _)
  io.l3_req.valid := pf_queue_filter.io.l3_pf_addr.valid && l3_in_pmem && enable && pf_ctrl.enable
  io.l3_req.bits := pf_queue_filter.io.l3_pf_addr.bits
}
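
// Top-level wiring sketch (hypothetical outer module; in XiangShan the MemBlock
// makes these connections):
//   val l1_prefetcher = Module(new L1Prefetcher)
//   l1_prefetcher.io.enable := ...    // global prefetch enable
//   l1_prefetcher.pf_ctrl := ...      // flush / dynamic_depth / confidence / enable
//   l1_prefetcher.stride_train <> ... // per-load-pipe stride train bundles
//   l1_prefetcher.l2PfqBusy := ...    // back-pressure hint from the L2 pf queue
//   l1_prefetcher.io.tlb_req <> ...   // dedicated prefetcher TLB port + pmp_resp
// io.l1_req / io.l2_req / io.l3_req then drive the dcache and the L2/L3 hint ports.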