/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
*
*
* Acknowledgement
*
* This implementation is inspired by several key papers:
* [1] Jean-Loup Baer, and Tien-Fu Chen. "[An effective on-chip preloading scheme to reduce data access penalty.]
* (https://doi.org/10.1145/125826.125932)" ACM/IEEE Conference on Supercomputing. 1991.
***************************************************************************************/

package xiangshan.mem.prefetch

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.cache.HasDCacheParameters
import xiangshan.cache.mmu._
import xiangshan.mem.{L1PrefetchReq, LdPrefetchTrainBundle}
import xiangshan.mem.trace._
import scala.collection.SeqLike

trait HasStridePrefetchHelper extends HasL1PrefetchHelper {
  val STRIDE_FILTER_SIZE = 6
  val STRIDE_ENTRY_NUM = 10
  val STRIDE_BITS = 10 + BLOCK_OFFSET
  val STRIDE_VADDR_BITS = 10 + BLOCK_OFFSET
  val STRIDE_CONF_BITS = 2

  // detail control
  val ALWAYS_UPDATE_PRE_VADDR = true
  val AGGRESIVE_POLICY = false // if true, prefetch degree is STRIDE_LOOK_AHEAD_BLOCKS; otherwise 1
  val STRIDE_LOOK_AHEAD_BLOCKS = 2 // aggressive degree
  val LOOK_UP_STREAM = false // if true, avoid collision with stream

  val STRIDE_WIDTH_BLOCKS = if(AGGRESIVE_POLICY) STRIDE_LOOK_AHEAD_BLOCKS else 1

  def MAX_CONF = (1 << STRIDE_CONF_BITS) - 1
}

class StrideMetaBundle(implicit p: Parameters) extends XSBundle with HasStridePrefetchHelper {
  val pre_vaddr = UInt(STRIDE_VADDR_BITS.W)
  val stride = UInt(STRIDE_BITS.W)
  val confidence = UInt(STRIDE_CONF_BITS.W)
  val hash_pc = UInt(HASH_TAG_WIDTH.W)

  def reset(index: Int) = {
    pre_vaddr := 0.U
    stride := 0.U
    confidence := 0.U
    hash_pc := index.U
  }

  def tag_match(valid1: Bool, valid2: Bool, new_hash_pc: UInt): Bool = {
    valid1 && valid2 && hash_pc === new_hash_pc
  }

  def alloc(vaddr: UInt, alloc_hash_pc: UInt) = {
    pre_vaddr := vaddr(STRIDE_VADDR_BITS - 1, 0)
    stride := 0.U
    confidence := 0.U
    hash_pc := alloc_hash_pc
  }

  def update(vaddr: UInt, always_update_pre_vaddr: Bool) = {
    val new_vaddr = vaddr(STRIDE_VADDR_BITS - 1, 0)
    val new_stride = new_vaddr - pre_vaddr
    val new_stride_blk = block_addr(new_stride)
    // NOTE: for now, disable negative stride
    val stride_valid = new_stride_blk =/= 0.U && new_stride_blk =/= 1.U && new_stride(STRIDE_VADDR_BITS - 1) === 0.U
    val stride_match = new_stride === stride
    val low_confidence = confidence <= 1.U
    val can_send_pf = stride_valid && stride_match && confidence === MAX_CONF.U

    when(stride_valid) {
      when(stride_match) {
        confidence := Mux(confidence === MAX_CONF.U, confidence, confidence + 1.U)
      }.otherwise {
        confidence := Mux(confidence === 0.U, confidence, confidence - 1.U)
        when(low_confidence) {
          stride := new_stride
        }
      }
      pre_vaddr := new_vaddr
    }
    when(always_update_pre_vaddr) {
      pre_vaddr := new_vaddr
    }

    (can_send_pf, new_stride)
  }

}

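// StrideMetaArray pipeline overview (summary of the stages implemented below):
//   s0: hash the training PC and CAM-search all entries; on a miss, pick a PLRU victim
//   s1: allocate a new entry on miss, or update stride/confidence on hit
//   s2: once confidence saturates, compute the L1 and L2/L3 prefetch vaddrs
//       (vaddr + (stride << l1/l2_stride_ratio))
//   s3: issue the L1 prefetch request (optionally suppressed when the Stream
//       component already covers the address, see LOOK_UP_STREAM)
//   s4: issue the L2/L3 prefetch request one cycle later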
class StrideMetaArray(implicit p: Parameters) extends XSModule with HasStridePrefetchHelper {
  val io = IO(new XSBundle {
    val enable = Input(Bool())
    // TODO: flush all entries when a process change happens, or disable stride prefetch for a while
    val flush = Input(Bool())
    val dynamic_depth = Input(UInt(32.W)) // TODO: enable dynamic stride depth
    val train_req = Flipped(DecoupledIO(new PrefetchReqBundle))
    val l1_prefetch_req = ValidIO(new StreamPrefetchReqBundle)
    val l2_l3_prefetch_req = ValidIO(new StreamPrefetchReqBundle)
    // query Stream component to see if a stream pattern has already been detected
    val stream_lookup_req = ValidIO(new PrefetchReqBundle)
    val stream_lookup_resp = Input(Bool())
  })

  val array = Reg(Vec(STRIDE_ENTRY_NUM, new StrideMetaBundle))
  val valids = RegInit(VecInit(Seq.fill(STRIDE_ENTRY_NUM)(false.B)))

  def reset_array(i: Int): Unit = {
    valids(i) := false.B
    // only need to reset control signals for the friendly area
    // array(i).reset(i)
  }

  val replacement = ReplacementPolicy.fromString("plru", STRIDE_ENTRY_NUM)

  // s0: hash pc -> cam all entries
  val s0_can_accept = Wire(Bool())
  val s0_valid = io.train_req.fire
  val s0_vaddr = io.train_req.bits.vaddr
  val s0_pc = io.train_req.bits.pc
  val s0_pc_hash = pc_hash_tag(s0_pc)
  val s0_pc_match_vec = VecInit(array zip valids map { case (e, v) => e.tag_match(v, s0_valid, s0_pc_hash) }).asUInt
  val s0_hit = s0_pc_match_vec.orR
  val s0_index = Mux(s0_hit, OHToUInt(s0_pc_match_vec), replacement.way)
  io.train_req.ready := s0_can_accept
  io.stream_lookup_req.valid := s0_valid
  io.stream_lookup_req.bits := io.train_req.bits

  when(s0_valid) {
    replacement.access(s0_index)
  }

  assert(PopCount(s0_pc_match_vec) <= 1.U)
  XSPerfAccumulate("s0_valid", s0_valid)
  XSPerfAccumulate("s0_hit", s0_valid && s0_hit)
  XSPerfAccumulate("s0_miss", s0_valid && !s0_hit)

  // s1: alloc or update
  val s1_valid = GatedValidRegNext(s0_valid)
  val s1_index = RegEnable(s0_index, s0_valid)
  val s1_pc_hash = RegEnable(s0_pc_hash, s0_valid)
  val s1_vaddr = RegEnable(s0_vaddr, s0_valid)
  val s1_hit = RegEnable(s0_hit, s0_valid)
  val s1_alloc = s1_valid && !s1_hit
  val s1_update = s1_valid && s1_hit
  val s1_stride = array(s1_index).stride
  val s1_new_stride = WireInit(0.U(STRIDE_BITS.W))
  val s1_can_send_pf = WireInit(false.B)
  s0_can_accept := !(s1_valid && s1_pc_hash === s0_pc_hash)

  val always_update = Constantin.createRecord(s"always_update${p(XSCoreParamsKey).HartId}", initValue = ALWAYS_UPDATE_PRE_VADDR)

  when(s1_alloc) {
    valids(s1_index) := true.B
    array(s1_index).alloc(
      vaddr = s1_vaddr,
      alloc_hash_pc = s1_pc_hash
    )
  }.elsewhen(s1_update) {
    val res = array(s1_index).update(s1_vaddr, always_update)
    s1_can_send_pf := res._1
    s1_new_stride := res._2
  }

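  // Look-ahead distance is Constantin-tunable: prefetch depth = stride << ratio,
  // i.e. 4x the detected stride for L1 (ratio 2) and 32x for L2/L3 (ratio 5) by default.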
  val l1_stride_ratio_const = Constantin.createRecord(s"l1_stride_ratio${p(XSCoreParamsKey).HartId}", initValue = 2)
  val l1_stride_ratio = l1_stride_ratio_const(3, 0)
  val l2_stride_ratio_const = Constantin.createRecord(s"l2_stride_ratio${p(XSCoreParamsKey).HartId}", initValue = 5)
  val l2_stride_ratio = l2_stride_ratio_const(3, 0)
  // s2: calculate L1 & L2 pf addr
  val s2_valid = GatedValidRegNext(s1_valid && s1_can_send_pf)
  val s2_vaddr = RegEnable(s1_vaddr, s1_valid && s1_can_send_pf)
  val s2_stride = RegEnable(s1_stride, s1_valid && s1_can_send_pf)
  val s2_l1_depth = s2_stride << l1_stride_ratio
  val s2_l1_pf_vaddr = (s2_vaddr + s2_l1_depth)(VAddrBits - 1, 0)
  val s2_l2_depth = s2_stride << l2_stride_ratio
  val s2_l2_pf_vaddr = (s2_vaddr + s2_l2_depth)(VAddrBits - 1, 0)
  val s2_l1_pf_req_bits = (new StreamPrefetchReqBundle).getStreamPrefetchReqBundle(
    valid = s2_valid,
    vaddr = s2_l1_pf_vaddr,
    width = STRIDE_WIDTH_BLOCKS,
    decr_mode = false.B,
    sink = SINK_L1,
    source = L1_HW_PREFETCH_STRIDE,
    // TODO: add stride debug db, not useful for now
    t_pc = 0xdeadbeefL.U,
    t_va = 0xdeadbeefL.U
  )
  val s2_l2_pf_req_bits = (new StreamPrefetchReqBundle).getStreamPrefetchReqBundle(
    valid = s2_valid,
    vaddr = s2_l2_pf_vaddr,
    width = STRIDE_WIDTH_BLOCKS,
    decr_mode = false.B,
    sink = SINK_L2,
    source = L1_HW_PREFETCH_STRIDE,
    // TODO: add stride debug db, not useful for now
    t_pc = 0xdeadbeefL.U,
    t_va = 0xdeadbeefL.U
  )

  // s3: send l1 pf out
  val s3_valid = if (LOOK_UP_STREAM) GatedValidRegNext(s2_valid) && !io.stream_lookup_resp else GatedValidRegNext(s2_valid)
  val s3_l1_pf_req_bits = RegEnable(s2_l1_pf_req_bits, s2_valid)
  val s3_l2_pf_req_bits = RegEnable(s2_l2_pf_req_bits, s2_valid)

  // s4: send l2 pf out
  val s4_valid = GatedValidRegNext(s3_valid)
  val s4_l2_pf_req_bits = RegEnable(s3_l2_pf_req_bits, s3_valid)

  io.l1_prefetch_req.valid := s3_valid
  io.l1_prefetch_req.bits := s3_l1_pf_req_bits
  io.l2_l3_prefetch_req.valid := s4_valid
  io.l2_l3_prefetch_req.bits := s4_l2_pf_req_bits

  XSPerfAccumulate("pf_valid", PopCount(Seq(io.l1_prefetch_req.valid, io.l2_l3_prefetch_req.valid)))
  XSPerfAccumulate("l1_pf_valid", s3_valid)
  XSPerfAccumulate("l2_pf_valid", s4_valid)
  XSPerfAccumulate("detect_stream", io.stream_lookup_resp)
  XSPerfHistogram("high_conf_num", PopCount(VecInit(array.map(_.confidence === MAX_CONF.U))).asUInt, true.B, 0, STRIDE_ENTRY_NUM, 1)
  for(i <- 0 until STRIDE_ENTRY_NUM) {
    XSPerfAccumulate(s"entry_${i}_update", i.U === s1_index && s1_update)
    for(j <- 0 until 4) {
      XSPerfAccumulate(s"entry_${i}_disturb_${j}", i.U === s1_index && s1_update &&
        j.U === s1_new_stride &&
        array(s1_index).confidence === MAX_CONF.U &&
        array(s1_index).stride =/= s1_new_stride
      )
    }
  }

  for(i <- 0 until STRIDE_ENTRY_NUM) {
    when(GatedValidRegNext(io.flush)) {
      reset_array(i)
    }
  }
}