xref: /XiangShan/src/main/scala/xiangshan/mem/prefetch/L1StridePrefetcher.scala (revision b03c55a5df5dc8793cb44b42dd60141566e57e78)
package xiangshan.mem.prefetch

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.cache.HasDCacheParameters
import xiangshan.cache.mmu._
import xiangshan.mem.{L1PrefetchReq, LdPrefetchTrainBundle}
import xiangshan.mem.trace._
import scala.collection.SeqLike

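// Shared sizing parameters and policy switches for the L1 stride prefetcher.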
trait HasStridePrefetchHelper extends HasL1PrefetchHelper {
  val STRIDE_FILTER_SIZE = 6
  val STRIDE_ENTRY_NUM = 10
  val STRIDE_BITS = 10 + BLOCK_OFFSET
  val STRIDE_VADDR_BITS = 10 + BLOCK_OFFSET
  val STRIDE_CONF_BITS = 2

  // detailed control
  val ALWAYS_UPDATE_PRE_VADDR = true
  val AGGRESIVE_POLICY = false // if true, prefetch degree is STRIDE_LOOK_AHEAD_BLOCKS; otherwise it is 1
  val STRIDE_LOOK_AHEAD_BLOCKS = 2 // aggressive degree
  val LOOK_UP_STREAM = false // if true, query the stream prefetcher to avoid collisions with detected streams

  val STRIDE_WIDTH_BLOCKS = if(AGGRESIVE_POLICY) STRIDE_LOOK_AHEAD_BLOCKS else 1

  def MAX_CONF = (1 << STRIDE_CONF_BITS) - 1
}

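// Per-entry training state: the previous truncated vaddr, the last observed
// stride, a saturating confidence counter, and a hashed-PC tag.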
class StrideMetaBundle(implicit p: Parameters) extends XSBundle with HasStridePrefetchHelper {
  val pre_vaddr = UInt(STRIDE_VADDR_BITS.W)
  val stride = UInt(STRIDE_BITS.W)
  val confidence = UInt(STRIDE_CONF_BITS.W)
  val hash_pc = UInt(HASH_TAG_WIDTH.W)

  def reset(index: Int) = {
    pre_vaddr := 0.U
    stride := 0.U
    confidence := 0.U
    hash_pc := index.U
  }

  def tag_match(valid1: Bool, valid2: Bool, new_hash_pc: UInt): Bool = {
    valid1 && valid2 && hash_pc === new_hash_pc
  }

  def alloc(vaddr: UInt, alloc_hash_pc: UInt) = {
    pre_vaddr := vaddr(STRIDE_VADDR_BITS - 1, 0)
    stride := 0.U
    confidence := 0.U
    hash_pc := alloc_hash_pc
  }

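  // Training step: the candidate stride is the difference between the incoming
  // truncated vaddr and pre_vaddr. Strides of 0 or 1 block and negative
  // strides are ignored (single-block patterns are presumably left to the
  // stream prefetcher). A matching stride saturates the confidence counter
  // upward; a mismatch decays it, and once confidence is low (<= 1) the entry
  // re-learns the new stride. A prefetch fires only when a valid stride
  // repeats while confidence is already at MAX_CONF.
  //
  // Worked example (STRIDE_CONF_BITS = 2, so MAX_CONF = 3): after the stride
  // is learned, three matching accesses raise confidence 0 -> 1 -> 2 -> 3,
  // and the fourth matching access can send a prefetch.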
  def update(vaddr: UInt, always_update_pre_vaddr: Bool) = {
    val new_vaddr = vaddr(STRIDE_VADDR_BITS - 1, 0)
    val new_stride = new_vaddr - pre_vaddr
    val new_stride_blk = block_addr(new_stride)
    // NOTE: for now, negative strides are disabled
    val stride_valid = new_stride_blk =/= 0.U && new_stride_blk =/= 1.U && new_stride(STRIDE_VADDR_BITS - 1) === 0.U
    val stride_match = new_stride === stride
    val low_confidence = confidence <= 1.U
    val can_send_pf = stride_valid && stride_match && confidence === MAX_CONF.U

    when(stride_valid) {
      when(stride_match) {
        confidence := Mux(confidence === MAX_CONF.U, confidence, confidence + 1.U)
      }.otherwise {
        confidence := Mux(confidence === 0.U, confidence, confidence - 1.U)
        when(low_confidence) {
          stride := new_stride
        }
      }
      pre_vaddr := new_vaddr
    }
    when(always_update_pre_vaddr) {
      pre_vaddr := new_vaddr
    }

    (can_send_pf, new_stride)
  }

}

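// Fully-associative, PC-hash tagged stride table with a short request pipeline:
//   s0: hash the training pc and CAM all entries
//   s1: allocate on miss, or update the matching entry on hit
//   s2: compute the L1 and L2/L3 prefetch vaddrs from the learned stride
//   s3: issue the L1 request (optionally suppressed when a stream is detected)
//   s4: issue the L2/L3 request one cycle later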
class StrideMetaArray(implicit p: Parameters) extends XSModule with HasStridePrefetchHelper {
  val io = IO(new XSBundle {
    val enable = Input(Bool())
    // TODO: flush all entries when a process change happens, or disable the prefetcher for a while
    val flush = Input(Bool())
    val dynamic_depth = Input(UInt(32.W)) // TODO: enable dynamic stride depth
    val train_req = Flipped(DecoupledIO(new PrefetchReqBundle))
    val l1_prefetch_req = ValidIO(new StreamPrefetchReqBundle)
    val l2_l3_prefetch_req = ValidIO(new StreamPrefetchReqBundle)
    // query the Stream component to see if a stream pattern has already been detected
    val stream_lookup_req  = ValidIO(new PrefetchReqBundle)
    val stream_lookup_resp = Input(Bool())
  })

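  // Entry storage: STRIDE_ENTRY_NUM payload entries with separate valid bits,
  // so a flush only needs to clear the valids.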
  val array = Reg(Vec(STRIDE_ENTRY_NUM, new StrideMetaBundle))
  val valids = RegInit(VecInit(Seq.fill(STRIDE_ENTRY_NUM)(false.B)))

  def reset_array(i: Int): Unit = {
    valids(i) := false.B
    // to stay area-friendly, only the control signal (valid) is reset;
    // the data fields are re-initialized on the next alloc
    // array(i).reset(i)
  }

  val replacement = ReplacementPolicy.fromString("plru", STRIDE_ENTRY_NUM)

  // s0: hash pc -> cam all entries
  val s0_can_accept = Wire(Bool())
  val s0_valid = io.train_req.fire
  val s0_vaddr = io.train_req.bits.vaddr
  val s0_pc = io.train_req.bits.pc
  val s0_pc_hash = pc_hash_tag(s0_pc)
  val s0_pc_match_vec = VecInit(array zip valids map { case (e, v) => e.tag_match(v, s0_valid, s0_pc_hash) }).asUInt
  val s0_hit = s0_pc_match_vec.orR
  val s0_index = Mux(s0_hit, OHToUInt(s0_pc_match_vec), replacement.way)
  io.train_req.ready := s0_can_accept
  io.stream_lookup_req.valid := s0_valid
  io.stream_lookup_req.bits  := io.train_req.bits

  when(s0_valid) {
    replacement.access(s0_index)
  }

  assert(PopCount(s0_pc_match_vec) <= 1.U)
  XSPerfAccumulate("s0_valid", s0_valid)
  XSPerfAccumulate("s0_hit", s0_valid && s0_hit)
  XSPerfAccumulate("s0_miss", s0_valid && !s0_hit)

  // s1: alloc or update
  val s1_valid = GatedValidRegNext(s0_valid)
  val s1_index = RegEnable(s0_index, s0_valid)
  val s1_pc_hash = RegEnable(s0_pc_hash, s0_valid)
  val s1_vaddr = RegEnable(s0_vaddr, s0_valid)
  val s1_hit = RegEnable(s0_hit, s0_valid)
  val s1_alloc = s1_valid && !s1_hit
  val s1_update = s1_valid && s1_hit
  val s1_stride = array(s1_index).stride
  val s1_new_stride = WireInit(0.U(STRIDE_BITS.W))
  val s1_can_send_pf = WireInit(false.B)
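  // Back-pressure s0 while s1 is writing an entry with the same pc hash, so
  // s0 never reads an entry that is being updated in the same cycle.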
  s0_can_accept := !(s1_valid && s1_pc_hash === s0_pc_hash)

  val always_update = Constantin.createRecord(s"always_update${p(XSCoreParamsKey).HartId}", initValue = ALWAYS_UPDATE_PRE_VADDR)

  when(s1_alloc) {
    valids(s1_index) := true.B
    array(s1_index).alloc(
      vaddr = s1_vaddr,
      alloc_hash_pc = s1_pc_hash
    )
  }.elsewhen(s1_update) {
    val res = array(s1_index).update(s1_vaddr, always_update)
    s1_can_send_pf := res._1
    s1_new_stride := res._2
  }

  val l1_stride_ratio_const = Constantin.createRecord(s"l1_stride_ratio${p(XSCoreParamsKey).HartId}", initValue = 2)
  val l1_stride_ratio = l1_stride_ratio_const(3, 0)
  val l2_stride_ratio_const = Constantin.createRecord(s"l2_stride_ratio${p(XSCoreParamsKey).HartId}", initValue = 5)
  val l2_stride_ratio = l2_stride_ratio_const(3, 0)
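  // Look-ahead distance is stride << ratio: with the default ratios above,
  // the L1 request targets 4 strides ahead of the training access and the
  // L2/L3 request 32 strides ahead.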
  // s2: calculate L1 & L2 pf addr
  val s2_valid = GatedValidRegNext(s1_valid && s1_can_send_pf)
  val s2_vaddr = RegEnable(s1_vaddr, s1_valid && s1_can_send_pf)
  val s2_stride = RegEnable(s1_stride, s1_valid && s1_can_send_pf)
  val s2_l1_depth = s2_stride << l1_stride_ratio
  val s2_l1_pf_vaddr = (s2_vaddr + s2_l1_depth)(VAddrBits - 1, 0)
  val s2_l2_depth = s2_stride << l2_stride_ratio
  val s2_l2_pf_vaddr = (s2_vaddr + s2_l2_depth)(VAddrBits - 1, 0)
  val s2_l1_pf_req_bits = (new StreamPrefetchReqBundle).getStreamPrefetchReqBundle(
    valid = s2_valid,
    vaddr = s2_l1_pf_vaddr,
    width = STRIDE_WIDTH_BLOCKS,
    decr_mode = false.B,
    sink = SINK_L1,
    source = L1_HW_PREFETCH_STRIDE,
    // TODO: add stride debug db, not useful for now
    t_pc = 0xdeadbeefL.U,
    t_va = 0xdeadbeefL.U
  )
  val s2_l2_pf_req_bits = (new StreamPrefetchReqBundle).getStreamPrefetchReqBundle(
    valid = s2_valid,
    vaddr = s2_l2_pf_vaddr,
    width = STRIDE_WIDTH_BLOCKS,
    decr_mode = false.B,
    sink = SINK_L2,
    source = L1_HW_PREFETCH_STRIDE,
    // TODO: add stride debug db, not useful for now
    t_pc = 0xdeadbeefL.U,
    t_va = 0xdeadbeefL.U
  )

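  // With LOOK_UP_STREAM enabled, the request is dropped when the stream
  // prefetcher reports that this pattern is already covered by a stream.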
  // s3: send l1 pf out
  val s3_valid = if (LOOK_UP_STREAM) GatedValidRegNext(s2_valid) && !io.stream_lookup_resp else GatedValidRegNext(s2_valid)
  val s3_l1_pf_req_bits = RegEnable(s2_l1_pf_req_bits, s2_valid)
  val s3_l2_pf_req_bits = RegEnable(s2_l2_pf_req_bits, s2_valid)

  // s4: send l2 pf out
  val s4_valid = GatedValidRegNext(s3_valid)
  val s4_l2_pf_req_bits = RegEnable(s3_l2_pf_req_bits, s3_valid)

  io.l1_prefetch_req.valid := s3_valid
  io.l1_prefetch_req.bits := s3_l1_pf_req_bits
  io.l2_l3_prefetch_req.valid := s4_valid
  io.l2_l3_prefetch_req.bits := s4_l2_pf_req_bits

  XSPerfAccumulate("pf_valid", PopCount(Seq(io.l1_prefetch_req.valid, io.l2_l3_prefetch_req.valid)))
  XSPerfAccumulate("l1_pf_valid", s3_valid)
  XSPerfAccumulate("l2_pf_valid", s4_valid)
  XSPerfAccumulate("detect_stream", io.stream_lookup_resp)
  XSPerfHistogram("high_conf_num", PopCount(VecInit(array.map(_.confidence === MAX_CONF.U))).asUInt, true.B, 0, STRIDE_ENTRY_NUM, 1)
  for(i <- 0 until STRIDE_ENTRY_NUM) {
    XSPerfAccumulate(s"entry_${i}_update", i.U === s1_index && s1_update)
    for(j <- 0 until 4) {
      XSPerfAccumulate(s"entry_${i}_disturb_${j}", i.U === s1_index && s1_update &&
                                                   j.U === s1_new_stride &&
                                                   array(s1_index).confidence === MAX_CONF.U &&
                                                   array(s1_index).stride =/= s1_new_stride
      )
    }
  }

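  // Flush: invalidate every entry one cycle after io.flush is asserted.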
  when(GatedValidRegNext(io.flush)) {
    for(i <- 0 until STRIDE_ENTRY_NUM) {
      reset_array(i)
    }
  }
}
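
// A minimal wiring sketch of how this module might be driven (illustrative
// only: the real connections are made by the surrounding memory block, and
// `trainReq` / `enableStride` below are hypothetical signals, not part of
// this file):
//
//   val stride_meta_array = Module(new StrideMetaArray)
//   stride_meta_array.io.enable := enableStride
//   stride_meta_array.io.flush := false.B
//   stride_meta_array.io.dynamic_depth := 0.U
//   stride_meta_array.io.train_req <> trainReq
//   stride_meta_array.io.stream_lookup_resp := false.B
//   // io.l1_prefetch_req and io.l2_l3_prefetch_req feed the prefetch queues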