xref: /XiangShan/src/main/scala/xiangshan/frontend/icache/ICacheMissUnit.scala (revision 45f43e6e5f88874a7573ff096d1e5c2855bd16c7)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.frontend.icache
18
19import org.chipsalliance.cde.config.Parameters
20import chisel3._
21import chisel3.util._
22import freechips.rocketchip.diplomacy.IdRange
23import freechips.rocketchip.tilelink.ClientStates._
24import freechips.rocketchip.tilelink.TLPermissions._
25import freechips.rocketchip.tilelink._
26import xiangshan._
27import xiangshan.cache._
28import utils._
29import utility._
30import difftest._
31
32
33abstract class ICacheMissUnitModule(implicit p: Parameters) extends XSModule
34  with HasICacheParameters
35
36abstract class ICacheMissUnitBundle(implicit p: Parameters) extends XSBundle
37  with HasICacheParameters
38
39class ICacheMissReq(implicit p: Parameters) extends ICacheBundle
40{
41    val paddr      = UInt(PAddrBits.W)
42    val vSetIdx    = UInt(log2Ceil(nSets).W)
43    val waymask     = UInt(nWays.W)
44
45    def getPhyTag    = get_phy_tag(paddr)
46}
47
48
49class ICacheMissResp(implicit p: Parameters) extends ICacheBundle
50{
51    val data     = UInt(blockBits.W)
52    val corrupt  = Bool()
53}
54
55class ICacheMissBundle(implicit p: Parameters) extends ICacheBundle{
56    val req       =   Vec(2, Flipped(DecoupledIO(new ICacheMissReq)))
57    val resp      =   Vec(2,ValidIO(new ICacheMissResp))
58    val flush     =   Input(Bool())
59}
60
61
62class ICacheMissEntry(edge: TLEdgeOut, id: Int)(implicit p: Parameters) extends ICacheMissUnitModule
63  with MemoryOpConstants
64{
65  val io = IO(new Bundle {
66    val id = Input(UInt(log2Ceil(PortNumber).W))
67
68    val req = Flipped(DecoupledIO(new ICacheMissReq))
69    val resp = ValidIO(new ICacheMissResp)
70
71    //tilelink channel
72    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
73    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
74
75    val meta_write = DecoupledIO(new ICacheMetaWriteBundle)
76    val data_write = DecoupledIO(new ICacheDataWriteBundle)
77
78    val ongoing_req    = Output(new FilterInfo)
79    val fencei = Input(Bool())
80  })
81
82  /** default value for control signals */
83  io.resp := DontCare
84  io.mem_acquire.bits := DontCare
85  io.mem_grant.ready := true.B
86  io.meta_write.bits := DontCare
87  io.data_write.bits := DontCare
88
89  val s_idle  :: s_send_mem_aquire :: s_wait_mem_grant :: s_write_back_wait_resp :: s_write_back :: s_wait_resp :: Nil = Enum(6)
90  val state = RegInit(s_idle)
91  /** control logic transformation */
92  //request register
93  val req = Reg(new ICacheMissReq)
94  val req_idx = req.vSetIdx //virtual index
95  val req_tag = req.getPhyTag //physical tag
96  val req_waymask = req.waymask
97  val req_corrupt = RegInit(false.B)
98
99  val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant)
100
101  val needflush_r = RegInit(false.B)
102  when (state === s_idle) { needflush_r := false.B }
103  when (state =/= s_idle && io.fencei) { needflush_r := true.B }
104  val needflush = needflush_r | io.fencei
105
106  //cacheline register
107  val readBeatCnt = Reg(UInt(log2Up(refillCycles).W))
108  val respDataReg = Reg(Vec(refillCycles, UInt(beatBits.W)))
109
110  //initial
111  io.resp.bits := DontCare
112  io.mem_acquire.bits := DontCare
113  io.mem_grant.ready := true.B
114  io.meta_write.bits := DontCare
115  io.data_write.bits := DontCare
116
117  io.req.ready := (state === s_idle)
118  io.mem_acquire.valid := (state === s_send_mem_aquire)
119
120  io.ongoing_req.valid := (state =/= s_idle)
121  io.ongoing_req.paddr :=  addrAlign(req.paddr, blockBytes, PAddrBits)
122
123  //state change
124  switch(state) {
125    is(s_idle) {
126      when(io.req.fire) {
127        readBeatCnt := 0.U
128        state := s_send_mem_aquire
129        req := io.req.bits
130      }
131    }
132
133    // memory request
134    is(s_send_mem_aquire) {
135      when(io.mem_acquire.fire) {
136        state := s_wait_mem_grant
137      }
138    }
139
140    is(s_wait_mem_grant) {
141      when(edge.hasData(io.mem_grant.bits)) {
142        when(io.mem_grant.fire) {
143          readBeatCnt := readBeatCnt + 1.U
144          respDataReg(readBeatCnt) := io.mem_grant.bits.data
145          req_corrupt := io.mem_grant.bits.corrupt // TODO: seems has bug
146          when(readBeatCnt === (refillCycles - 1).U) {
147            assert(refill_done, "refill not done!")
148            state := s_write_back_wait_resp
149          }
150        }
151      }
152    }
153
154    is(s_write_back_wait_resp) {
155      when((io.meta_write.fire && io.data_write.fire || needflush) && io.resp.fire) {
156        state := s_idle
157      }.elsewhen(io.meta_write.fire && io.data_write.fire || needflush) {
158        state := s_wait_resp
159      }.elsewhen(io.resp.fire) {
160        state := s_write_back
161      }
162    }
163
164    is(s_write_back) {
165      when(io.meta_write.fire && io.data_write.fire || needflush) {
166        state := s_idle
167      }
168    }
169
170    is(s_wait_resp) {
171      when(io.resp.fire) {
172        state := s_idle
173      }
174    }
175  }
176
177  /** refill write and meta write */
178
179  val getBlock = edge.Get(
180    fromSource = io.id,
181    toAddress = addrAlign(req.paddr, blockBytes, PAddrBits),
182    lgSize = (log2Up(cacheParams.blockBytes)).U
183  )._2
184
185  io.mem_acquire.bits := getBlock // getBlock
186  // req source
187  io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUInst.id.U)
188  require(nSets <= 256) // icache size should not be more than 128KB
189
190  //resp to ifu
191  io.resp.valid := (state === s_wait_resp) || (state === s_write_back_wait_resp)
192
193  io.resp.bits.data := respDataReg.asUInt
194  io.resp.bits.corrupt := req_corrupt
195
196  io.meta_write.valid := (((state === s_write_back) || (state === s_write_back_wait_resp)) && !needflush)
197  io.meta_write.bits.generate(tag = req_tag, idx = req_idx, waymask = req_waymask, bankIdx = req_idx(0))
198
199  io.data_write.valid := (((state === s_write_back) || (state === s_write_back_wait_resp)) && !needflush)
200  io.data_write.bits.generate(data = respDataReg.asUInt,
201                              idx  = req_idx,
202                              waymask = req_waymask,
203                              bankIdx = req_idx(0),
204                              paddr = req.paddr)
205
206  XSPerfAccumulate(
207    "entryPenalty" + Integer.toString(id, 10),
208    BoolStopWatch(
209      start = io.req.fire,
210      stop = io.resp.valid,
211      startHighPriority = true)
212  )
213  XSPerfAccumulate("entryReq" + Integer.toString(id, 10), io.req.fire)
214
215  // Statistics on the latency distribution of MSHR
216  val cntLatency = RegInit(0.U(32.W))
217  cntLatency := Mux(io.mem_acquire.fire, 1.U, cntLatency + 1.U)
218  // the condition is same as the transition from s_wait_mem_grant to s_write_back
219  val cntEnable = (state === s_wait_mem_grant) && edge.hasData(io.mem_grant.bits) &&
220                  io.mem_grant.fire && (readBeatCnt === (refillCycles - 1).U)
221  XSPerfHistogram("icache_mshr_latency_" + id.toString(), cntLatency, cntEnable, 0, 300, 10, right_strict = true)
222}
223
224
225class ICacheMissUnit(edge: TLEdgeOut)(implicit p: Parameters) extends ICacheMissUnitModule
226{
227  val io = IO(new Bundle{
228    val hartId      = Input(UInt(8.W))
229    val req         = Vec(2, Flipped(DecoupledIO(new ICacheMissReq)))
230    val resp        = Vec(2, ValidIO(new ICacheMissResp))
231
232    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
233    val mem_grant   = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
234
235    val fdip_acquire = Flipped(DecoupledIO(new TLBundleA(edge.bundle)))
236    val fdip_grant   = DecoupledIO(new TLBundleD(edge.bundle))
237
238    val meta_write  = DecoupledIO(new ICacheMetaWriteBundle)
239    val data_write  = DecoupledIO(new ICacheDataWriteBundle)
240
241    val ICacheMissUnitInfo = new ICacheMissUnitInfo
242    val fencei = Input(Bool())
243  })
244  // assign default values to output signals
245  io.mem_grant.ready := false.B
246
247  val meta_write_arb = Module(new Arbiter(new ICacheMetaWriteBundle,  PortNumber))
248  val refill_arb     = Module(new Arbiter(new ICacheDataWriteBundle,  PortNumber))
249
250  io.mem_grant.ready := true.B
251
252  val entries = (0 until PortNumber) map { i =>
253    val entry = Module(new ICacheMissEntry(edge, i))
254
255    entry.io.id := i.U
256
257    // entry req
258    entry.io.req.valid := io.req(i).valid
259    entry.io.req.bits  := io.req(i).bits
260    io.req(i).ready    := entry.io.req.ready
261
262    // entry resp
263    meta_write_arb.io.in(i)     <>  entry.io.meta_write
264    refill_arb.io.in(i)         <>  entry.io.data_write
265
266    entry.io.mem_grant.valid := false.B
267    entry.io.mem_grant.bits  := DontCare
268    when (io.mem_grant.bits.source === i.U) {
269      entry.io.mem_grant <> io.mem_grant
270    }
271
272    io.resp(i) <> entry.io.resp
273    io.ICacheMissUnitInfo.mshr(i) <> entry.io.ongoing_req
274    entry.io.fencei := io.fencei
275//    XSPerfAccumulate(
276//      "entryPenalty" + Integer.toString(i, 10),
277//      BoolStopWatch(
278//        start = entry.io.req.fire,
279//        stop = entry.io.resp.fire,
280//        startHighPriority = true)
281//    )
282//    XSPerfAccumulate("entryReq" + Integer.toString(i, 10), entry.io.req.fire)
283
284    entry
285  }
286
287  io.fdip_grant.valid := false.B
288  io.fdip_grant.bits  := DontCare
289  when (io.mem_grant.bits.source === PortNumber.U) {
290    io.fdip_grant <> io.mem_grant
291  }
292
293  /**
294    ******************************************************************************
295    * Register 2 cycle meta write info for IPrefetchPipe filter
296    ******************************************************************************
297    */
298  val meta_write_buffer = InitQueue(new FilterInfo, size = 2)
299  meta_write_buffer(0).valid := io.meta_write.fire
300  meta_write_buffer(0).paddr := io.data_write.bits.paddr
301  meta_write_buffer(1)       := meta_write_buffer(0)
302  (0 until 2).foreach (i => {
303    io.ICacheMissUnitInfo.recentWrite(i) := meta_write_buffer(i)
304  })
305
306  val tl_a_chanel = entries.map(_.io.mem_acquire) :+ io.fdip_acquire
307  TLArbiter.lowest(edge, io.mem_acquire, tl_a_chanel:_*)
308
309  io.meta_write     <> meta_write_arb.io.out
310  io.data_write     <> refill_arb.io.out
311
312  if (env.EnableDifftest) {
313    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
314    difftest.coreid := io.hartId
315    difftest.index := 0.U
316    difftest.valid := refill_arb.io.out.valid
317    difftest.addr := refill_arb.io.out.bits.paddr
318    difftest.data := refill_arb.io.out.bits.data.asTypeOf(difftest.data)
319    difftest.idtfr := DontCare
320  }
321
322  (0 until nWays).map{ w =>
323    XSPerfAccumulate("line_0_refill_way_" + Integer.toString(w, 10),  entries(0).io.meta_write.valid && OHToUInt(entries(0).io.meta_write.bits.waymask)  === w.U)
324    XSPerfAccumulate("line_1_refill_way_" + Integer.toString(w, 10),  entries(1).io.meta_write.valid && OHToUInt(entries(1).io.meta_write.bits.waymask)  === w.U)
325  }
326
327}
328