// /XiangShan/src/main/scala/xiangshan/cache/mmu/L2TLBMissQueue.scala (revision bd5d9cb9148005ecb097169abbf96a15b7f24c17)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import chisel3.internal.naming.chiselName
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/* The Miss Queue does not need to care about duplicate requests from outside;
 * that deduplication is done by PtwFilter. PtwMissQueue itself is just a
 * Chisel Queue with flush support.
 */
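
/* Dataflow summary (a reading of the code below, not normative documentation):
 * - io.in enqueues a miss into one of MSHRSize entries.
 * - Entries replay page cache accesses through io.cache: high-level (L1/L2)
 *   lookups via state_cache_high, low-level (L3) lookups via state_cache_low.
 * - Entries that already know the leaf PTE base (io.in.bits.l3) are PMP-checked
 *   and fetch the PTE through io.mem; duplicates merge onto the in-flight access.
 * - Completed entries (data returned or access fault) leave through io.out.
 */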
33
34class L2TlbMQEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
35  val vpn = UInt(vpnLen.W)
36  val source = UInt(bSourceWidth.W)
37  val ppn = UInt(ppnLen.W)
38  val wait_id = UInt(log2Up(MSHRSize).W)
39  val af = Bool()
40}

class L2TlbMQInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val vpn = Output(UInt(vpnLen.W))
  val source = Output(UInt(bSourceWidth.W))
  val l3 = Valid(Output(UInt(PAddrBits.W))) // valid: the base of the leaf PTE is already known, so the entry can skip the page cache replay and go straight to the address check
}

class L2TlbMQCacheBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val vpn = Output(UInt(vpnLen.W))
  val source = Output(UInt(bSourceWidth.W))
}

class L2TlbMQIO(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val in = Flipped(Decoupled(new L2TlbMQInBundle()))
  val sfence = Input(new SfenceBundle)
  val cache = Decoupled(new L2TlbMQCacheBundle())
  val fsm_done = Input(Bool())
  val out = DecoupledIO(new Bundle {
    val source = Output(UInt(bSourceWidth.W))
    val id = Output(UInt(bMemID.W))
    val vpn = Output(UInt(vpnLen.W))
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(MSHRSize).W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(MSHRSize).W))
    val buffer_it = Output(Vec(MSHRSize, Bool()))
    val refill = Output(new Bundle {
      val vpn = UInt(vpnLen.W)
      val source = UInt(bSourceWidth.W)
    })
    val req_mask = Input(Vec(MSHRSize, Bool()))
  }
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

@chiselName
class L2TlbMissQueue(implicit p: Parameters) extends XSModule with HasPtwConst {
  require(MSHRSize >= (2 + l2tlbParams.filterSize))

  val io = IO(new L2TlbMQIO())

  val entries = Reg(Vec(MSHRSize, new L2TlbMQEntry()))
  val state_idle :: state_cache_high :: state_cache_low :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: Nil = Enum(7)
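  // Per-entry FSM (one state register per entry, see below):
  //   state_idle        free entry
  //   state_cache_high  waiting to replay a high-level (L1/L2) page cache access
  //   state_cache_low   waiting to replay a low-level (L3) page cache access
  //   state_addr_check  waiting for the PMP check of the final PTE address
  //   state_mem_req     waiting to send the memory request
  //   state_mem_waiting request sent (or merged onto another), waiting for the response
  //   state_mem_out     response received, waiting to hand the result to io.out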
  val state = RegInit(VecInit(Seq.fill(MSHRSize)(state_idle)))
  val is_emptys = state.map(_ === state_idle)
  val is_caches_high = state.map(_ === state_cache_high)
  val is_caches_low = state.map(_ === state_cache_low)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)

  val full = !ParallelOR(is_emptys).asBool()
  val enq_ptr = ParallelPriorityEncoder(is_emptys)
  val cache_high_ptr = ParallelPriorityEncoder(is_caches_high)
  val cache_low_ptr = ParallelPriorityEncoder(is_caches_low)

  val cache_arb = Module(new RRArbiter(new L2TlbMQCacheBundle(), 2))
  cache_arb.io.in(0).valid := Cat(is_caches_high).orR && io.fsm_done // while the FSM is busy, the required L1/L2 PTE is not ready yet
  cache_arb.io.in(0).bits.vpn := entries(cache_high_ptr).vpn
  cache_arb.io.in(0).bits.source := entries(cache_high_ptr).source
  cache_arb.io.in(1).valid := Cat(is_caches_low).orR
  cache_arb.io.in(1).bits.vpn := entries(cache_low_ptr).vpn
  cache_arb.io.in(1).bits.source := entries(cache_low_ptr).source
  cache_arb.io.out.ready := io.cache.ready
  val cache_ptr = Mux(cache_arb.io.chosen === 0.U, cache_high_ptr, cache_low_ptr)

  val mem_ptr = ParallelPriorityEncoder(is_having)
  val mem_arb = Module(new RRArbiter(new L2TlbMQEntry(), MSHRSize))
  for (i <- 0 until MSHRSize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }
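  // NOTE (assumption from the interface): io.mem.req_mask marks entries whose
  // memory request is still held outside this module, keeping them out of
  // arbitration until the external path drains.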

  // duplicate request handling
  // to_wait: an earlier duplicate is accessing (or about to access) memory; enter state_mem_waiting
  // to_cache_low: the earlier duplicate's data came back just now; enter state_cache_low
  def dup(vpn1: UInt, vpn2: UInt): Bool = {
    // two vpns are duplicates when they fall into the same last-level sector
    dropL3SectorBits(vpn1) === dropL3SectorBits(vpn2)
  }
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.vpn, entries(i).vpn)
  )
  val dup_req_fire = mem_arb.io.out.fire() && dup(io.in.bits.vpn, mem_arb.io.out.bits.vpn) // dup with the entry whose mem req fires this cycle
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries, whose mem req was already sent
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with the "mem_out" entry that just received its data
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire() && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with the entry whose data comes back next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp
  val to_cache_low = Cat(dup_vec_having).orR
  assert(RegNext(!(dup_req_fire && Cat(dup_vec_wait).orR), init = true.B), "mem req fired while some entries were already waiting; this should not happen")
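  // Enqueue-time dedup outcomes, ordered by how far the duplicate has progressed:
  //   to_mem_out   -> the duplicate's mem resp fires this very cycle: state_mem_out
  //   to_cache_low -> the duplicate's data was refilled just before:  state_cache_low
  //   to_wait      -> the duplicate's mem access is still in flight:  state_mem_waiting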

  val mem_resp_hit = RegInit(VecInit(Seq.fill(MSHRSize)(false.B)))
  val enq_state = Mux(to_mem_out, state_mem_out, // like the case below, but the mem resp arrives this cycle
    Mux(to_cache_low, state_cache_low, // like the case below, but the mem resp arrived last cycle
    Mux(to_wait, state_mem_waiting, // wait for the previous mem resp
    Mux(io.in.bits.l3.valid, state_addr_check, state_cache_high))))
  when (io.in.fire()) {
    // if a prefetch req does not need mem access, just drop it,
    // so at most 1 + FilterSize entries ever need to re-access the page cache,
    // and 2 + FilterSize entries are enough to avoid deadlock
    state(enq_ptr) := Mux(from_pre(io.in.bits.source) && enq_state =/= state_addr_check, state_idle, enq_state)
    entries(enq_ptr).vpn := io.in.bits.vpn
    entries(enq_ptr).ppn := io.in.bits.l3.bits
    entries(enq_ptr).source := io.in.bits.source
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    mem_resp_hit(enq_ptr) := to_mem_out
  }
  when (mem_arb.io.out.fire()) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && dup(entries(i).vpn, mem_arb.io.out.bits.vpn)) {
        // NOTE: just as a dup enqueue is set to mem_waiting, a firing req moves all other dup entries to mem_waiting
        state(i) := state_mem_waiting
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire()) {
    state.indices.foreach{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        state(i) := state_mem_out
        mem_resp_hit(i) := true.B
      }
    }
  }
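  // NOTE: one memory access wakes every entry merged onto it: all entries whose
  //       wait_id matches the responding id move to state_mem_out in the same cycle.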
  when (io.out.fire()) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  when (io.cache.fire()) {
    state(cache_ptr) := state_idle
  }

  // clear after one cycle, so each bit of io.mem.buffer_it pulses for exactly one cycle
  mem_resp_hit.foreach(a => when (a) { a := false.B } )

  val enq_ptr_reg = RegNext(enq_ptr)

  io.pmp.req.valid := RegNext(enq_state === state_addr_check)
  io.pmp.req.bits.addr := MakeAddr(entries(enq_ptr_reg).ppn, getVpnn(entries(enq_ptr_reg).vpn, 0))
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // the pmp responds in the same cycle as the request
  when (pmp_resp_valid && (state(enq_ptr_reg) === state_addr_check) &&
    !(mem_arb.io.out.fire && dup(entries(enq_ptr_reg).vpn, mem_arb.io.out.bits.vpn))) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry was a dup of
    //       another entry and its state has already been changed; if it is a dup of the req that
    //       fires right now, it was set to mem_waiting above and ld must be false, so the resp is ignored
    entries(enq_ptr_reg).af := io.pmp.resp.ld
    state(enq_ptr_reg) := Mux(io.pmp.resp.ld, state_mem_out, state_mem_req)
  }
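  // NOTE: on an access fault the entry skips the memory access entirely and goes
  //       straight to state_mem_out, so the fault is reported through io.out.bits.af.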

  when (io.sfence.valid) {
    state.foreach(_ := state_idle)
  }

  io.in.ready := !full
  io.cache.valid := cache_arb.io.out.valid
  io.cache.bits.vpn := cache_arb.io.out.bits.vpn
  io.cache.bits.source := cache_arb.io.out.bits.source

  io.out.valid := ParallelOR(is_having).asBool()
  io.out.bits.source := entries(mem_ptr).source
  io.out.bits.vpn := entries(mem_ptr).vpn
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af

  io.mem.req.valid := mem_arb.io.out.valid
  io.mem.req.bits.addr := MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.vpn, 0))
  io.mem.req.bits.id := mem_arb.io.chosen
  mem_arb.io.out.ready := io.mem.req.ready
  io.mem.refill.vpn := entries(RegNext(io.mem.resp.bits.id(log2Up(MSHRSize)-1, 0))).vpn
  io.mem.refill.source := entries(RegNext(io.mem.resp.bits.id(log2Up(MSHRSize)-1, 0))).source
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr
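  // NOTE: the RegNext on the resp id suggests refill info is sampled one cycle after
  //       the response fires, presumably lining up with when the refill data itself is
  //       written back (the data path lives outside this module).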

  XSPerfAccumulate("mq_in_count", io.in.fire())
  XSPerfAccumulate("mq_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) { // 7 = number of FSM states
    XSPerfAccumulate(s"enq_state${i}", io.in.fire() && enq_state === i.U)
  }
  for (i <- 0 until (MSHRSize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"cache_high_util${i}", PopCount(is_caches_high) === i.U)
    XSPerfAccumulate(s"cache_low_util${i}", PopCount(is_caches_low) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire())
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until MSHRSize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue time out: entry ${i} stuck, no output")
  }
}