/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import chisel3.internal.naming.chiselName
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** Page table walking is divided into two parts:
  * One,   PTW: walks the page directory entries (pde), i.e. every non-leaf level, one by one
  * Two, LLPTW: walks only the leaf page table entries (4KB pte), in parallel
  */
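
/** A minimal sketch of the Sv39 division of labor assumed above (the 9-bit
  * slice widths come from the RISC-V privileged spec, not from this file):
  * the 27-bit vpn splits into three slices, PTW resolves the vpn(2) (1GB) and
  * vpn(1) (2MB) levels one at a time, and LLPTW finishes the vpn(0) (4KB,
  * leaf) level for many requests in parallel.
  * {{{
  * // hypothetical helper, for illustration only
  * def vpnSlice(vpn: BigInt, level: Int): BigInt = (vpn >> (9 * level)) & 0x1ff
  * // PTW walks slices 2 then 1; LLPTW handles slice 0
  * }}}
  */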


/** PTW: page table walker
  * a finite state machine
  * it only handles the 1GB and 2MB levels of the walk,
  * in other words, every level except the last (leaf) one
  **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l1Hit = Bool()
    val ppn = UInt(ppnLen.W)
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = new PtwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: this port used to connect directly to LLPTW; it now goes through the
  // page cache, to avoid a corner case that created duplicate entries

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
}

@chiselName
class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)

  val sfence = io.sfence
  val mem = io.mem
  val satp = io.csr.satp
  val flush = io.sfence.valid || io.csr.satp.changed

  val s_idle :: s_addr_check :: s_mem_req :: s_mem_resp :: s_check_pte :: Nil = Enum(5)
  val state = RegInit(s_idle)
  val level = RegInit(0.U(log2Up(Level).W))
  val af_level = RegInit(0.U(log2Up(Level).W)) // an access fault returns this level
  val ppn = Reg(UInt(ppnLen.W))
  val vpn = Reg(UInt(vpnLen.W))
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val memPte = mem.resp.bits.asTypeOf(new PteBundle().cloneType)
  io.req.ready := state === s_idle

  val finish = WireInit(false.B)
  val sent_to_pmp = state === s_addr_check || (state === s_check_pte && !finish)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)
  val pageFault = memPte.isPf(level)
  switch (state) {
    is (s_idle) {
      when (io.req.fire()) {
        val req = io.req.bits
        state := s_addr_check
        level := Mux(req.l1Hit, 1.U, 0.U)
        af_level := Mux(req.l1Hit, 1.U, 0.U)
        ppn := Mux(req.l1Hit, io.req.bits.ppn, satp.ppn)
        vpn := io.req.bits.req_info.vpn
        l1Hit := req.l1Hit
        accessFault := false.B
      }
    }

    is (s_addr_check) {
      state := s_mem_req
    }

    is (s_mem_req) {
      when (mem.req.fire()) {
        state := s_mem_resp
      }
      when (accessFault) {
        state := s_check_pte
      }
    }

    is (s_mem_resp) {
      when (mem.resp.fire()) {
        state := s_check_pte
        af_level := af_level + 1.U
      }
    }

    is (s_check_pte) {
      when (io.resp.valid) { // the pte has been found, or an accessFault was raised (see note below)
        when (io.resp.fire()) {
          state := s_idle
        }
        finish := true.B
      }.elsewhen (io.llptw.valid) { // the next level is the leaf pte, hand it over to the miss queue
        when (io.llptw.fire()) {
          state := s_idle
        }
        finish := true.B
      }.otherwise { // go to the next level and access memory; a pmp check is needed first
        when (io.pmp.resp.ld) { // pmp check failed, raise access-fault
          // do nothing here; the pmp result is registered and handled later (see accessFault above)
        }.otherwise { // go to the next level
          assert(level === 0.U)
          level := levelNext
          state := s_mem_req
        }
      }
    }
  }
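
  // State flow, as read from the switch above (sfence flushes back to s_idle
  // from any state):
  //   s_idle       --req fire-->  s_addr_check
  //   s_addr_check ------------>  s_mem_req     (pmp checked along the way)
  //   s_mem_req    --mem fire-->  s_mem_resp    (or s_check_pte on accessFault)
  //   s_mem_resp   --resp fire->  s_check_pte
  //   s_check_pte  --> s_idle (resp or llptw fires) or s_mem_req (next level)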

  when (sfence.valid) {
    state := s_idle
    accessFault := false.B
  }

  // memPte is only meaningful at s_check_pte; in the cycle mem.resp fires it is not ready yet.
  val is_pte = memPte.isLeaf() || memPte.isPf(level)
  val find_pte = is_pte
  val to_find_pte = level === 1.U && !is_pte
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire())
  io.resp.valid := state === s_check_pte && (find_pte || accessFault)
  io.resp.bits.source := source
  io.resp.bits.resp.apply(pageFault && !accessFault, accessFault, Mux(accessFault, af_level, level), memPte, vpn, satp.asid)

  io.llptw.valid := state === s_check_pte && to_find_pte && !accessFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.ppn := memPte.ppn

  // this walker only handles levels 0 and 1; level must never reach 2 or 3
  assert(level =/= 2.U && level =/= 3.U)

  val l1addr = MakeAddr(satp.ppn, getVpnn(vpn, 2))
  val l2addr = MakeAddr(Mux(l1Hit, ppn, memPte.ppn), getVpnn(vpn, 1))
  val mem_addr = Mux(af_level === 0.U, l1addr, l2addr)
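
  // A worked example of the address formation above, assuming MakeAddr builds
  // ppn ## vpn-slice ## 0.U(3.W) (the slice indexes 512 eight-byte ptes in a
  // 4KB page table), which matches how the helper is used here:
  //   l1addr: satp.ppn    ## getVpnn(vpn, 2) ## 000  -> first access (1GB level)
  //   l2addr: fetched ppn ## getVpnn(vpn, 1) ## 000  -> second access (2MB level)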
  io.pmp.req.valid := DontCare // same cycle; the valid signal is not used
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := state === s_mem_req && !io.mem.mask && !accessFault
  mem.req.bits.addr := mem_addr
  mem.req.bits.id := FsmReqID.U(bMemID.W)

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  XSDebug(p"[ptw] state:${state} level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire())
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire() && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", state =/= s_idle)
  XSPerfAccumulate("fsm_idle", state === s_idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("mem_count", mem.req.fire())
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire(), mem.resp.fire(), true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(state =/= s_idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire()                                       ),
    ("fsm_busy          ", state =/= s_idle                                    ),
    ("fsm_idle          ", state === s_idle                                    ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                     ),
    ("mem_count         ", mem.req.fire()                                      ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire(), mem.resp.fire(), true)),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                     ),
  )
  generatePerfEvent()
}

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
  * the page table walker that handles only the last-level (4KB) walks
  **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(PAddrBits.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(ppnLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
}

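/** Per-entry lifecycle, as read from the state machine below: each of the
  * llptwsize entries moves
  *   state_idle -> state_addr_check (pmp) -> state_mem_req (arbitrating for
  *   the mem port) -> state_mem_waiting -> state_mem_out -> state_idle,
  * while an entry that duplicates an in-flight vpn enqueues straight into
  * state_mem_waiting (or state_mem_out, if that data returns this very cycle).
  */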
@chiselName
class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())

  val entries = Reg(Vec(l2tlbParams.llptwsize, new LLPTWEntry()))
  val state_idle :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: Nil = Enum(5)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))
  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)

  val full = !ParallelOR(is_emptys).asBool()
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having)
  val mem_arb = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  // duplicate req detection
  // to_wait: a previous entry with the same vpn is already accessing mem, so enqueue as state_mem_waiting
  // to_cache: the previous entry's data has come back just now, so the request should go back to the page cache
  def dup(vpn1: UInt, vpn2: UInt): Bool = {
    dropL3SectorBits(vpn1) === dropL3SectorBits(vpn2)
  }
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn)
  )
  val dup_req_fire = mem_arb.io.out.fire() && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) // dup with the entry whose mem req fires this cycle
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with a "mem_waiting" entry whose mem req has already been sent
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with a "mem_out" entry that received its data just now
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire() && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with an entry whose data comes back next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp
  val to_cache_low = Cat(dup_vec_having).orR
  assert(RegNext(!(dup_req_fire && Cat(dup_vec_wait).orR), init = true.B), "mem req fired but some entries are already waiting, should not happen")
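
  // A worked example of the duplicate cases above: suppose entry 3 is in
  // state_mem_waiting for vpn V when a new request for V arrives at io.in.
  //   to_wait:      entry 3 is still waiting on mem, so the newcomer enqueues
  //                 as state_mem_waiting with wait_id = entries(3).wait_id
  //   to_mem_out:   entry 3's mem resp fires this very cycle, so the newcomer
  //                 enqueues directly as state_mem_out
  //   to_cache_low: entry 3 already holds its data (state_mem_out), so the
  //                 newcomer could be served by the page cache instead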

  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
  val enq_state = Mux(to_mem_out, state_mem_out, // same as below, but the mem resp arrives right now
    Mux(to_wait, state_mem_waiting, state_addr_check))
  when (io.in.fire()) {
    // if a prefetch req does not need mem access, just drop it.
    // so there will be at most 1 + FilterSize entries that need to re-access the page cache,
    // and 2 + FilterSize is enough to avoid dead-lock
    state(enq_ptr) := Mux(from_pre(io.in.bits.req_info.source) && enq_state =/= state_addr_check, state_idle, enq_state)
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := io.in.bits.ppn
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    mem_resp_hit(enq_ptr) := to_mem_out
  }
  when (mem_arb.io.out.fire()) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: just as a duplicate enqueue is set to mem_waiting, sending a req sets all other dup entries to mem_waiting
        state(i) := state_mem_waiting
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire()) {
    state.indices.foreach{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        state(i) := state_mem_out
        mem_resp_hit(i) := true.B
      }
    }
  }
  when (io.out.fire()) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.foreach(a => when (a) { a := false.B })

  val enq_ptr_reg = RegNext(enq_ptr)

  io.pmp.req.valid := RegNext(enq_state === state_addr_check)
  io.pmp.req.bits.addr := MakeAddr(entries(enq_ptr_reg).ppn, getVpnn(entries(enq_ptr_reg).req_info.vpn, 0))
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid && (state(enq_ptr_reg) === state_addr_check) &&
    !(mem_arb.io.out.fire() && dup(entries(enq_ptr_reg).req_info.vpn, mem_arb.io.out.bits.req_info.vpn))) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry duplicated another
    //       entry and its state was already changed. When it duplicates the entry whose req fires right
    //       now, it is set to mem_waiting (above), and ld must be false, so it is safe to ignore.
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(enq_ptr_reg).af := accessFault
    state(enq_ptr_reg) := Mux(accessFault, state_mem_out, state_mem_req)
  }
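
  // Timing note, as read from the code above: the pmp request uses the
  // RegNext'd enq_ptr, so it is issued the cycle after an enqueue that landed
  // in state_addr_check, and the pmp response is combinational in that same
  // cycle; an entry therefore normally leaves state_addr_check (to
  // state_mem_req, or state_mem_out on an access fault) one cycle after
  // io.in fires.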

  val flush = io.sfence.valid || io.csr.satp.changed
  when (flush) {
    state.foreach(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool()
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  io.mem.req.bits.addr := MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.id := mem_arb.io.chosen
  mem_arb.io.out.ready := io.mem.req.ready
  io.mem.refill := entries(RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))).req_info
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  XSPerfAccumulate("llptw_in_count", io.in.fire())
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire() && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire())
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"llptw entry ${i} timed out without sending a response")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount           ", io.in.fire()               ),
    ("tlbllptw_inblock           ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount          ", io.mem.req.fire()          ),
    ("tlbllptw_memcycle          ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}