/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import chisel3.internal.naming.chiselName
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/* ptw finite state machine, the actual page table walker
 */
class PtwFsmIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l1Hit = Bool()
    val ppn = UInt(ppnLen.W)
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = new PtwResp
  })

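  // when the PTE to fetch is the last-level (leaf) entry, the walk is handed
  // off to the L2 TLB miss queue instead of finishing in this module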
  val mq = DecoupledIO(new L2TlbMQInBundle())

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
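  // every page-table memory access is checked by the PMP first; the PMP
  // responds in the same cycle as the request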
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
}

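/* Walk flow, as implemented below:
 *   s_idle -> s_addr_check -> s_mem_req -> s_mem_resp -> s_check_pte,
 * looping back to s_mem_req for the next level, and finishing either through
 * io.resp (leaf PTE, page fault or access fault) or through io.mq (the
 * last-level PTE fetch is handed to the miss queue).
 */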
@chiselName
class PtwFsm()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PtwFsmIO)

  val sfence = io.sfence
  val mem = io.mem
  val satp = io.csr.satp
  val flush = io.sfence.valid || io.csr.satp.changed

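  // s_idle:       wait for a walk request
  // s_addr_check: the first access's address is being checked by the PMP
  // s_mem_req:    issue the PTE read to memory
  // s_mem_resp:   wait for the PTE to return
  // s_check_pte:  respond, hand off to the miss queue, or walk down one level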
  val s_idle :: s_addr_check :: s_mem_req :: s_mem_resp :: s_check_pte :: Nil = Enum(5)
  val state = RegInit(s_idle)
  val level = RegInit(0.U(log2Up(Level).W))
  val af_level = RegInit(0.U(log2Up(Level).W)) // the level reported on an access fault
  val ppn = Reg(UInt(ppnLen.W))
  val vpn = Reg(UInt(vpnLen.W))
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val memPte = mem.resp.bits.asTypeOf(new PteBundle().cloneType)
  io.req.ready := state === s_idle

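  // the PMP is queried combinationally: in s_addr_check for the first access,
  // and in s_check_pte before each further level; its ld/mmio result is
  // registered here and consumed one cycle later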
  val finish = WireInit(false.B)
  val sent_to_pmp = state === s_addr_check || (state === s_check_pte && !finish)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)
  val pageFault = memPte.isPf(level)
  switch (state) {
    is (s_idle) {
      when (io.req.fire()) {
        val req = io.req.bits
        state := s_addr_check
        level := Mux(req.l1Hit, 1.U, 0.U)
        af_level := Mux(req.l1Hit, 1.U, 0.U)
        ppn := Mux(req.l1Hit, io.req.bits.ppn, satp.ppn)
        vpn := io.req.bits.req_info.vpn
        l1Hit := req.l1Hit
        accessFault := false.B
      }
    }

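    // spend one cycle letting the PMP check the first access's address; the
    // registered result is acted on in s_mem_req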
    is (s_addr_check) {
      state := s_mem_req
    }

    is (s_mem_req) {
      when (mem.req.fire()) {
        state := s_mem_resp
      }
      when (accessFault) {
        state := s_check_pte
      }
    }

    is (s_mem_resp) {
      when (mem.resp.fire()) {
        state := s_check_pte
        af_level := af_level + 1.U
      }
    }

    is (s_check_pte) {
      when (io.resp.valid) { // a leaf PTE was found, or an access fault was raised (see the PMP check below)
        when (io.resp.fire()) {
          state := s_idle
        }
        finish := true.B
      }.elsewhen(io.mq.valid) { // the next level holds the leaf PTE; hand the fetch to the miss queue
        when (io.mq.fire()) {
          state := s_idle
        }
        finish := true.B
      }.otherwise { // go to the next level, i.e. access memory again, which needs a PMP check first
        when (io.pmp.resp.ld) { // the PMP check failed: raise an access fault
          // do nothing here; the result is registered (the RegEnable above) and handled next cycle
        }.otherwise { // the PMP check passed: walk down one level
          assert(level === 0.U)
          level := levelNext
          state := s_mem_req
        }
      }
    }
  }

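  // an sfence.vma flushes the walk in flight and clears any pending access fault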
  when (sfence.valid) {
    state := s_idle
    accessFault := false.B
  }

  // memPte is only valid in s_check_pte; at mem.resp.fire it is not yet ready
  val is_pte = memPte.isLeaf() || memPte.isPf(level)
  val find_pte = is_pte
  val to_find_pte = level === 1.U && !is_pte
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire())
  io.resp.valid := state === s_check_pte && (find_pte || accessFault)
  io.resp.bits.source := source
  io.resp.bits.resp.apply(pageFault && !accessFault, accessFault, Mux(accessFault, af_level, level), memPte, vpn, satp.asid)

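  // hand the last-level fetch to the miss queue; the base ppn of the leaf
  // level comes from the PTE just read out of memory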
  io.mq.valid := state === s_check_pte && to_find_pte && !accessFault
  io.mq.bits.req_info.source := source
  io.mq.bits.req_info.vpn := vpn
  io.mq.bits.l3.valid := true.B
  io.mq.bits.l3.bits := memPte.ppn

  assert(level =/= 2.U && level =/= 3.U) // the fsm itself only walks levels 0 and 1

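  // PTE address generation: the root (l1) access is based on satp.ppn, the l2
  // access on the ppn from the l1 PTE just read (or the pre-matched ppn on an
  // l1 hit). Roughly, assuming Sv39: addr = (ppn << pageOffsetBits) | (vpnn << 3).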
  val l1addr = MakeAddr(satp.ppn, getVpnn(vpn, 2))
  val l2addr = MakeAddr(Mux(l1Hit, ppn, memPte.ppn), getVpnn(vpn, 1))
  val mem_addr = Mux(af_level === 0.U, l1addr, l2addr)
  io.pmp.req.valid := DontCare // the check is same-cycle, so valid is not used
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

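  // the memory request is masked by the parent module via io.mem.mask and is
  // suppressed entirely once an access fault has been registered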
  mem.req.valid := state === s_mem_req && !io.mem.mask && !accessFault
  mem.req.bits.addr := mem_addr
  mem.req.bits.id := FsmReqID.U(bMemID.W)

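  // the request being walked, exported so the fetched PTE can be refilled
  // into the page-table caches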
  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  XSDebug(p"[fsm] state:${state} level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire())
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire() && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", state =/= s_idle)
  XSPerfAccumulate("fsm_idle", state === s_idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("mem_count", mem.req.fire())
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire(), mem.resp.fire(), true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(state =/= s_idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire()                                       ),
    ("fsm_busy          ", state =/= s_idle                                    ),
    ("fsm_idle          ", state === s_idle                                    ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                     ),
    ("mem_count         ", mem.req.fire()                                      ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire(), mem.resp.fire(), true)),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                     ),
  )
  generatePerfEvent()
}