xref: /XiangShan/src/main/scala/xiangshan/cache/mmu/PageTableWalker.scala (revision d0de7e4a4bcd4633260dda99dfedc2a5e543b8b4)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** Page Table Walk is divided into two parts
  * One,   PTW: the page walk for non-leaf PDEs, resolved one level at a time
  * Two, LLPTW: the page walk for leaf PTEs (the last, 4KB level), resolved in parallel
  */

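// A minimal, self-contained sketch (illustrative only, not part of the design;
// the constants are the standard Sv39 values) of the arithmetic behind this
// split: the PTW below resolves the non-leaf levels one at a time, the LLPTW
// resolves the last (4KB) level, and at every level the next PTE address is
// basePpn << 12 | vpnSlice << 3.
object Sv39WalkSketch {
  private val offLen  = 12 // page-offset bits
  private val vpnnLen = 9  // bits per VPN slice
  // level 0 is the root level, matching getVpnn(vpn, 2) for the first access
  def vpnSlice(vpn: BigInt, level: Int): BigInt =
    (vpn >> ((2 - level) * vpnnLen)) & ((BigInt(1) << vpnnLen) - 1)
  def pteAddr(basePpn: BigInt, vpn: BigInt, level: Int): BigInt =
    (basePpn << offLen) | (vpnSlice(vpn, level) << 3) // 8-byte PTEs
}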

/** PTW : page table walker
  * a finite state machine
  * that only takes the 1GB and 2MB page walks,
  * in other words, every level except the last (leaf) one
  **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l1Hit = Bool()
    val ppn = UInt(ppnLen.W)
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val s2xlate = UInt(2.W) // bit 0: stage-2 translation involved; bit 1 (valid only when bit 0 is set): 0 = both stages, 1 = stage 2 only
    val resp = new PtwMergeResp
    val h_resp = new HptwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: llptw changed from "connect to llptw" to "connect to page cache"
  // to avoid a corner case that caused duplicate entries

  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(gvpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val h_resp = Output(new HptwResp)
    }))
  }
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
}
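
// An illustrative decode (a sketch, not a type used elsewhere in this file) of
// the 2-bit s2xlate field carried by the bundles above: bit 0 says whether
// stage-2 translation is involved at all, and bit 1 is meaningful only when
// bit 0 is set.
object S2xlateDecodeSketch {
  def describe(s2xlate: Int): String = (s2xlate & 0x3) match {
    case 1 => "both stages: VS-stage then G-stage" // bit 0 = 1, bit 1 = 0
    case 3 => "stage 2 only: G-stage translation"  // bit 0 = 1, bit 1 = 1
    case _ => "no stage-2 translation"             // bit 0 = 0, bit 1 ignored
  }
}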

class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)
  val sfence = io.sfence
  val mem = io.mem
  val req_s2xlate = Reg(UInt(2.W))
  val enableS2xlate = RegInit(false.B)

  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  val hgatp = io.csr.hgatp
  val flush = io.sfence.valid || satp.changed
  val onlyS1xlate = satp.mode =/= 0.U && hgatp.mode === 0.U
  val onlyS2xlate = satp.mode === 0.U && hgatp.mode =/= 0.U
  val s2xlate = enableS2xlate && !onlyS1xlate

  val level = RegInit(0.U(log2Up(Level).W))
  val af_level = RegInit(0.U(log2Up(Level).W)) // an access fault returns this level
  val ppn = Reg(UInt(ppnLen.W))
  val vpn = Reg(UInt(vpnLen.W))
  val gvpn = Reg(UInt(gvpnLen.W)) // for the cases: 1. satp == 0 and hgatp != 0 and exec hlv; 2. virtmode == 1, vsatp == 0 and hgatp != 0
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val pte = mem.resp.bits.asTypeOf(new PteBundle)

  // s/w registers
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val s_hptw_req = RegInit(true.B)
  val w_hptw_resp = RegInit(true.B)
  val s_last_hptw_req = RegInit(true.B)
  val w_last_hptw_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)

  val idle = RegInit(true.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = !idle && (!s_pmp_check || mem_addr_update) && !finish

  val pageFault = pte.isPf(level)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val hptw_pageFault = RegInit(false.B)
  val hptw_accessFault = RegInit(false.B)
  val last_s2xlate = RegInit(false.B)

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault
  val to_find_pte = level === 1.U && !find_pte
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire)

  val l1addr = MakeAddr(satp.ppn, getVpnn(vpn, 2))
  val l2addr = MakeAddr(Mux(l1Hit, ppn, pte.ppn), getVpnn(vpn, 1))
  val mem_addr = Mux(af_level === 0.U, l1addr, l2addr)

  val hptw_resp = io.hptw.resp.bits.h_resp
  val gpaddr = Mux(onlyS2xlate, Cat(gvpn, 0.U(offLen.W)), mem_addr)
  val hpaddr = Cat(hptw_resp.entry.ppn, 0.U(offLen.W))

  io.req.ready := idle

  io.resp.valid := !idle && mem_addr_update && !last_s2xlate && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault) || onlyS2xlate)
  io.resp.bits.source := source
  io.resp.bits.resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, Mux(accessFault, af_level, level), pte, vpn, satp.asid, hgatp.asid, vpn(sectortlbwidth - 1, 0), not_super = false)
  io.resp.bits.h_resp := io.hptw.resp.bits.h_resp
  io.resp.bits.s2xlate := req_s2xlate // the response carries the request's full 2-bit s2xlate encoding

  io.llptw.valid := !s_llptw_req && to_find_pte && !accessFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.req_info.gvpn := pte.ppn
  // bit 0 carries enableS2xlate; bit 1 stays 0 (when stage 2 applies, the LLPTW request covers both stages)
  io.llptw.bits.req_info.s2xlate := Cat(0.U(1.W), enableS2xlate.asUInt)

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := !s_mem_req && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  mem.req.bits.id := FsmReqID.U(bMemID.W)

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  io.hptw.req.valid := !s_hptw_req || !s_last_hptw_req
  io.hptw.req.bits.id := FsmReqID.U(bMemID.W)
  io.hptw.req.bits.gvpn := gvpn

  when (io.req.fire){
    val req = io.req.bits
    level := Mux(req.l1Hit, 1.U, 0.U)
    af_level := Mux(req.l1Hit, 1.U, 0.U)
    ppn := Mux(req.l1Hit, req.ppn, satp.ppn)
    vpn := req.req_info.vpn
    gvpn := req.req_info.gvpn
    enableS2xlate := req.req_info.s2xlate(0)
    l1Hit := req.l1Hit
    accessFault := false.B
    idle := false.B
    hptw_pageFault := false.B
    req_s2xlate := req.req_info.s2xlate
    when(req.req_info.s2xlate(0) && hgatp.mode =/= 0.U){
      // stage-2 translation goes first: hold off the PMP check until the HPTW responds
      last_s2xlate := true.B
      s_hptw_req := false.B
    }.otherwise {
      s_pmp_check := false.B
    }
  }

  when(io.hptw.req.fire && !s_hptw_req){
    s_hptw_req := true.B
    w_hptw_resp := false.B
  }

  when(io.hptw.resp.fire && !w_hptw_resp) {
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_hptw_resp := true.B
    when(onlyS2xlate){
      mem_addr_update := true.B
      last_s2xlate := false.B
    }.otherwise {
      s_pmp_check := false.B
    }
  }

  when(io.hptw.req.fire && !s_last_hptw_req) {
    w_last_hptw_resp := false.B
    s_last_hptw_req := true.B
  }

  when(io.hptw.resp.fire && !w_last_hptw_resp){
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when(sent_to_pmp && !mem_addr_update){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && !idle){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when (mem.req.fire){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(mem.resp.fire && !w_mem_resp){
    w_mem_resp := true.B
    af_level := af_level + 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
  }

  when(mem_addr_update){
    when(level === 0.U && !(find_pte || accessFault)){
      level := levelNext
      when(s2xlate){
        s_hptw_req := false.B
      }.otherwise{
        s_mem_req := false.B
      }
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen(io.llptw.valid){
      when(io.llptw.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        last_s2xlate := false.B
      }
      finish := true.B
    }.elsewhen(s2xlate && last_s2xlate){
      s_last_hptw_req := false.B
      mem_addr_update := false.B
    }.elsewhen(io.resp.valid){
      when(io.resp.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }


  when (sfence.valid) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
  }


  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("ptw_ppn_af", io.resp.fire && ppn_af)
  XSPerfAccumulate("mem_count", mem.req.fire)
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire, mem.resp.fire, true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(!idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire                                     ),
    ("fsm_busy          ", !idle                                           ),
    ("fsm_idle          ", idle                                            ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                 ),
    ("mem_count         ", mem.req.fire                                    ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire, mem.resp.fire, true)),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                 ),
  )
  generatePerfEvent()
}
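
// A minimal sketch (a hypothetical module, not part of XiangShan) of the
// "s_* / w_*" register idiom the PTW above is built from: a cleared s_req
// means "a request still has to be sent", a cleared w_resp means "a response
// is still outstanding", and both set means the unit is quiescent.
class HandshakeRegsSketch extends Module {
  val io = IO(new Bundle {
    val start = Input(Bool())
    val req   = DecoupledIO(UInt(8.W))
    val resp  = Flipped(ValidIO(UInt(8.W)))
    val done  = Output(Bool())
  })
  val s_req  = RegInit(true.B) // set: nothing left to send
  val w_resp = RegInit(true.B) // set: nothing outstanding
  when (io.start && s_req && w_resp) { s_req := false.B } // arm one transaction
  io.req.valid := !s_req
  io.req.bits  := 0.U
  when (io.req.fire) { s_req := true.B; w_resp := false.B }
  when (io.resp.valid && !w_resp) { w_resp := true.B }
  io.done := s_req && w_resp
}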

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
  * the page walker that only takes the last-level (4KB) page walk.
  **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(if(HasHExtension) UInt(gvpnLen.W) else UInt(ppnLen.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val h_resp = Output(new HptwResp)
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle{
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(gvpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val h_resp = Output(new HptwResp)
    }))
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val s2xlate = Bool()
  val gvpn = UInt(gvpnLen.W) // the guest-physical page number for stage-2 translation
  val ppn = UInt(ppnLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
  val gaf = Bool()
  val gpf = Bool()
}


class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())
  val enableS2xlate = io.in.bits.req_info.s2xlate(0)
  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)

  val flush = io.sfence.valid || satp.changed
  val entries = Reg(Vec(l2tlbParams.llptwsize, new LLPTWEntry()))
  val state_idle :: state_hptw_req :: state_hptw_resp :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_last_hptw_req :: state_last_hptw_resp :: state_cache :: Nil = Enum(10)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))

  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)
  val is_hptw_req = state.map(_ === state_hptw_req)
  val is_last_hptw_req = state.map(_ === state_last_hptw_req)

  val full = !ParallelOR(is_emptys).asBool
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
  val mem_arb = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }
  val hyper_arb1 = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb1.io.in(i).bits := entries(i)
    hyper_arb1.io.in(i).valid := is_hptw_req(i)
  }
  val hyper_arb2 = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb2.io.in(i).bits := entries(i)
    hyper_arb2.io.in(i).valid := is_last_hptw_req(i)
  }

  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate req
  // to_wait: wait for the earlier entry to access mem, set to state_mem_waiting
  // to_cache: the earlier entry came back just now, set to state_cache
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn) && io.in.bits.req_info.hyperinst === entries(i).req_info.hyperinst
  )
  val dup_req_fire = mem_arb.io.out.fire && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) && io.in.bits.req_info.hyperinst === mem_arb.io.out.bits.req_info.hyperinst // dup with the entry whose req fires this cycle
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries, which have already sent the mem req
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with the "mem_out" entry that received its data just now
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with the entry whose data comes next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp
  val to_cache = Cat(dup_vec_having).orR
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req but some entries already waiting, should not happen")

  XSError(io.in.fire && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflict with to mem")
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
  val enq_state_normal = Mux(to_mem_out, state_mem_out, // same as below, but the mem resp arrives now
    Mux(to_wait, state_mem_waiting,
    Mux(to_cache, state_cache, state_addr_check)))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
  when (io.in.fire) {
    // if a prefetch req does not need mem access, just give it up.
    // so there will be at most 1 + FilterSize entries that need to re-access the page cache,
    // and 2 + FilterSize is enough to avoid deadlock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).gvpn := io.in.bits.req_info.gvpn
    entries(enq_ptr).ppn := io.in.bits.ppn
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    entries(enq_ptr).gaf := false.B
    entries(enq_ptr).gpf := false.B
    entries(enq_ptr).s2xlate := enableS2xlate
    mem_resp_hit(enq_ptr) := to_mem_out
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = RegNext(enq_state === state_addr_check && (io.in.fire || io.hptw.resp.fire) && !flush)

  val gpaddr = MakeGAddr(io.in.bits.req_info.gvpn, getVpnn(io.in.bits.req_info.vpn, 0))
  val hpaddr = Cat(io.in.bits.ppn, gpaddr(offLen-1, 0))

  val addr = Mux(enableS2xlate, hpaddr, MakeAddr(io.in.bits.ppn, getVpnn(io.in.bits.req_info.vpn, 0)))

  io.pmp.req.valid := need_addr_check
  io.pmp.req.bits.addr := RegEnable(addr, io.in.fire)
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: when the pmp resp arrives but the state is not addr_check, the entry is a dup of another entry and its state was changed before.
    //       when it dups with the req-ing entry, it was set to mem_waiting (above), and ld must be false, so dontcare
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(enq_ptr_reg).af := accessFault
    state(enq_ptr_reg) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: "a dup enq sets its state to mem_waiting" -> "a firing req sets the other dup entries to mem_waiting"
        state(i) := state_mem_waiting
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire) {
    state.indices.foreach{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        state(i) := Mux(entries(i).s2xlate, state_last_hptw_req, state_mem_out)
        mem_resp_hit(i) := true.B
      }
    }
  }

  when (hyper_arb1.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_req && entries(i).ppn === hyper_arb1.io.out.bits.ppn && entries(i).s2xlate) {
        state(i) := state_hptw_resp
        entries(i).wait_id := hyper_arb1.io.chosen
      }
    }
  }

  when (hyper_arb2.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_last_hptw_req && entries(i).ppn === hyper_arb2.io.out.bits.ppn && entries(i).s2xlate) {
        state(i) := state_last_hptw_resp
        entries(i).wait_id := hyper_arb2.io.chosen
      }
    }
  }

  when (io.hptw.resp.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id) {
        state(i) := state_addr_check
        entries(i).gpf := io.hptw.resp.bits.h_resp.gpf
        entries(i).gaf := io.hptw.resp.bits.h_resp.gaf
      }
      when (state(i) === state_last_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id) {
        state(i) := state_mem_out
        entries(i).gpf := io.hptw.resp.bits.h_resp.gpf
        entries(i).gaf := io.hptw.resp.bits.h_resp.gaf
      }
    }
  }

  when (io.out.fire) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.foreach(a => when (a) { a := false.B })

  when (io.cache.fire) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire && io.cache.fire && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time at the same entry")

  when (flush) {
    state.foreach(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af
  io.out.bits.h_resp := io.hptw.resp.bits.h_resp

  io.hptw.req.valid := (hyper_arb1.io.out.valid || hyper_arb2.io.out.valid) && !flush
  io.hptw.req.bits.gvpn := Mux(hyper_arb1.io.out.valid, hyper_arb1.io.out.bits.gvpn, hyper_arb2.io.out.bits.gvpn)
  io.hptw.req.bits.id := Mux(hyper_arb1.io.out.valid, hyper_arb1.io.chosen, hyper_arb2.io.chosen)
  hyper_arb1.io.out.ready := io.hptw.req.ready
  // arb1 has priority in the gvpn/id Mux above, so only dequeue arb2 when arb1 has nothing to send
  hyper_arb2.io.out.ready := io.hptw.req.ready && !hyper_arb1.io.out.valid

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  io.mem.req.bits.addr := MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.id := mem_arb.io.chosen
  mem_arb.io.out.ready := io.mem.req.ready
  io.mem.refill := entries(RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))).req_info
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire)
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 10) { // one counter per enq_state value in the Enum(10) above
    XSPerfAccumulate(s"enq_state${i}", io.in.fire && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire)
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue time out no out ${i}")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount           ", io.in.fire                 ),
    ("tlbllptw_inblock           ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount          ", io.mem.req.fire            ),
    ("tlbllptw_memcycle          ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}
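
// A pure-Scala sketch (illustrative only) of the enqueue-state priority the
// LLPTW above applies to a possibly-duplicate request; the real logic
// additionally turns prefetch requests that need no mem access into state_idle.
object LlptwEnqStateSketch {
  sealed trait EnqState
  case object MemOut     extends EnqState // the duplicate's mem resp arrives this cycle: take it
  case object MemWaiting extends EnqState // the duplicate is in flight: share its wait_id
  case object Cache      extends EnqState // the duplicate finished just now: re-read the page cache
  case object AddrCheck  extends EnqState // no duplicate: start with the PMP/addr check
  def enqState(dupWaitResp: Boolean, toWait: Boolean, toCache: Boolean): EnqState =
    if (dupWaitResp) MemOut
    else if (toWait) MemWaiting
    else if (toCache) Cache
    else AddrCheck
}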

/*========================= HPTW ==============================*/

/** HPTW : Hypervisor Page Table Walker
  * the page walker that performs the virtual machine's page walk,
  * i.e. guest-physical address translation: guest physical address -> host physical address
  **/
class HPTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val gvpn = UInt(gvpnLen.W)
    val l1Hit = Bool()
    val l2Hit = Bool()
    val ppn = UInt(ppnLen.W)
  }))
  val resp = Valid(new Bundle {
    val resp = Output(new HptwResp())
    val id = Output(UInt(bMemID.W))
  })

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class HPTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new HPTWIO)
  val hgatp = io.csr.hgatp
  val sfence = io.sfence
  val flush = sfence.valid || hgatp.changed

  val level = RegInit(0.U(log2Up(Level).W))
  val gpaddr = Reg(UInt(GPAddrBits.W))
  val vpn = gpaddr(GPAddrBits-1, offLen)
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val ppn = Reg(UInt(ppnLen.W))
  val pg_base = MakeAddr(hgatp.ppn, getGVpnn(vpn, 2.U))
  val pte = io.mem.resp.bits.asTypeOf(new PteBundle)
  val p_pte = MakeAddr(ppn, getVpnn(vpn, 2.U - level))
  val mem_addr = Mux(level === 0.U, pg_base, p_pte)

  // s/w registers
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val mem_addr_update = RegInit(true.B)
  val idle = RegInit(true.B)
  val finish = WireInit(false.B)

  val sent_to_pmp = !idle && (!s_pmp_check || mem_addr_update) && !finish
  val pageFault = pte.isPf(level)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault

  val resp_valid = !idle && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  val id = Reg(UInt(log2Up(l2tlbParams.llptwsize).W))
  io.req.ready := idle
  val resp = Wire(new HptwResp()) // must be a Wire so apply below can drive its fields
  resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, level, pte, vpn, hgatp.asid)
  io.resp.valid := resp_valid
  io.resp.bits.id := id
  io.resp.bits.resp := resp

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd := TlbCmd.read

  io.mem.req.valid := !s_mem_req && !io.mem.mask && !accessFault && s_pmp_check
  io.mem.req.bits.addr := mem_addr
  io.mem.req.bits.id := HptwReqId.U(bMemID.W)

  io.refill.req_info.gvpn := vpn
  io.refill.level := level
  when (idle){
    when(io.req.fire){
      level := Mux(io.req.bits.l2Hit, 2.U, Mux(io.req.bits.l1Hit, 1.U, 0.U))
      idle := false.B
      gpaddr := Cat(io.req.bits.gvpn, 0.U(offLen.W))
      accessFault := false.B
      s_pmp_check := false.B
      id := io.req.bits.id
      l1Hit := io.req.bits.l1Hit
      l2Hit := io.req.bits.l2Hit
      ppn := io.req.bits.ppn
    }
  }

  when(sent_to_pmp && !mem_addr_update){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && !idle){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when(io.mem.req.fire){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(io.mem.resp.fire && !w_mem_resp){
    ppn := pte.ppn
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when(mem_addr_update){
    when(!(find_pte || accessFault)){
      level := levelNext
      s_mem_req := false.B
      mem_addr_update := false.B
    }.elsewhen(resp_valid){
      when(io.resp.fire){
        idle := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }
}
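
// Illustrative arithmetic only (assumes 12-bit page offsets and 8-byte PTEs,
// matching the constants used above): how the HPTW forms its guest-physical
// address from a gvpn, and the root PTE address from the hgatp base.
object HptwAddrSketch {
  private val offLen = 12
  def gpaddr(gvpn: BigInt): BigInt = gvpn << offLen // Cat(gvpn, 0.U(offLen.W))
  def rootPteAddr(hgatpPpn: BigInt, vpnnTop: BigInt): BigInt =
    (hgatpPpn << offLen) | (vpnnTop << 3) // MakeAddr(hgatp.ppn, getGVpnn(vpn, 2.U))
}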