xref: /XiangShan/src/main/scala/xiangshan/cache/mmu/PageTableWalker.scala (revision c9ae2b1404646566e3bf6646cb044fe7199962d0)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** Page Table Walk is divided into two parts
  * One,   PTW: page walks for the PDEs (non-leaf entries), one level at a time
  * Two, LLPTW: page walks for the PTEs (4KB leaf entries only), in parallel
  */
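// NOTE (inferred from the code below, not authoritative documentation):
// a request first walks the upper levels in PTW; at the last non-leaf level
// PTW hands the request over to LLPTW (through the page cache, see the NOTE
// in PTWIO), which keeps up to l2tlbParams.llptwsize leaf-level walks in
// flight at once.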


/** PTW : page table walker
  * a finite state machine
  * only takes the 1GB and 2MB page walks,
  * in other words, every level except the last (leaf) level
  **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l1Hit = Bool()
    val ppn = UInt(gvpnLen.W)
    val stage1Hit = Bool()
    val stage1 = new PtwMergeResp
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val s2xlate = UInt(2.W)
    val resp = new PtwMergeResp
    val h_resp = new HptwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: llptw was changed from "connect to llptw" to "connect to page cache"
  // to avoid a corner case that caused duplicate entries

  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(vpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val h_resp = Output(new HptwResp)
    }))
  }
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
}

class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)
  val sfence = io.sfence
  val mem = io.mem
  val req_s2xlate = Reg(UInt(2.W))
  val enableS2xlate = req_s2xlate =/= noS2xlate
  val onlyS1xlate = req_s2xlate === onlyStage1
  val onlyS2xlate = req_s2xlate === onlyStage2

  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  val hgatp = io.csr.hgatp
  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val s2xlate = enableS2xlate && !onlyS1xlate
  val level = RegInit(0.U(log2Up(Level).W))
  val af_level = RegInit(0.U(log2Up(Level).W)) // on access fault, report this level
  val ppn = Reg(UInt(gvpnLen.W))
  val vpn = Reg(UInt(vpnLen.W)) // vpn or gvpn (onlyS2xlate)
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val pte = mem.resp.bits.asTypeOf(new PteBundle().cloneType)

  // send/wait handshake registers
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val s_hptw_req = RegInit(true.B)
  val w_hptw_resp = RegInit(true.B)
  val s_last_hptw_req = RegInit(true.B)
  val w_last_hptw_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)
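  // NOTE (inferred from usage in this file): s_x === false.B means "request x
  // still needs to be sent"; w_x === false.B means "response x is still
  // outstanding". All of them reset to true.B ("nothing to do") and are
  // pulled low to schedule work.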

  val idle = RegInit(true.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = idle === false.B && (s_pmp_check === false.B || mem_addr_update) && !finish

  val pageFault = pte.isPf(level)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val hptw_pageFault = RegInit(false.B)
  val hptw_accessFault = RegInit(false.B)
  val last_s2xlate = RegInit(false.B)
  val stage1Hit = RegEnable(io.req.bits.stage1Hit, io.req.fire)
  val stage1 = RegEnable(io.req.bits.stage1, io.req.fire)
  val hptw_resp_stage2 = Reg(Bool())

  val ppn_af = Mux(s2xlate, pte.isStage1Af(), pte.isAf()) // in two-stage translation, the stage 1 ppn is a vpn from the host's point of view, so there is no need to check ppn_high
  val guest_fault = hptw_pageFault || hptw_accessFault
  val find_pte = pte.isLeaf() || ppn_af || pageFault
  val to_find_pte = level === 1.U && find_pte === false.B
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire)

  val l1addr = MakeAddr(satp.ppn, getVpnn(vpn, 2))
  val l2addr = MakeAddr(Mux(l1Hit, ppn, pte.getPPN()), getVpnn(vpn, 1))
  val mem_addr = Mux(af_level === 0.U, l1addr, l2addr)
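  // NOTE (a sketch of the address formation, assuming MakeAddr builds a pte
  // address as (base_ppn << offLen) | (vpn_slice * pte_size); this is an
  // interpretation of MakeAddr/getVpnn, not their definition):
  //   l1addr indexes the root table (satp.ppn) with the top vpn slice;
  //   l2addr indexes the next-level table with the middle slice, using either
  //   the cached ppn (l1Hit) or the PPN of the just-fetched non-leaf pte.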

  val hptw_resp = RegEnable(io.hptw.resp.bits.h_resp, io.hptw.resp.fire)
  val gpaddr = MuxCase(mem_addr, Seq(
    stage1Hit -> Cat(stage1.genPPN(), 0.U(offLen.W)),
    onlyS2xlate -> Cat(vpn, 0.U(offLen.W)),
    !s_last_hptw_req -> Cat(MuxLookup(level, pte.getPPN())(Seq(
      0.U -> Cat(pte.getPPN()(gvpnLen - 1, vpnnLen * 2), vpn(vpnnLen * 2 - 1, 0)),
      1.U -> Cat(pte.getPPN()(gvpnLen - 1, vpnnLen), vpn(vpnnLen - 1, 0))
    )), 0.U(offLen.W))
  ))
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
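  // NOTE (inferred): gpaddr is the guest physical address that still needs
  // stage-2 translation; hpaddr is the host physical address obtained by
  // splicing the stage-2 ppn from the latest hptw_resp onto gpaddr's offset.
  // For super pages (level 0/1) the untranslated low vpn bits are kept.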

  io.req.ready := idle
  val pte_valid = RegInit(false.B) // avoid the x states
  val fake_pte = 0.U.asTypeOf(pte)
  fake_pte.perm.v := true.B
  fake_pte.perm.r := true.B
  fake_pte.perm.w := true.B
  fake_pte.perm.x := true.B
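  // NOTE (a reading of the code, not authoritative): fake_pte is a permissive
  // stand-in used when no stage-1 pte has actually been fetched (pte_valid is
  // false, e.g. onlyS2xlate or a fault before the first mem resp), so the
  // merged response below is built from valid-looking permissions instead of
  // uninitialized state.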
  val ptw_resp = Wire(new PtwMergeResp)
  ptw_resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, Mux(accessFault, af_level, level), Mux(pte_valid, pte, fake_pte), vpn, satp.asid, hgatp.asid, vpn(sectortlbwidth - 1, 0), not_super = false)

  val normal_resp = idle === false.B && mem_addr_update && !last_s2xlate && (guest_fault || (w_mem_resp && find_pte) || (s_pmp_check && accessFault) || onlyS2xlate)
  val stageHit_resp = idle === false.B && hptw_resp_stage2
  io.resp.valid := Mux(stage1Hit, stageHit_resp, normal_resp)
  io.resp.bits.source := source
  io.resp.bits.resp := Mux(stage1Hit, stage1, ptw_resp)
  io.resp.bits.h_resp := hptw_resp
  io.resp.bits.s2xlate := req_s2xlate

  io.llptw.valid := s_llptw_req === false.B && to_find_pte && !accessFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.req_info.s2xlate := req_s2xlate
  io.llptw.bits.ppn := DontCare

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := s_mem_req === false.B && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  mem.req.bits.id := FsmReqID.U(bMemID.W)
  mem.req.bits.hptw_bypassed := false.B

  io.refill.req_info.s2xlate := Mux(enableS2xlate, onlyStage1, req_s2xlate) // ptw refills the stage 1 pte when s2xlate is enabled
  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  io.hptw.req.valid := !s_hptw_req || !s_last_hptw_req
  io.hptw.req.bits.id := FsmReqID.U(bMemID.W)
  io.hptw.req.bits.gvpn := get_pn(gpaddr)
  io.hptw.req.bits.source := source

  when (io.req.fire && io.req.bits.stage1Hit){
    idle := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    s_hptw_req := false.B
    hptw_resp_stage2 := false.B
    last_s2xlate := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
  }

  when (io.hptw.resp.fire && w_hptw_resp === false.B && stage1Hit){
    w_hptw_resp := true.B
    hptw_resp_stage2 := true.B
  }

  when (io.resp.fire && stage1Hit){
    idle := true.B
  }

  when (io.req.fire && !io.req.bits.stage1Hit){
    val req = io.req.bits
    level := Mux(req.l1Hit, 1.U, 0.U)
    af_level := Mux(req.l1Hit, 1.U, 0.U)
    ppn := Mux(req.l1Hit, io.req.bits.ppn, satp.ppn)
    vpn := io.req.bits.req_info.vpn
    l1Hit := req.l1Hit
    accessFault := false.B
    idle := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
    pte_valid := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    when(io.req.bits.req_info.s2xlate =/= noS2xlate && io.req.bits.req_info.s2xlate =/= onlyStage1){
      last_s2xlate := true.B
      s_hptw_req := false.B
    }.otherwise {
      last_s2xlate := false.B
      s_pmp_check := false.B
    }
  }

  when(io.hptw.req.fire && s_hptw_req === false.B){
    s_hptw_req := true.B
    w_hptw_resp := false.B
  }

  when(io.hptw.resp.fire && w_hptw_resp === false.B && !stage1Hit) {
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_hptw_resp := true.B
    when(onlyS2xlate){
      mem_addr_update := true.B
      last_s2xlate := false.B
    }.elsewhen(!(io.hptw.resp.bits.h_resp.gpf || io.hptw.resp.bits.h_resp.gaf)) {
      s_pmp_check := false.B
    }
  }

  when(io.hptw.req.fire && s_last_hptw_req === false.B) {
    w_last_hptw_resp := false.B
    s_last_hptw_req := true.B
  }

  when(io.hptw.resp.fire && w_last_hptw_resp === false.B){
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when(sent_to_pmp && mem_addr_update === false.B){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && idle === false.B){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when(guest_fault && idle === false.B){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when (mem.req.fire){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(mem.resp.fire && w_mem_resp === false.B){
    w_mem_resp := true.B
    af_level := af_level + 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
    pte_valid := true.B
  }

  when(mem_addr_update){
    when(level === 0.U && !onlyS2xlate && !(guest_fault || find_pte || accessFault)){
      level := levelNext
      when(s2xlate){
        s_hptw_req := false.B
      }.otherwise{
        s_mem_req := false.B
      }
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen(io.llptw.valid){
      when(io.llptw.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        last_s2xlate := false.B
      }
      finish := true.B
    }.elsewhen(s2xlate && last_s2xlate === true.B) {
      when(accessFault || pageFault || ppn_af){
        last_s2xlate := false.B
      }.otherwise{
        s_last_hptw_req := false.B
        mem_addr_update := false.B
      }
    }.elsewhen(io.resp.valid){
      when(io.resp.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }

  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
  }

  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("ptw_ppn_af", io.resp.fire && ppn_af)
  XSPerfAccumulate("mem_count", mem.req.fire)
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire, mem.resp.fire, true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(!idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire                                     ),
    ("fsm_busy          ", !idle                                           ),
    ("fsm_idle          ", idle                                            ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                 ),
    ("mem_count         ", mem.req.fire                                    ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire, mem.resp.fire, true)),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                 ),
  )
  generatePerfEvent()
}

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
  * the page walker that handles only the last-level (4KB) page walks.
  **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(gvpnLen.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val h_resp = Output(new HptwResp)
    val first_s2xlate_fault = Output(Bool()) // whether a pf/af occurred in the first stage 2 translation
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val value = Output(UInt(blockBits.W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle{
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(vpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val h_resp = Output(new HptwResp)
    }))
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(gvpnLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
  val hptw_resp = new HptwResp()
  val first_s2xlate_fault = Output(Bool())
}


class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())
  val enableS2xlate = io.in.bits.req_info.s2xlate =/= noS2xlate
  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)

  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val entries = Reg(Vec(l2tlbParams.llptwsize, new LLPTWEntry()))
  val state_idle :: state_hptw_req :: state_hptw_resp :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_last_hptw_req :: state_last_hptw_resp :: state_cache :: Nil = Enum(10)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))
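  // NOTE (an informal per-entry state summary, inferred from the transitions
  // below):
  //   idle -> (hptw_req -> hptw_resp ->) addr_check -> mem_req -> mem_waiting
  //        -> (last_hptw_req -> last_hptw_resp ->) mem_out -> idle
  //   state_cache: the needed pte was just refilled, so the request is sent
  //   back to the page cache instead of to memory.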

  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)
  val is_hptw_req = state.map(_ === state_hptw_req)
  val is_last_hptw_req = state.map(_ === state_last_hptw_req)
  val is_hptw_resp = state.map(_ === state_hptw_resp)
  val is_last_hptw_resp = state.map(_ === state_last_hptw_resp)

  val full = !ParallelOR(is_emptys).asBool
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
  val mem_arb = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  // process hptw requests serially
  val hyper_arb1 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb1.io.in(i).bits := entries(i)
    hyper_arb1.io.in(i).valid := is_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }
  val hyper_arb2 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb2.io.in(i).bits := entries(i)
    hyper_arb2.io.in(i).valid := is_last_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }

  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate req
  // to_wait: a dup entry is accessing mem right now; enqueue as state_mem_waiting
  // to_cache: a dup entry's data came back just now; enqueue as state_cache
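  // Example of the dedup policy (a reading of the code below, not a spec): if
  // a request for vpn V arrives while another entry for V (with the same
  // s2xlate) is in state_mem_waiting, the new entry also enters
  // state_mem_waiting and reuses that entry's wait_id, so one memory access
  // serves both. If the duplicate's data arrives this very cycle
  // (dup_wait_resp), the new entry can skip straight to state_mem_out or
  // state_last_hptw_req.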
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn) && io.in.bits.req_info.s2xlate === entries(i).req_info.s2xlate
  )
  val dup_req_fire = mem_arb.io.out.fire && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) && io.in.bits.req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate // dup with the req fire entry
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries, which already sent the mem req
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with the "mem_out" entry that received its data just now
  val dup_vec_last_hptw = dup_vec.zipWithIndex.map{case (d, i) => d && (is_last_hptw_req(i) || is_last_hptw_resp(i))}
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with the entry whose data arrives next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp && ((entries(io.mem.resp.bits.id).req_info.s2xlate === noS2xlate) || (entries(io.mem.resp.bits.id).req_info.s2xlate === onlyStage1))
  val to_cache = Cat(dup_vec_having).orR || Cat(dup_vec_last_hptw).orR
  val to_hptw_req = io.in.bits.req_info.s2xlate === allStage
  val to_last_hptw_req = dup_wait_resp && entries(io.mem.resp.bits.id).req_info.s2xlate === allStage
  val last_hptw_req_id = io.mem.resp.bits.id
  val req_paddr = MakeAddr(io.in.bits.ppn(ppnLen-1, 0), getVpnn(io.in.bits.req_info.vpn, 0))
  val req_hpaddr = MakeAddr(entries(last_hptw_req_id).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(io.in.bits.req_info.vpn, 0))
  val index = Mux(entries(last_hptw_req_id).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
  val last_hptw_req_ppn = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))(index).getPPN()
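  // NOTE (inferred): a mem resp carries a whole refill block of
  // blockBits / XLEN ptes; "index" selects this request's pte within that
  // block using the block-offset bits of its pte address.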
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req but some entries already waiting, should not happen")

  XSError(io.in.fire && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflict with to mem")
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
  val enq_state_normal = MuxCase(state_addr_check, Seq(
    to_mem_out -> state_mem_out, // same as to_wait below, but the mem resp arrives right now
    to_last_hptw_req -> state_last_hptw_req,
    to_wait -> state_mem_waiting,
    to_cache -> state_cache,
    to_hptw_req -> state_hptw_req
  ))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
  when (io.in.fire) {
    // if a prefetch req does not need mem access, just give it up;
    // so there will be at most 1 + FilterSize entries that need to re-access the page cache,
    // and 2 + FilterSize is enough to avoid dead-lock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := Mux(to_last_hptw_req, last_hptw_req_ppn, io.in.bits.ppn)
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    entries(enq_ptr).hptw_resp := Mux(to_last_hptw_req, entries(last_hptw_req_id).hptw_resp, Mux(to_wait, entries(wait_id).hptw_resp, entries(enq_ptr).hptw_resp))
    entries(enq_ptr).first_s2xlate_fault := false.B
    mem_resp_hit(enq_ptr) := to_mem_out || to_last_hptw_req
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = GatedValidRegNext(enq_state === state_addr_check && io.in.fire && !flush)

  val hasHptwResp = ParallelOR(state.map(_ === state_hptw_resp)).asBool
  val hptw_resp_ptr_reg = RegNext(io.hptw.resp.bits.id)
  val hptw_need_addr_check = RegNext(hasHptwResp && io.hptw.resp.fire && !flush) && state(hptw_resp_ptr_reg) === state_addr_check

  val ptes = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))
  val gpaddr = MakeGPAddr(entries(hptw_resp_ptr_reg).ppn, getVpnn(entries(hptw_resp_ptr_reg).req_info.vpn, 0))
  val hptw_resp = entries(hptw_resp_ptr_reg).hptw_resp
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val addr = RegEnable(MakeAddr(io.in.bits.ppn(ppnLen - 1, 0), getVpnn(io.in.bits.req_info.vpn, 0)), io.in.fire)
  io.pmp.req.valid := need_addr_check || hptw_need_addr_check
  io.pmp.req.bits.addr := Mux(hptw_need_addr_check, hpaddr, addr)
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry is a dup
    //       of another entry and its state was already changed above. When it dups with the
    //       req-ing entry it was set to mem_waiting (see the enq code above), and ld must be
    //       false in that case, so this write is a don't-care.
    val ptr = Mux(hptw_need_addr_check, hptw_resp_ptr_reg, enq_ptr_reg)
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(ptr).af := accessFault
    state(ptr) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && state(i) =/= state_mem_out && state(i) =/= state_last_hptw_req && state(i) =/= state_last_hptw_resp
      && entries(i).req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate
      && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: "dup enq sets state to mem_wait" -> "sending a req sets other dup entries to mem_wait"
        state(i) := state_mem_waiting
        entries(i).hptw_resp := entries(mem_arb.io.chosen).hptw_resp
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire) {
    state.indices.map{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        val req_paddr = MakeAddr(entries(i).ppn, getVpnn(entries(i).req_info.vpn, 0))
        val req_hpaddr = MakeAddr(entries(i).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(entries(i).req_info.vpn, 0))
        val index = Mux(entries(i).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
        state(i) := Mux(entries(i).req_info.s2xlate === allStage && !(ptes(index).isPf(2.U) || !ptes(index).isLeaf() || ptes(index).isAf()), state_last_hptw_req, state_mem_out)
        mem_resp_hit(i) := true.B
        entries(i).ppn := ptes(index).getPPN() // for the last stage 2 translation
      }
    }
  }

  when (hyper_arb1.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_req && entries(i).ppn === hyper_arb1.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb1.io.chosen === i.U) {
        state(i) := state_hptw_resp
        entries(i).wait_id := hyper_arb1.io.chosen
      }
    }
  }

  when (hyper_arb2.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_last_hptw_req && entries(i).ppn === hyper_arb2.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb2.io.chosen === i.U) {
        state(i) := state_last_hptw_resp
        entries(i).wait_id := hyper_arb2.io.chosen
      }
    }
  }

  when (io.hptw.resp.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        when (io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf) {
          state(i) := state_mem_out
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).first_s2xlate_fault := io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf
        }.otherwise{ // change the entry that is waiting for the hptw resp
          val need_to_waiting_vec = state.indices.map(i => state(i) === state_mem_waiting && dup(entries(i).req_info.vpn, entries(io.hptw.resp.bits.id).req_info.vpn))
          val waiting_index = ParallelMux(need_to_waiting_vec zip entries.map(_.wait_id))
          state(i) := Mux(Cat(need_to_waiting_vec).orR, state_mem_waiting, state_addr_check)
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).wait_id := Mux(Cat(need_to_waiting_vec).orR, waiting_index, entries(i).wait_id)
          // TODO: change the entry that has the same hptw req
        }
      }
      when (state(i) === state_last_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        state(i) := state_mem_out
        entries(i).hptw_resp := io.hptw.resp.bits.h_resp
        // TODO: change the entry that has the same hptw req
      }
    }
  }
  when (io.out.fire) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.map(a => when (a) { a := false.B } )

  when (io.cache.fire) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire && io.cache.fire && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time at same entry")

  when (flush) {
    state.map(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af
  io.out.bits.h_resp := entries(mem_ptr).hptw_resp
  io.out.bits.first_s2xlate_fault := entries(mem_ptr).first_s2xlate_fault

  val hptw_req_arb = Module(new Arbiter(new Bundle{
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val ppn = UInt(gvpnLen.W)
    }, 2))
  // first stage 2 translation
  hptw_req_arb.io.in(0).valid := hyper_arb1.io.out.valid
  hptw_req_arb.io.in(0).bits.source := hyper_arb1.io.out.bits.req_info.source
  hptw_req_arb.io.in(0).bits.ppn := hyper_arb1.io.out.bits.ppn
  hptw_req_arb.io.in(0).bits.id := hyper_arb1.io.chosen
  hyper_arb1.io.out.ready := hptw_req_arb.io.in(0).ready
  // last stage 2 translation
  hptw_req_arb.io.in(1).valid := hyper_arb2.io.out.valid
  hptw_req_arb.io.in(1).bits.source := hyper_arb2.io.out.bits.req_info.source
  hptw_req_arb.io.in(1).bits.ppn := hyper_arb2.io.out.bits.ppn
  hptw_req_arb.io.in(1).bits.id := hyper_arb2.io.chosen
  hyper_arb2.io.out.ready := hptw_req_arb.io.in(1).ready
  hptw_req_arb.io.out.ready := io.hptw.req.ready
  io.hptw.req.valid := hptw_req_arb.io.out.fire && !flush
  io.hptw.req.bits.gvpn := hptw_req_arb.io.out.bits.ppn
  io.hptw.req.bits.id := hptw_req_arb.io.out.bits.id
  io.hptw.req.bits.source := hptw_req_arb.io.out.bits.source

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  val mem_paddr = MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  val mem_hpaddr = MakeAddr(mem_arb.io.out.bits.hptw_resp.genPPNS2(get_pn(mem_paddr)), getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.addr := Mux(mem_arb.io.out.bits.req_info.s2xlate === allStage, mem_hpaddr, mem_paddr)
  io.mem.req.bits.id := mem_arb.io.chosen
  io.mem.req.bits.hptw_bypassed := false.B
  mem_arb.io.out.ready := io.mem.req.ready
  val mem_refill_id = RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))
  io.mem.refill := entries(mem_refill_id).req_info
  io.mem.refill.s2xlate := Mux(entries(mem_refill_id).req_info.s2xlate === noS2xlate, noS2xlate, onlyStage1) // llptw refills the stage 1 pte
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire)
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire)
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue time out no out ${i}")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount           ", io.in.fire                 ),
    ("tlbllptw_inblock           ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount          ", io.mem.req.fire            ),
    ("tlbllptw_memcycle          ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}

/*========================= HPTW ==============================*/

/** HPTW : Hypervisor Page Table Walker
  * the page walker that performs the virtual machine's stage 2 page walk:
  * guest physical address -> host physical address
  **/
class HPTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val gvpn = UInt(vpnLen.W)
    val ppn = UInt(ppnLen.W)
    val l1Hit = Bool()
    val l2Hit = Bool()
    val bypassed = Bool() // if bypass, don't refill
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = Output(new HptwResp())
    val id = Output(UInt(bMemID.W))
  })

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class HPTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new HPTWIO)
  val hgatp = io.csr.hgatp
  val sfence = io.sfence
  val flush = sfence.valid || hgatp.changed || io.csr.satp.changed || io.csr.vsatp.changed

  val level = RegInit(0.U(log2Up(Level).W))
  val gpaddr = Reg(UInt(GPAddrBits.W))
  val req_ppn = Reg(UInt(ppnLen.W))
  val vpn = gpaddr(GPAddrBits-1, offLen)
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val bypassed = Reg(Bool())
  val pg_base = MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U)) // for l0
//  val pte = io.mem.resp.bits.MergeRespToPte()
  val pte = io.mem.resp.bits.asTypeOf(new PteBundle().cloneType)
  val ppn_l1 = Mux(l1Hit, req_ppn, pte.ppn)
  val ppn_l2 = Mux(l2Hit, req_ppn, pte.ppn)
  val ppn = Mux(level === 1.U, ppn_l1, ppn_l2) // for l1 and l2
  val p_pte = MakeAddr(ppn, getVpnn(vpn, 2.U - level))
  val mem_addr = Mux(level === 0.U, pg_base, p_pte)
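  // NOTE (inferred from the Muxes above): the walk address comes from the
  // stage-2 root (hgatp.ppn, via pg_base) at level 0, and at levels 1/2 from
  // either a cached ppn (l1Hit/l2Hit) or the PPN of the previously fetched pte.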

  // send/wait handshake registers (same convention as in PTW above)
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val idle = RegInit(true.B)
  val mem_addr_update = RegInit(false.B)
  val finish = WireInit(false.B)

  val sent_to_pmp = !idle && (!s_pmp_check || mem_addr_update) && !finish
  val pageFault = pte.isPf(level) || (!pte.isLeaf() && level >= 2.U)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault

  val resp_valid = !idle && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  val id = Reg(UInt(log2Up(l2tlbParams.llptwsize).W))
  val source = RegEnable(io.req.bits.source, io.req.fire)

  io.req.ready := idle
  val resp = Wire(new HptwResp())
  resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, level, pte, vpn, hgatp.asid)
  io.resp.valid := resp_valid
  io.resp.bits.id := id
  io.resp.bits.resp := resp
  io.resp.bits.source := source

  io.pmp.req.valid := DontCare
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd := TlbCmd.read

  io.mem.req.valid := !s_mem_req && !io.mem.mask && !accessFault && s_pmp_check
  io.mem.req.bits.addr := mem_addr
  io.mem.req.bits.id := HptwReqId.U(bMemID.W)
  io.mem.req.bits.hptw_bypassed := bypassed

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source
  io.refill.req_info.s2xlate := onlyStage2
  when (idle){
    when(io.req.fire){
      bypassed := io.req.bits.bypassed
      level := Mux(io.req.bits.l2Hit, 2.U, Mux(io.req.bits.l1Hit, 1.U, 0.U))
      idle := false.B
      gpaddr := Cat(io.req.bits.gvpn, 0.U(offLen.W))
      accessFault := false.B
      s_pmp_check := false.B
      id := io.req.bits.id
      req_ppn := io.req.bits.ppn
      l1Hit := io.req.bits.l1Hit
      l2Hit := io.req.bits.l2Hit
    }
  }

  when(sent_to_pmp && !mem_addr_update){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && !idle){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when(io.mem.req.fire){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(io.mem.resp.fire && !w_mem_resp){
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when(mem_addr_update){
    when(!(find_pte || accessFault)){
      level := levelNext
      s_mem_req := false.B
      mem_addr_update := false.B
    }.elsewhen(resp_valid){
      when(io.resp.fire){
        idle := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }

  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
  }
}
889