/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import chisel3.internal.naming.chiselName
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** The page table walk is divided into two parts:
 * One,   PTW: walks the non-leaf PDEs, one level at a time
 * Two, LLPTW: walks only the leaf (4KB) PTEs, multiple requests in parallel
 */

/** PTW : page table walker
 * a finite state machine
 * that performs only the 1GB and 2MB (non-leaf) levels of the walk,
 * in other words, every level except the last (leaf) one
 **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l1Hit = Bool()
    val ppn = UInt(ppnLen.W)
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = new PtwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: this port changed from "connect to llptw" to "connect to page cache"
  // to avoid a corner case that caused duplicate entries

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
}

@chiselName
class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)

  val sfence = io.sfence
  val mem = io.mem
  val satp = io.csr.satp
  val flush = io.sfence.valid || io.csr.satp.changed

  val level = RegInit(0.U(log2Up(Level).W))
  val af_level = RegInit(0.U(log2Up(Level).W)) // the level reported on an access fault
  val ppn = Reg(UInt(ppnLen.W))
  val vpn = Reg(UInt(vpnLen.W))
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val memPte = mem.resp.bits.asTypeOf(new PteBundle().cloneType)

  // send/wait handshake registers: s_* is false while a request still has to
  // be sent, w_* is false while a response is still outstanding
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)

  val idle = RegInit(true.B)
  val sent_to_pmp = idle === false.B && (s_pmp_check === false.B || mem_addr_update)

  val pageFault = memPte.isPf(level)
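  // With Sv39, levels 0/1/2 of the walk correspond to 1GB/2MB/4KB mappings;
  // this FSM only ever sits at level 0 or 1, since leaf (4KB) accesses are
  // handed over to LLPTW. The PMP answers combinationally in the same cycle
  // as the request (see the io.pmp wiring below), so the fault bits are
  // latched whenever a check has been sent and cleared again on the next
  // req fire or resp fire.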
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val find_pte = memPte.isLeaf() || pageFault
  val to_find_pte = level === 1.U && find_pte === false.B
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire())

  val l1addr = MakeAddr(satp.ppn, getVpnn(vpn, 2))
  val l2addr = MakeAddr(Mux(l1Hit, ppn, memPte.ppn), getVpnn(vpn, 1))
  val mem_addr = Mux(af_level === 0.U, l1addr, l2addr)

  io.req.ready := idle

  io.resp.valid := idle === false.B && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  io.resp.bits.source := source
  io.resp.bits.resp.apply(pageFault && !accessFault, accessFault, Mux(accessFault, af_level, level), memPte, vpn, satp.asid)

  io.llptw.valid := s_llptw_req === false.B && to_find_pte && !accessFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.ppn := memPte.ppn

  io.pmp.req.valid := DontCare // the pmp responds in the same cycle, so valid is not used
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := s_mem_req === false.B && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := mem_addr
  mem.req.bits.id := FsmReqID.U(bMemID.W)

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  when (io.req.fire()){
    val req = io.req.bits
    level := Mux(req.l1Hit, 1.U, 0.U)
    af_level := Mux(req.l1Hit, 1.U, 0.U)
    ppn := Mux(req.l1Hit, io.req.bits.ppn, satp.ppn)
    vpn := io.req.bits.req_info.vpn
    l1Hit := req.l1Hit
    accessFault := false.B
    s_pmp_check := false.B
    idle := false.B
  }

  when (sent_to_pmp && mem_addr_update === false.B){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when (accessFault && idle === false.B){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    mem_addr_update := true.B
  }

  when (mem.req.fire()){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when (mem.resp.fire() && w_mem_resp === false.B){
    w_mem_resp := true.B
    af_level := af_level + 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
  }
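  // One walk step has finished (or faulted): decide where to go next.
  // At level 0 with a valid non-leaf PTE, descend one level and issue the
  // next memory request; at level 1 with a non-leaf PTE, hand the request
  // over to LLPTW for the leaf access; on a leaf PTE, a page fault or an
  // access fault, respond directly and return to idle.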
  when (mem_addr_update){
    when (level === 0.U && !(find_pte || accessFault)){
      level := levelNext
      s_mem_req := false.B
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen (io.llptw.fire()){
      idle := true.B
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen (io.resp.fire()){
      idle := true.B
      s_llptw_req := true.B
      mem_addr_update := false.B
      accessFault := false.B
    }
  }

  when (sfence.valid) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
  }

  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire())
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire() && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("mem_count", mem.req.fire())
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire(), mem.resp.fire(), true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(!idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count     ", io.req.fire()                                       ),
    ("fsm_busy      ", !idle                                               ),
    ("fsm_idle      ", idle                                                ),
    ("resp_blocked  ", io.resp.valid && !io.resp.ready                     ),
    ("mem_count     ", mem.req.fire()                                      ),
    ("mem_cycle     ", BoolStopWatch(mem.req.fire(), mem.resp.fire(), true)),
    ("mem_blocked   ", mem.req.valid && !mem.req.ready                     ),
  )
  generatePerfEvent()
}

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
 * the page walker that performs only the last-level (4KB page) walk,
 * serving multiple requests in parallel
 **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(PAddrBits.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(ppnLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
}

@chiselName
class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())

  val flush = io.sfence.valid || io.csr.satp.changed
  val entries = Reg(Vec(l2tlbParams.llptwsize, new LLPTWEntry()))
  val state_idle :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_cache :: Nil = Enum(6)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))
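  // Per-entry state machine:
  //   idle -> addr_check -> mem_req -> mem_waiting -> mem_out -> idle
  // An entry that duplicates an in-flight request skips straight to
  // mem_waiting (sharing the in-flight entry's wait_id), and one that
  // duplicates an entry whose data has just returned goes to state_cache,
  // so it is replayed against the page cache instead of accessing memory
  // again (see the duplicate-req handling below).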
  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)

  val full = !ParallelOR(is_emptys).asBool()
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
  val mem_arb = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate req
  // to_wait: wait for the earlier entry to access mem, set state to mem_waiting
  // to_cache: the earlier entry came back just now, set state to state_cache
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn)
  )
  val dup_req_fire = mem_arb.io.out.fire() && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) // dup with the entry whose req fires this cycle
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with a "mem_waiting" entry, whose mem req was already sent
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with a "mem_out" entry that just received its data
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire() && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with the entry whose data arrives this cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp
  val to_cache = Cat(dup_vec_having).orR
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req fired but some entries are already waiting, should not happen")

  XSError(io.in.fire() && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflicts with to mem")
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
  val enq_state_normal = Mux(to_mem_out, state_mem_out, // like to_wait below, but the mem resp arrives right now
    Mux(to_wait, state_mem_waiting,
    Mux(to_cache, state_cache, state_addr_check)))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
  when (io.in.fire()) {
    // if a prefetch req does not need mem access, just drop it;
    // then at most 1 + FilterSize entries need to re-access the page cache,
    // so 2 + FilterSize is enough to avoid deadlock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := io.in.bits.ppn
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    mem_resp_hit(enq_ptr) := to_mem_out
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = RegNext(enq_state === state_addr_check && io.in.fire() && !flush)
  val last_enq_vpn = RegEnable(io.in.bits.req_info.vpn, io.in.fire())

  io.pmp.req.valid := need_addr_check
  io.pmp.req.bits.addr := RegEnable(MakeAddr(io.in.bits.ppn, getVpnn(io.in.bits.req_info.vpn, 0)), io.in.fire())
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry
    //       duplicates another entry and its state was already changed; when it
    //       duplicates the entry whose req is firing, it was set to mem_waiting above
    //       and ld must be false then, so the resp can safely be ignored
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(enq_ptr_reg).af := accessFault
    state(enq_ptr_reg) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire()) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: "a dup enq sets its state to mem_waiting" -> "sending a req sets the other dup entries to mem_waiting"
        state(i) := state_mem_waiting
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
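  // The mem resp carries only the id of the entry that issued the request;
  // broadcast it so that every entry waiting on that wait_id (the issuer and
  // all of its duplicates) moves to mem_out together and is marked for the
  // buffered refill via io.mem.buffer_it below.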
  when (io.mem.resp.fire()) {
    state.indices.foreach{ i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        state(i) := state_mem_out
        mem_resp_hit(i) := true.B
      }
    }
  }

  when (io.out.fire()) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.foreach(a => when (a) { a := false.B })

  when (io.cache.fire()) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire() && io.cache.fire() && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time at the same entry")

  when (flush) {
    state.foreach(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool()
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  io.mem.req.bits.addr := MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.id := mem_arb.io.chosen
  mem_arb.io.out.ready := io.mem.req.ready
  io.mem.refill := entries(RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))).req_info
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire())
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 6) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire() && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire())
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"llptw time out, no out ${i}")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount  ", io.in.fire()               ),
    ("tlbllptw_inblock  ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount ", io.mem.req.fire()          ),
    ("tlbllptw_memcycle ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}