/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.util.SRAMAnnotation
import xiangshan._
import utils._
import utility._
import xiangshan.backend.fu.{PMPChecker, PMPReqBundle, PMPConfig => XSPMPConfig}
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.fu.util.HasCSRConst
import freechips.rocketchip.rocket.PMPConfig

/** TLB module
  * supports blocking and non-blocking request IO at the same time
  * returns paddr at the next cycle, which then goes to the pmp/pma check
  * @param Width: the number of requestor ports
  * @param Block: whether each requestor port is blocking
  * @param q: TLB Parameters, like entry number; each TLB has its own parameters
  * @param p: XiangShan Parameters, like XLEN
  */

class TLB(Width: Int, nRespDups: Int = 1, Block: Seq[Boolean], q: TLBParameters)(implicit p: Parameters) extends TlbModule
  with HasCSRConst
  with HasPerfEvents
{
  val io = IO(new TlbIO(Width, nRespDups, q))

  val req = io.requestor.map(_.req)
  val resp = io.requestor.map(_.resp)
  val ptw = io.ptw
  val pmp = io.pmp
  val refill_to_mem = io.refill_to_mem

  /** Sfence.vma & Svinval
    * Sfence.vma will 1. flush old entries 2. flush inflight 3. flush pipe
    * Svinval will 1. flush old entries 2. flush inflight
    * So, Svinval will not flush the pipe, which means
    * it should not drop reqs from the pipe and should return the right resp
    */
  val sfence = DelayN(io.sfence, q.fenceDelay)
  val csr = io.csr
  val satp = DelayN(io.csr.satp, q.fenceDelay)
  val vsatp = DelayN(io.csr.vsatp, q.fenceDelay)
  val hgatp = DelayN(io.csr.hgatp, q.fenceDelay)

  val flush_mmu = DelayN(sfence.valid || csr.satp.changed || csr.vsatp.changed || csr.hgatp.changed, q.fenceDelay)
  val mmu_flush_pipe = DelayN(sfence.valid && sfence.bits.flushPipe, q.fenceDelay) // for svinval, won't flush pipe
  val flush_pipe = io.flushPipe

  val isHyperInst = (0 until Width).map(i => ValidHold(req(i).fire && !req(i).bits.kill && req(i).bits.hyperinst, resp(i).fire, flush_pipe(i)))
  val isHlvx = (0 until Width).map(i => ValidHold(req(i).fire && !req(i).bits.kill && req(i).bits.hlvx, resp(i).fire, flush_pipe(i)))
  val onlyS2xlate = vsatp.mode === 0.U && hgatp.mode === 8.U
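
  // Reading aid (inferred from how the 2-bit s2xlate fields are built below; not an authoritative
  // definition): bit 0 means a stage-2 (G-stage) translation is involved, i.e. virtualization is on
  // or the access comes from a hypervisor load/store instruction; bit 1 additionally means stage-1
  // is bypassed (vsatp.mode === 0 while hgatp.mode === 8), i.e. "only stage-2".
  //   b00 -> no stage-2 translation
  //   b01 -> both stage-1 and stage-2
  //   b11 -> only stage-2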

  // ATTENTION: csr and flush from the backend are delayed. csr should not be later than flush,
  // because csr will influence tlb behavior.
  val ifecth = if (q.fetchi) true.B else false.B
  val mode_tmp = if (q.useDmode) csr.priv.dmode else csr.priv.imode
  val mode = (0 until Width).map(i => Mux(isHyperInst(i), csr.priv.spvp, mode_tmp))
  val virt = csr.priv.virt
  val sum = (0 until Width).map(i => Mux(virt || isHyperInst(i), io.csr.priv.vsum, io.csr.priv.sum))
  val mxr = (0 until Width).map(i => Mux(virt || isHyperInst(i), io.csr.priv.vmxr || io.csr.priv.mxr, io.csr.priv.mxr))

  // val vmEnable = satp.mode === 8.U // && (mode < ModeM) // FIXME: fix me when booting xv6/linux...
  val vmEnable = (0 until Width).map(i => if (EnbaleTlbDebug) (satp.mode === 8.U)
                                          else (satp.mode === 8.U) && (mode(i) < ModeM))
  val s2xlateEnable = (0 until Width).map(i => (isHyperInst(i) || virt) && (vsatp.mode === 8.U || hgatp.mode === 8.U) && (mode(i) < ModeM))
  val portTranslateEnable = (0 until Width).map(i => (vmEnable(i) || s2xlateEnable(i)) && RegNext(!req(i).bits.no_translate))

  val req_in = req
  val req_out = req.map(a => RegEnable(a.bits, a.fire))
  val req_out_v = (0 until Width).map(i => ValidHold(req_in(i).fire && !req_in(i).bits.kill, resp(i).fire, flush_pipe(i)))

  // refill the storage when a walk response comes back and translation is on for some port
  val refill = ptw.resp.fire && !flush_mmu && (vmEnable.reduce(_ || _) || ptw.resp.bits.s2xlate(0))

  val entries = Module(new TlbStorageWrapper(Width, q, nRespDups))
  entries.io.base_connect(sfence, csr, satp)
  if (q.outReplace) { io.replace <> entries.io.replace }
  for (i <- 0 until Width) {
    // bit 0: stage-2 involved; bit 1: only stage-2 (see encoding note above)
    val hasS2xlate = virt || req_in(i).bits.hyperinst
    val s2xlate = Cat(hasS2xlate && vsatp.mode === 0.U && hgatp.mode === 8.U, hasS2xlate)
    entries.io.r_req_apply(io.requestor(i).req.valid, get_pn(req_in(i).bits.vaddr), i, s2xlate)
    resp(i).bits.debug.isFirstIssue := RegNext(req(i).bits.debug.isFirstIssue)
    resp(i).bits.debug.robIdx := RegNext(req(i).bits.debug.robIdx)
  }
  entries.io.w_apply(refill, ptw.resp.bits)

  // read TLB, get hit/miss, paddr, perm bits
  val readResult = (0 until Width).map(TLBRead(_))
  val hitVec = readResult.map(_._1)
  val missVec = readResult.map(_._2)
  val pmp_addr = readResult.map(_._3)
  val perm = readResult.map(_._4)
  val g_perm = readResult.map(_._5)
  val s2xlate = readResult.map(_._6)
  // check pmp using paddr (for timing optimization, use pmp_addr here)
  // check permission
  (0 until Width).foreach{i =>
    pmp_check(pmp_addr(i), req_out(i).size, req_out(i).cmd, i)
    for (d <- 0 until nRespDups) {
      perm_check(perm(i)(d), req_out(i).cmd, i, d, g_perm(i)(d), req_out(i).hlvx, s2xlate(i)(d))
    }
  }

  // handle block or non-block io
  // for non-block io, just return the above result and send the miss to ptw
  // for block io, hold the request, send the miss to ptw,
  // and return the result when the ptw resp comes back
  (0 until Width) foreach {i =>
    if (Block(i)) handle_block(i)
    else handle_nonblock(i)
  }
  io.ptw.resp.ready := true.B

  /************************  main body above | method/log/perf below  ****************************/
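
  // Usage sketch for a non-blocking port (illustrative only; the requestor-side names
  // s0_valid/s0_vaddr and the outer instance name `tlb` are assumptions, not part of this file).
  // A non-blocking port answers in the cycle after the request, so a requestor typically does:
  //   tlb.io.requestor(0).req.valid      := s0_valid
  //   tlb.io.requestor(0).req.bits.vaddr := s0_vaddr
  //   val s1_paddr = tlb.io.requestor(0).resp.bits.paddr(0) // valid one cycle after req.fire
  //   val s1_miss  = tlb.io.requestor(0).resp.bits.miss     // on miss, replay the access later
  // and then feeds s1_paddr into the pmp/pma checker driven through io.pmp.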

  def TLBRead(i: Int) = {
    // bit 0: stage-2 involved; bit 1: only stage-2 (see encoding note above)
    val req_hasS2xlate = virt || req_in(i).bits.hyperinst
    val req_s2xlate = Cat(req_hasS2xlate && vsatp.mode === 0.U && hgatp.mode === 8.U, req_hasS2xlate)
    val (e_hit, e_ppn, e_perm, e_gvpn, e_g_perm, e_s2xlate) = entries.io.r_resp_apply(i)
    val (p_hit, p_ppn, p_perm, p_gvpn, p_g_perm, p_s2xlate) = ptw_resp_bypass(get_pn(req_in(i).bits.vaddr), req_s2xlate)
    val enable = portTranslateEnable(i)

    val hit = e_hit || p_hit
    val miss = !hit && enable

    hit.suggestName(s"hit_read_${i}")
    miss.suggestName(s"miss_read_${i}")

    val vaddr = SignExt(req_out(i).vaddr, PAddrBits)
    resp(i).bits.miss := miss
    resp(i).bits.ptwBack := ptw.resp.fire
    resp(i).bits.memidx := RegNext(req_in(i).bits.memidx)

    val ppn = WireInit(VecInit(Seq.fill(nRespDups)(0.U(ppnLen.W))))
    val perm = WireInit(VecInit(Seq.fill(nRespDups)(0.U.asTypeOf(new TlbPermBundle))))
    val gvpn = WireInit(VecInit(Seq.fill(nRespDups)(0.U(gvpnLen.W))))
    val g_perm = WireInit(VecInit(Seq.fill(nRespDups)(0.U.asTypeOf(new TlbPermBundle))))
    val s2xlate = WireInit(VecInit(Seq.fill(nRespDups)(0.U(2.W))))
    for (d <- 0 until nRespDups) {
      ppn(d) := Mux(p_hit, p_ppn, e_ppn(d))
      perm(d) := Mux(p_hit, p_perm, e_perm(d))
      gvpn(d) := Mux(p_hit, p_gvpn, e_gvpn(d))
      g_perm(d) := Mux(p_hit, p_g_perm, e_g_perm(d))
      s2xlate(d) := Mux(p_hit, p_s2xlate, e_s2xlate(d))
      val paddr = Cat(ppn(d), get_off(req_out(i).vaddr))
      val gpaddr = Cat(gvpn(d), get_off(req_out(i).vaddr))
      resp(i).bits.paddr(d) := Mux(enable, paddr, vaddr)
      resp(i).bits.gpaddr(d) := gpaddr
    }

    XSDebug(req_out_v(i), p"(${i.U}) hit:${hit} miss:${miss} ppn:${Hexadecimal(ppn(0))} perm:${perm(0)}\n")

    val pmp_paddr = resp(i).bits.paddr(0)

    (hit, miss, pmp_paddr, perm, g_perm, s2xlate)
  }

  def pmp_check(addr: UInt, size: UInt, cmd: UInt, idx: Int): Unit = {
    pmp(idx).valid := resp(idx).valid
    pmp(idx).bits.addr := addr
    pmp(idx).bits.size := size
    pmp(idx).bits.cmd := cmd
  }
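
  // Background for perm_check below (standard RISC-V privilege rules, summarized as a reading
  // aid): a U-mode access needs pte.u; an S/HS-mode access to a pte.u page needs sstatus.SUM and
  // is never allowed for instruction fetch; loads may also use pte.x pages when sstatus.MXR is
  // set; a cleared A bit (or a cleared D bit on a store/AMO) is reported as a page fault so that
  // software can update A/D. When two-stage translation is active, the same checks are repeated
  // against the stage-2 (G-stage) permissions and produce guest-page faults instead.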

  def perm_check(perm: TlbPermBundle, cmd: UInt, idx: Int, nDups: Int, g_perm: TlbPermBundle, hlvx: Bool, s2xlate: UInt) = {
    // for timing optimization, the pmp check is divided into dynamic and static parts
    // dynamic: superpage (or fully-associative reg entries) -> check pmp when translation is done
    // static: 4K pages (or sram entries) -> check pmp with pre-checked results
    val hasS2xlate = s2xlate(0) === 1.U
    val onlyS2 = s2xlate === "b11".U
    val af = perm.af || (hasS2xlate && g_perm.af)

    // Stage 1 perm check
    val pf = perm.pf
    val ldUpdate = !perm.a && TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) // update A/D through exception
    val stUpdate = (!perm.a || !perm.d) && (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd)) // update A/D through exception
    val instrUpdate = !perm.a && TlbCmd.isExec(cmd) // update A/D through exception
    val modeCheck = !(mode(idx) === ModeU && !perm.u || mode(idx) === ModeS && perm.u && (!sum(idx) || ifecth))
    val ldPermFail = !(modeCheck && (perm.r || mxr(idx) && perm.x))
    val stPermFail = !(modeCheck && perm.w)
    val instrPermFail = !(modeCheck && perm.x)
    val ldPf = (ldPermFail || pf) && (TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd))
    val stPf = (stPermFail || pf) && (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd))
    val instrPf = (instrPermFail || pf) && TlbCmd.isExec(cmd)
    val s1_valid = portTranslateEnable(idx) && !onlyS2

    // Stage 2 perm check
    val gpf = g_perm.pf
    val g_ldUpdate = !g_perm.a && TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd)
    val g_stUpdate = (!g_perm.a || !g_perm.d) && (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd))
    val g_instrUpdate = !g_perm.a && TlbCmd.isExec(cmd)
    val g_ldPermFail = !(g_perm.r || io.csr.priv.mxr && g_perm.x)
    val g_stPermFail = !g_perm.w
    val g_instrPermFail = !g_perm.x
    val ldGpf = (g_ldPermFail || gpf) && (TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd))
    val stGpf = (g_stPermFail || gpf) && (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd))
    val instrGpf = (g_instrPermFail || gpf) && TlbCmd.isExec(cmd)
    val s2_valid = hasS2xlate

    val fault_valid = s1_valid || s2_valid

    resp(idx).bits.excp(nDups).pf.ld := (ldPf || ldUpdate) && s1_valid && !af
    resp(idx).bits.excp(nDups).pf.st := (stPf || stUpdate) && s1_valid && !af
    resp(idx).bits.excp(nDups).pf.instr := (instrPf || instrUpdate) && s1_valid && !af
    // NOTE: pf needs && with !af. Page fault normally has higher priority than access fault,
    // but the ptw itself may take an access fault, in which case the translation is wrong,
    // so there pf has lower priority than af.

    resp(idx).bits.excp(nDups).gpf.ld := (ldGpf || g_ldUpdate) && s2_valid && !af
    resp(idx).bits.excp(nDups).gpf.st := (stGpf || g_stUpdate) && s2_valid && !af
    resp(idx).bits.excp(nDups).gpf.instr := (instrGpf || g_instrUpdate) && s2_valid && !af

    resp(idx).bits.excp(nDups).af.ld := af && TlbCmd.isRead(cmd) && fault_valid
    resp(idx).bits.excp(nDups).af.st := af && TlbCmd.isWrite(cmd) && fault_valid
    resp(idx).bits.excp(nDups).af.instr := af && TlbCmd.isExec(cmd) && fault_valid
  }

  def handle_nonblock(idx: Int): Unit = {
    io.requestor(idx).resp.valid := req_out_v(idx)
    io.requestor(idx).req.ready := io.requestor(idx).resp.ready // should always be true
    XSError(!io.requestor(idx).resp.ready, s"${q.name} port ${idx} is non-block, resp.ready must be true.B")

    // bit 0: stage-2 involved; bit 1: only stage-2
    val req_hasS2xlate = virt || req_out(idx).hyperinst
    val req_s2xlate = Cat(req_hasS2xlate && vsatp.mode === 0.U && hgatp.mode === 8.U, req_hasS2xlate)
    val ptw_s2xlate = ptw.resp.bits.s2xlate
    val onlyS2 = ptw_s2xlate === "b11".U
    val ptw_s1_hit = ptw.resp.bits.s1.hit(get_pn(req_out(idx).vaddr), Mux(ptw_s2xlate(0), io.csr.vsatp.asid, io.csr.satp.asid), io.csr.hgatp.asid, true, false, ptw_s2xlate(0))
    val ptw_s2_hit = ptw.resp.bits.s2.hit(get_pn(req_out(idx).vaddr), io.csr.hgatp.asid)
    val ptw_just_back = ptw.resp.fire && req_s2xlate === ptw_s2xlate && Mux(onlyS2, ptw_s2_hit, ptw_s1_hit)
    val ptw_already_back = RegNext(ptw.resp.fire) && RegNext(ptw.resp.bits).hit(get_pn(req_out(idx).vaddr), asid = io.csr.satp.asid, allType = true)
    io.ptw.req(idx).valid := req_out_v(idx) && missVec(idx) && !(ptw_just_back || ptw_already_back) // TODO: remove the regnext, timing
    io.tlbreplay(idx) := req_out_v(idx) && missVec(idx) && (ptw_just_back || ptw_already_back)
    when (io.requestor(idx).req_kill && RegNext(io.requestor(idx).req.fire)) {
      io.ptw.req(idx).valid := false.B
      io.tlbreplay(idx) := true.B
    }
    io.ptw.req(idx).bits.vpn := get_pn(req_out(idx).vaddr)
    io.ptw.req(idx).bits.gvpn := RegNext(get_pn(req_out(idx).vaddr)) // for only-stage-2 translation; longer than vpn
    io.ptw.req(idx).bits.s2xlate := RegNext(req_s2xlate)
    io.ptw.req(idx).bits.memidx := req_out(idx).memidx
  }
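
  // Note on the filtering above (descriptive only): if the ptw response arriving this cycle
  // (ptw_just_back) or last cycle (ptw_already_back) already covers the missing vpn, re-sending
  // the walk request would be redundant. The request is therefore suppressed and the requestor is
  // asked to replay through io.tlbreplay, so the replayed access can hit the freshly refilled entry.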

  def handle_block(idx: Int): Unit = {
    // three valids are involved: 1. whether a request entry exists; 2. whether it has been sent to ptw; 3. whether resp.valid is still unset
    io.requestor(idx).req.ready := !req_out_v(idx) || io.requestor(idx).resp.fire
    // req_out_v marks whether there is an outstanding request; may be long latency, fixme

    // miss request entries
    val miss_req_vpn = get_pn(req_out(idx).vaddr)
    val miss_req_gvpn = get_pn(req_out(idx).vaddr)
    val miss_req_memidx = req_out(idx).memidx
    // bit 0: stage-2 involved; bit 1: only stage-2
    val miss_req_hasS2xlate = virt || req_out(idx).hyperinst
    val miss_req_s2xlate = Cat(miss_req_hasS2xlate && vsatp.mode === 0.U && hgatp.mode === 8.U, miss_req_hasS2xlate)
    val onlyS2 = miss_req_s2xlate === "b11".U
    val hit_s1 = io.ptw.resp.bits.s1.hit(miss_req_vpn, Mux(miss_req_s2xlate(0), io.csr.vsatp.asid, io.csr.satp.asid), io.csr.hgatp.asid, true, false, miss_req_s2xlate(0))
    val hit_s2 = io.ptw.resp.bits.s2.hit(miss_req_vpn, io.csr.hgatp.asid)
    val hit = Mux(onlyS2, hit_s2, hit_s1) && io.ptw.resp.valid && miss_req_s2xlate === io.ptw.resp.bits.s2xlate

    val new_coming = RegNext(req_in(idx).fire && !req_in(idx).bits.kill && !flush_pipe(idx), false.B)
    val miss_wire = new_coming && missVec(idx)
    val miss_v = ValidHoldBypass(miss_wire, resp(idx).fire, flush_pipe(idx))
    val miss_req_v = ValidHoldBypass(miss_wire || (miss_v && flush_mmu && !mmu_flush_pipe),
      io.ptw.req(idx).fire || resp(idx).fire, flush_pipe(idx))

    // when the ptw resp arrives, check if it hits; if so, reset miss_v and resp to lsu/ifu
    resp(idx).valid := req_out_v(idx) && !(miss_v && portTranslateEnable(idx))
    when (io.ptw.resp.fire && hit && req_out_v(idx) && portTranslateEnable(idx)) {
      val stage1 = io.ptw.resp.bits.s1
      val stage2 = io.ptw.resp.bits.s2
      val s2xlate = io.ptw.resp.bits.s2xlate
      resp(idx).valid := true.B
      resp(idx).bits.miss := false.B // for blocked tlb, this is useless
      val s1_paddr = Cat(stage1.genPPN(get_pn(req_out(idx).vaddr)), get_off(req_out(idx).vaddr))
      val s2_paddr = Cat(stage2.genPPNS2(), get_off(req_out(idx).vaddr))
      for (d <- 0 until nRespDups) {
        resp(idx).bits.paddr(d) := Mux(s2xlate(0), s2_paddr, s1_paddr)
        resp(idx).bits.gpaddr(d) := s1_paddr
        perm_check(stage1.entry.perm.get, req_out(idx).cmd, idx, d, stage2.entry.perm, req_out(idx).hlvx, s2xlate)
      }
      pmp_check(resp(idx).bits.paddr(0), req_out(idx).size, req_out(idx).cmd, idx)

      // NOTE: the unfiltered req would be handled by Repeater
    }
    assert(RegNext(!resp(idx).valid || resp(idx).ready, true.B), "when tlb resp valid, ready should be true, must")
    assert(RegNext(req_out_v(idx) || !(miss_v || miss_req_v), true.B), "when not req_out_v, should not set miss_v/miss_req_v")

    val ptw_req = io.ptw.req(idx)
    ptw_req.valid := miss_req_v
    ptw_req.bits.vpn := miss_req_vpn
    ptw_req.bits.gvpn := miss_req_gvpn
    ptw_req.bits.s2xlate := miss_req_s2xlate
    ptw_req.bits.memidx := miss_req_memidx

    io.tlbreplay(idx) := false.B

    // NOTE: when the pipe is flushed, the tlb should abandon its last req.
    // However, some outside modules like icache don't care about flushPipe and keep waiting for the tlb resp.
    // Just assert resp.valid and raise a page fault so the request goes through; the pipe (ifu) will abandon it.
    if (!q.outsideRecvFlush) {
      when (req_out_v(idx) && flush_pipe(idx) && portTranslateEnable(idx)) {
        resp(idx).valid := true.B
        for (d <- 0 until nRespDups) {
          resp(idx).bits.excp(d).pf.ld := true.B // sfence happened, raise pf so this addr is not used
          resp(idx).bits.excp(d).pf.st := true.B
          resp(idx).bits.excp(d).pf.instr := true.B
        }
      }
    }
  }
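
  // Reading aid (rough summary; see utility.ValidHold / ValidHoldBypass for the precise behavior):
  // both act as a sticky valid flag that is set by the first argument and cleared by the second
  // argument or by the flush; the Bypass variant also exposes the set condition in the same cycle.
  // This is what keeps miss_v / miss_req_v asserted between the miss being detected and the ptw resp.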

  // when the ptw resp arrives, the tlb entry at refill_idx may have been forced to miss.
  // Bypass the ptw resp to check.
  def ptw_resp_bypass(vpn: UInt, s2xlate: UInt) = {
    val hasS2xlate = s2xlate(0) === 1.U
    val onlyS2 = s2xlate(1) === 1.U && hasS2xlate
    val s2xlate_hit = s2xlate === ptw.resp.bits.s2xlate
    val normal_hit = ptw.resp.bits.s1.hit(vpn, Mux(hasS2xlate, io.csr.vsatp.asid, io.csr.satp.asid), io.csr.hgatp.asid, true, false, hasS2xlate)
    val onlyS2_hit = ptw.resp.bits.s2.hit(vpn, io.csr.hgatp.asid)
    val p_hit = RegNext(Mux(onlyS2, onlyS2_hit, normal_hit) && io.ptw.resp.fire && s2xlate_hit)
    val ppn_s1 = ptw.resp.bits.s1.genPPN(vpn)
    val ppn_s2 = ptw.resp.bits.s2.genPPNS2()
    val p_ppn = RegEnable(Mux(hasS2xlate, ppn_s2, ppn_s1), io.ptw.resp.fire)
    val p_perm = RegEnable(ptwresp_to_tlbperm(ptw.resp.bits.s1), io.ptw.resp.fire)
    val p_gvpn = RegEnable(ptw.resp.bits.s2.entry.tag, io.ptw.resp.fire)
    val p_g_perm = RegEnable(hptwresp_to_tlbperm(ptw.resp.bits.s2), io.ptw.resp.fire)
    val p_s2xlate = RegEnable(ptw.resp.bits.s2xlate, io.ptw.resp.fire)
    (p_hit, p_ppn, p_perm, p_gvpn, p_g_perm, p_s2xlate)
  }

  // assert
  for (i <- 0 until Width) {
    TimeOutAssert(req_out_v(i) && !resp(i).valid, timeOutThreshold, s"${q.name} port ${i} long time no resp valid.")
  }

  // perf event
  val result_ok = req_in.map(a => RegNext(a.fire))
  val perfEvents =
    Seq(
      ("access", PopCount((0 until Width).map{i => if (Block(i)) io.requestor(i).req.fire else portTranslateEnable(i) && result_ok(i) })),
      ("miss ", PopCount((0 until Width).map{i => if (Block(i)) portTranslateEnable(i) && result_ok(i) && missVec(i) else ptw.req(i).fire })),
    )
  generatePerfEvent()

  // perf log
  for (i <- 0 until Width) {
    if (Block(i)) {
      XSPerfAccumulate(s"access${i}", result_ok(i) && portTranslateEnable(i))
      XSPerfAccumulate(s"miss${i}", result_ok(i) && missVec(i))
    } else {
      XSPerfAccumulate(s"first_access${i}", result_ok(i) && portTranslateEnable(i) && RegNext(req(i).bits.debug.isFirstIssue))
      XSPerfAccumulate(s"access${i}", result_ok(i) && portTranslateEnable(i))
      XSPerfAccumulate(s"first_miss${i}", result_ok(i) && portTranslateEnable(i) && missVec(i) && RegNext(req(i).bits.debug.isFirstIssue))
      XSPerfAccumulate(s"miss${i}", result_ok(i) && portTranslateEnable(i) && missVec(i))
    }
  }
  XSPerfAccumulate("ptw_resp_count", ptw.resp.fire)
  XSPerfAccumulate("ptw_resp_pf_count", ptw.resp.fire && ptw.resp.bits.pf)

  // Log
  for (i <- 0 until Width) {
    XSDebug(req(i).valid, p"req(${i.U}): (${req(i).valid} ${req(i).ready}) ${req(i).bits}\n")
    XSDebug(resp(i).valid, p"resp(${i.U}): (${resp(i).valid} ${resp(i).ready}) ${resp(i).bits}\n")
  }

  XSDebug(io.sfence.valid, p"Sfence: ${io.sfence}\n")
  XSDebug(ParallelOR(req_out_v) || ptw.resp.valid, p"vmEnable:${Binary(VecInit(vmEnable).asUInt)} hit:${Binary(VecInit(hitVec).asUInt)} miss:${Binary(VecInit(missVec).asUInt)}\n")
  for (i <- ptw.req.indices) {
    XSDebug(ptw.req(i).fire, p"L2TLB req:${ptw.req(i).bits}\n")
  }
  XSDebug(ptw.resp.valid, p"L2TLB resp:${ptw.resp.bits} (v:${ptw.resp.valid} r:${ptw.resp.ready})\n")

  println(s"${q.name}: page: ${q.NWays} ${q.Associative} ${q.Replacer.get}")

  if (env.EnableDifftest) {
    for (i <- 0 until Width) {
      val pf = io.requestor(i).resp.bits.excp(0).pf.instr || io.requestor(i).resp.bits.excp(0).pf.st || io.requestor(i).resp.bits.excp(0).pf.ld
      val gpf = io.requestor(i).resp.bits.excp(0).gpf.instr || io.requestor(i).resp.bits.excp(0).gpf.st || io.requestor(i).resp.bits.excp(0).gpf.ld
      val af = io.requestor(i).resp.bits.excp(0).af.instr || io.requestor(i).resp.bits.excp(0).af.st || io.requestor(i).resp.bits.excp(0).af.ld
      val difftest = DifftestModule(new DiffL1TLBEvent)
      difftest.coreid := io.hartId
      difftest.valid := RegNext(io.requestor(i).req.fire) && !io.requestor(i).req_kill && io.requestor(i).resp.fire && !io.requestor(i).resp.bits.miss && !pf && !af && !gpf && portTranslateEnable(i)
      if (!Seq("itlb", "ldtlb", "sttlb").contains(q.name)) {
        difftest.valid := false.B
      }
      difftest.index := TLBDiffId(p(XSCoreParamsKey).HartId).U
      difftest.satp := io.csr.satp
      difftest.vpn := RegNext(get_pn(req_in(i).bits.vaddr))
      difftest.ppn := get_pn(io.requestor(i).resp.bits.paddr(0))
      difftest.vsatp := io.csr.vsatp
      difftest.hgatp := io.csr.hgatp
      // bit 0: stage-2 involved; bit 1: only stage-2
      val hasS2xlate = virt || req_in(i).bits.hyperinst
      difftest.s2xlate := Cat(hasS2xlate && io.csr.vsatp.mode === 0.U && io.csr.hgatp.mode === 8.U, hasS2xlate)
    }
  }
}

object TLBDiffId {
  var i: Int = 0
  var lastHartId: Int = -1
  def apply(hartId: Int): Int = {
    if (lastHartId != hartId) {
      i = 0
      lastHartId = hartId
    }
    i += 1
    i - 1
  }
}

class TLBNonBlock(Width: Int, nRespDups: Int = 1, q: TLBParameters)(implicit p: Parameters) extends TLB(Width, nRespDups, Seq.fill(Width)(false), q)
class TLBBLock(Width: Int, nRespDups: Int = 1, q: TLBParameters)(implicit p: Parameters) extends TLB(Width, nRespDups, Seq.fill(Width)(true), q)

class TlbReplace(Width: Int, q: TLBParameters)(implicit p: Parameters) extends TlbModule {
  val io = IO(new TlbReplaceIO(Width, q))

  if (q.Associative == "fa") {
    val re = ReplacementPolicy.fromString(q.Replacer, q.NWays)
    re.access(io.page.access.map(_.touch_ways))
    io.page.refillIdx := re.way
  } else { // set-associative && plru
    val re = ReplacementPolicy.fromString(q.Replacer, q.NSets, q.NWays)
    re.access(io.page.access.map(_.sets), io.page.access.map(_.touch_ways))
    io.page.refillIdx := { if (q.NWays == 1) 0.U else re.way(io.page.chosen_set) }
  }
}
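
// Instantiation sketch (illustrative only; itlbParams, ifu.io.iTLBInter, io.tlbCsr, io.ptwInter and
// pmp_checker are assumptions, not part of this file). A frontend would typically create a
// non-blocking TLB, connect one requestor per pipe, and forward io.ptw to the L2 TLB / repeater:
//
//   val itlb = Module(new TLBNonBlock(Width = 1, nRespDups = 1, q = itlbParams))
//   itlb.io.requestor(0) <> ifu.io.iTLBInter   // req this cycle, paddr/miss next cycle
//   itlb.io.sfence    := io.sfence
//   itlb.io.csr       := io.tlbCsr
//   itlb.io.flushPipe := io.flushPipe
//   itlb.io.hartId    := io.hartId
//   io.ptwInter       <> itlb.io.ptw           // misses go to the page table walker
//   pmp_checker.io.req <> itlb.io.pmp          // pmp/pma check uses the next-cycle paddr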