/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.internal.naming.chiselName
import chisel3.util._
import difftest._
import freechips.rocketchip.util.SRAMAnnotation
import xiangshan._
import utils._
import utility._
import xiangshan.backend.fu.{PMPChecker, PMPReqBundle, PMPConfig => XSPMPConfig}
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.fu.util.HasCSRConst
import firrtl.FirrtlProtos.Firrtl.Module.ExternalModule.Parameter
import freechips.rocketchip.rocket.PMPConfig

/** TLB module
 * Supports blocking and non-blocking request IO at the same time.
 * Returns the paddr at the next cycle, which then goes through pmp/pma check.
 * @param Width: the number of requestors
 * @param nRespDups: the number of duplicated response copies per port (for timing)
 * @param Block: whether each requestor port is blocking
 * @param q: TLB parameters, e.g. the number of entries; each TLB has its own parameters
 * @param p: XiangShan parameters, e.g. XLEN
 */
@chiselName
class TLB(Width: Int, nRespDups: Int = 1, Block: Seq[Boolean], q: TLBParameters)(implicit p: Parameters) extends TlbModule
  with HasCSRConst
  with HasPerfEvents
{
  val io = IO(new TlbIO(Width, nRespDups, q))

  val req = io.requestor.map(_.req)
  val resp = io.requestor.map(_.resp)
  val ptw = io.ptw
  val pmp = io.pmp

  /** Sfence.vma & Svinval
   * Sfence.vma will: 1. flush old entries; 2. flush inflight requests; 3. flush the pipeline.
   * Svinval will:    1. flush old entries; 2. flush inflight requests.
   * So Svinval does not flush the pipeline, which means it must not drop
   * requests already in the pipeline and must still return correct responses.
   */
  val sfence = DelayN(io.sfence, q.fenceDelay)
  val csr = io.csr
  val satp = DelayN(io.csr.satp, q.fenceDelay)
  val flush_mmu = DelayN(sfence.valid || csr.satp.changed, q.fenceDelay)
  val mmu_flush_pipe = DelayN(sfence.valid && sfence.bits.flushPipe, q.fenceDelay) // Svinval does not flush the pipeline
  val flush_pipe = io.flushPipe

  // ATTENTION: csr and flush from the backend are delayed. csr must not arrive later than flush,
  // because csr influences tlb behavior.
  val ifetch = if (q.fetchi) true.B else false.B
  val mode = if (q.useDmode) csr.priv.dmode else csr.priv.imode
  // val vmEnable = satp.mode === 8.U // && (mode < ModeM) // FIXME: fix me when boot xv6/linux...
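  // satp.mode === 8.U selects Sv39 translation (the only VM mode this TLB supports), and
  // translation is architecturally off in M mode, hence the (mode < ModeM) term below.
  // With EnbaleTlbDebug set, translation is forced on regardless of the privilege mode.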
  val vmEnable = if (EnbaleTlbDebug) (satp.mode === 8.U)
    else (satp.mode === 8.U && (mode < ModeM))
  val portTranslateEnable = (0 until Width).map(i => vmEnable && !req(i).bits.no_translate)

  val req_in = req
  val req_out = req.map(a => RegEnable(a.bits, a.fire()))
  val req_out_v = (0 until Width).map(i => ValidHold(req_in(i).fire && !req_in(i).bits.kill, resp(i).fire, flush_pipe(i)))

  val refill = ptw.resp.fire() && !flush_mmu && vmEnable
  val entries = Module(new TlbStorageWrapper(Width, q, nRespDups))
  entries.io.base_connect(sfence, csr, satp)
  if (q.outReplace) { io.replace <> entries.io.replace }
  for (i <- 0 until Width) {
    entries.io.r_req_apply(io.requestor(i).req.valid, get_pn(req_in(i).bits.vaddr), i)
    entries.io.w_apply(refill, ptw.resp.bits, io.ptw_replenish)
  }

  // read TLB, get hit/miss, paddr, perm bits
  val readResult = (0 until Width).map(TLBRead(_))
  val hitVec = readResult.map(_._1)
  val missVec = readResult.map(_._2)
  val pmp_addr = readResult.map(_._3)
  val static_pm = readResult.map(_._4)
  val static_pm_v = readResult.map(_._5)
  val perm = readResult.map(_._6)

  // check pmp using paddr (for timing optimization, pmp_addr is used here)
  // check permission
  (0 until Width).foreach { i =>
    pmp_check(pmp_addr(i), req_out(i).size, req_out(i).cmd, i)
    for (d <- 0 until nRespDups) {
      perm_check(perm(i)(d), req_out(i).cmd, static_pm(i), static_pm_v(i), i, d)
    }
  }

  // handle blocking and non-blocking io
  // for non-blocking io, just return the result above and send the miss to ptw
  // for blocking io, hold the request, send the miss to ptw,
  // and return the result when the ptw comes back
  (0 until Width).foreach { i =>
    if (Block(i)) handle_block(i)
    else handle_nonblock(i)
  }
  io.ptw.resp.ready := true.B

  /************************ main body above | method/log/perf below ****************************/
  def TLBRead(i: Int) = {
    val (e_hit, e_ppn, e_perm, e_super_hit, e_super_ppn, static_pm) = entries.io.r_resp_apply(i)
    val (p_hit, p_ppn, p_perm) = ptw_resp_bypass(get_pn(req_in(i).bits.vaddr))
    val enable = portTranslateEnable(i)

    val hit = e_hit || p_hit
    val miss = !hit && enable
    val fast_miss = !(e_super_hit || p_hit) && enable
    hit.suggestName(s"hit_read_${i}")
    miss.suggestName(s"miss_read_${i}")

    val vaddr = SignExt(req_out(i).vaddr, PAddrBits)
    resp(i).bits.miss := miss
    resp(i).bits.fast_miss := fast_miss
    resp(i).bits.ptwBack := ptw.resp.fire()

    val ppn = WireInit(VecInit(Seq.fill(nRespDups)(0.U(ppnLen.W))))
    val perm = WireInit(VecInit(Seq.fill(nRespDups)(0.U.asTypeOf(new TlbPermBundle))))

    for (d <- 0 until nRespDups) {
      ppn(d) := Mux(p_hit, p_ppn, e_ppn(d))
      perm(d) := Mux(p_hit, p_perm, e_perm(d))

      val paddr = Cat(ppn(d), get_off(req_out(i).vaddr))
      resp(i).bits.paddr(d) := Mux(enable, paddr, vaddr)
    }

    XSDebug(req_out_v(i), p"(${i.U}) hit:${hit} miss:${miss} ppn:${Hexadecimal(ppn(0))} perm:${perm(0)}\n")

    val pmp_paddr = Mux(enable, Cat(Mux(p_hit, p_ppn, e_super_ppn), get_off(req_out(i).vaddr)), vaddr)
    // pmp_paddr is functionally the same as paddr; it drops the normal-page ppn for timing optimization.
    // val pmp_paddr = Mux(enable, paddr, vaddr)
    val static_pm_valid = !(e_super_hit || p_hit) && enable && q.partialStaticPMP.B

    (hit, miss, pmp_paddr, static_pm, static_pm_valid, perm)
  }
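  // Port timing, as implemented above (an illustrative trace; assumes one in-flight request
  // and no kill/flush):
  //   cycle 0: req.fire          - vaddr accepted, storage read issued
  //   cycle 1: resp.valid        - paddr/miss/excp computed from the registered req_out
  //   cycle 2: io.ptw.req.valid  - on a miss of a non-blocking port, the walk request is
  //                                registered and sent (see handle_nonblock below)
  // A blocking port instead holds req.ready low until the walk returns and resp fires.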
  def pmp_check(addr: UInt, size: UInt, cmd: UInt, idx: Int): Unit = {
    pmp(idx).valid := resp(idx).valid
    pmp(idx).bits.addr := addr
    pmp(idx).bits.size := size
    pmp(idx).bits.cmd := cmd
  }

  def perm_check(perm: TlbPermBundle, cmd: UInt, spm: TlbPMBundle, spm_v: Bool, idx: Int, nDups: Int) = {
    // for timing optimization, pmp check is divided into dynamic and static parts:
    // dynamic: superpages (and fully-associative reg entries) -> check pmp when the translation is done
    // static:  4K pages (sram entries) -> check pmp with pre-checked results
    val af = perm.af
    val pf = perm.pf
    val ldUpdate = !perm.a && TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) // update A/D through exception
    val stUpdate = (!perm.a || !perm.d) && (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd)) // update A/D through exception
    val instrUpdate = !perm.a && TlbCmd.isExec(cmd) // update A/D through exception
    val modeCheck = !(mode === ModeU && !perm.u || mode === ModeS && perm.u && (!io.csr.priv.sum || ifetch))
    val ldPermFail = !(modeCheck && (perm.r || io.csr.priv.mxr && perm.x))
    val stPermFail = !(modeCheck && perm.w)
    val instrPermFail = !(modeCheck && perm.x)
    val ldPf = (ldPermFail || pf) && (TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd))
    val stPf = (stPermFail || pf) && (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd))
    val instrPf = (instrPermFail || pf) && TlbCmd.isExec(cmd)
    val fault_valid = portTranslateEnable(idx)
    resp(idx).bits.excp(nDups).pf.ld := (ldPf || ldUpdate) && fault_valid && !af
    resp(idx).bits.excp(nDups).pf.st := (stPf || stUpdate) && fault_valid && !af
    resp(idx).bits.excp(nDups).pf.instr := (instrPf || instrUpdate) && fault_valid && !af
    // NOTE: pf needs && with !af. A page fault normally has higher priority than an access fault,
    // but the ptw may itself take an access fault, in which case af is raised and the translation
    // is wrong. In that case, pf has lower priority than af.

    resp(idx).bits.excp(nDups).af.ld := (af || (spm_v && !spm.r)) && TlbCmd.isRead(cmd) && fault_valid
    resp(idx).bits.excp(nDups).af.st := (af || (spm_v && !spm.w)) && TlbCmd.isWrite(cmd) && fault_valid
    resp(idx).bits.excp(nDups).af.instr := (af || (spm_v && !spm.x)) && TlbCmd.isExec(cmd) && fault_valid
    resp(idx).bits.static_pm.valid := spm_v && fault_valid // the ld/st unit should use this mmio result, not the one from pmp
    resp(idx).bits.static_pm.bits := !spm.c
  }
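  // The modeCheck/mxr terms in perm_check follow the RISC-V privileged spec:
  //   U mode: pages with perm.u clear are not accessible.
  //   S mode: pages with perm.u set are not accessible unless sstatus.SUM is set,
  //           and are never executable from S mode (hence the ifetch term).
  //   sstatus.MXR additionally makes executable pages readable (perm.r || mxr && perm.x).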
  def handle_nonblock(idx: Int): Unit = {
    io.requestor(idx).resp.valid := req_out_v(idx)
    io.requestor(idx).req.ready := io.requestor(idx).resp.ready // should always be true
    XSError(!io.requestor(idx).resp.ready, s"${q.name} port ${idx} is non-blocking, resp.ready must always be true")

    val ptw_just_back = ptw.resp.fire && ptw.resp.bits.entry.hit(get_pn(req_out(idx).vaddr), asid = io.csr.satp.asid, allType = true)
    io.ptw.req(idx).valid := RegNext(req_out_v(idx) && missVec(idx) && !ptw_just_back, false.B) // TODO: remove the RegNext for timing
    when (RegEnable(io.requestor(idx).req_kill, RegNext(io.requestor(idx).req.fire))) {
      io.ptw.req(idx).valid := false.B
    }
    io.ptw.req(idx).bits.vpn := RegNext(get_pn(req_out(idx).vaddr))
  }

  def handle_block(idx: Int): Unit = {
    // three valids: 1. whether a matching entry exists; 2. whether the miss was sent to ptw; 3. clearing resp.valid
    io.requestor(idx).req.ready := !req_out_v(idx) || io.requestor(idx).resp.fire()
    // req_out_v marks an outstanding request; it may be held for a long latency. FIXME.

    // miss request entries
    val miss_req_vpn = get_pn(req_out(idx).vaddr)
    val hit = io.ptw.resp.bits.entry.hit(miss_req_vpn, io.csr.satp.asid, allType = true) && io.ptw.resp.valid

    val new_coming = RegNext(req_in(idx).fire && !req_in(idx).bits.kill && !flush_pipe(idx), false.B)
    val miss_wire = new_coming && missVec(idx)
    val miss_v = ValidHoldBypass(miss_wire, resp(idx).fire(), flush_pipe(idx))
    val miss_req_v = ValidHoldBypass(miss_wire || (miss_v && flush_mmu && !mmu_flush_pipe),
      io.ptw.req(idx).fire() || resp(idx).fire(), flush_pipe(idx))

    // when the ptw responds, check whether it hits; if so, reset miss_v and respond to lsu/ifu
    resp(idx).valid := req_out_v(idx) && !(miss_v && portTranslateEnable(idx))
    when (io.ptw.resp.fire() && hit && req_out_v(idx) && portTranslateEnable(idx)) {
      val pte = io.ptw.resp.bits
      resp(idx).valid := true.B
      resp(idx).bits.miss := false.B // for a blocked tlb, this is useless
      for (d <- 0 until nRespDups) {
        resp(idx).bits.paddr(d) := Cat(pte.entry.genPPN(get_pn(req_out(idx).vaddr)), get_off(req_out(idx).vaddr))
        // convert the ptw response to tlb permissions before checking
        perm_check(ptwresp_to_tlbperm(pte), req_out(idx).cmd, 0.U.asTypeOf(new TlbPMBundle), false.B, idx, d)
      }
      pmp_check(resp(idx).bits.paddr(0), req_out(idx).size, req_out(idx).cmd, idx)

      // NOTE: unfiltered requests are handled by the Repeater
    }
    assert(RegNext(!resp(idx).valid || resp(idx).ready, true.B), "when tlb resp is valid, ready must be true")
    assert(RegNext(req_out_v(idx) || !(miss_v || miss_req_v), true.B), "when not req_out_v, miss_v/miss_req_v must not be set")

    val ptw_req = io.ptw.req(idx)
    ptw_req.valid := miss_req_v
    ptw_req.bits.vpn := miss_req_vpn
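    // Informal state summary for a blocking port (derived from the logic above):
    //   miss_v:     a translation miss is outstanding; resp.valid stays low until it clears.
    //   miss_req_v: the miss still needs a ptw request; it re-arms when flush_mmu invalidates
    //               an in-flight walk without flushing the pipeline (the Svinval case), so the
    //               walk is replayed under the new context.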
    // NOTE: when the pipeline is flushed, the tlb should abandon its last request.
    // However, some outside modules (e.g. icache) do not care about flushPipe and keep waiting for a tlb resp.
    // So just raise resp.valid with a page fault to let the request go through; the pipeline (ifu) will abandon it.
    if (!q.outsideRecvFlush) {
      when (req_out_v(idx) && flush_pipe(idx) && portTranslateEnable(idx)) {
        resp(idx).valid := true.B
        for (d <- 0 until nRespDups) {
          resp(idx).bits.excp(d).pf.ld := true.B // an sfence happened; raise pf so this addr is not used
          resp(idx).bits.excp(d).pf.st := true.B
          resp(idx).bits.excp(d).pf.instr := true.B
        }
      }
    }
  }

  // When the ptw responds, the tlb entry at refill_idx may be forced to miss;
  // bypass the ptw resp to check against it.
  def ptw_resp_bypass(vpn: UInt) = {
    val p_hit = RegNext(ptw.resp.bits.entry.hit(vpn, io.csr.satp.asid, allType = true) && io.ptw.resp.fire)
    val p_ppn = RegEnable(ptw.resp.bits.entry.genPPN(vpn), io.ptw.resp.fire)
    val p_perm = RegEnable(ptwresp_to_tlbperm(ptw.resp.bits), io.ptw.resp.fire)
    (p_hit, p_ppn, p_perm)
  }

  // assert
  for (i <- 0 until Width) {
    TimeOutAssert(req_out_v(i) && !resp(i).valid, timeOutThreshold, s"${q.name} port ${i}: no resp valid for too long.")
  }

  // perf event
  val result_ok = req_in.map(a => RegNext(a.fire()))
  val perfEvents =
    Seq(
      ("access", PopCount((0 until Width).map{i => if (Block(i)) io.requestor(i).req.fire() else portTranslateEnable(i) && result_ok(i) })),
      ("miss ", PopCount((0 until Width).map{i => if (Block(i)) portTranslateEnable(i) && result_ok(i) && missVec(i) else ptw.req(i).fire() })),
    )
  generatePerfEvent()

  // perf log
  for (i <- 0 until Width) {
    if (Block(i)) {
      XSPerfAccumulate(s"access${i}", result_ok(i) && portTranslateEnable(i))
      XSPerfAccumulate(s"miss${i}", result_ok(i) && missVec(i))
    } else {
      XSPerfAccumulate(s"first_access${i}", result_ok(i) && portTranslateEnable(i) && RegNext(req(i).bits.debug.isFirstIssue))
      XSPerfAccumulate(s"access${i}", result_ok(i) && portTranslateEnable(i))
      XSPerfAccumulate(s"first_miss${i}", result_ok(i) && portTranslateEnable(i) && missVec(i) && RegNext(req(i).bits.debug.isFirstIssue))
      XSPerfAccumulate(s"miss${i}", result_ok(i) && portTranslateEnable(i) && missVec(i))
    }
  }
  XSPerfAccumulate("ptw_resp_count", ptw.resp.fire())
  XSPerfAccumulate("ptw_resp_pf_count", ptw.resp.fire() && ptw.resp.bits.pf)

  // Log
  for (i <- 0 until Width) {
    XSDebug(req(i).valid, p"req(${i.U}): (${req(i).valid} ${req(i).ready}) ${req(i).bits}\n")
    XSDebug(resp(i).valid, p"resp(${i.U}): (${resp(i).valid} ${resp(i).ready}) ${resp(i).bits}\n")
  }

  XSDebug(io.sfence.valid, p"Sfence: ${io.sfence}\n")
  XSDebug(ParallelOR(req_out_v) || ptw.resp.valid, p"vmEnable:${vmEnable} hit:${Binary(VecInit(hitVec).asUInt)} miss:${Binary(VecInit(missVec).asUInt)}\n")
  for (i <- ptw.req.indices) {
    XSDebug(ptw.req(i).fire(), p"L2TLB req:${ptw.req(i).bits}\n")
  }
  XSDebug(ptw.resp.valid, p"L2TLB resp:${ptw.resp.bits} (v:${ptw.resp.valid} r:${ptw.resp.ready})\n")

  println(s"${q.name}: normal page: ${q.normalNWays} ${q.normalAssociative} ${q.normalReplacer.get} super page: ${q.superNWays} ${q.superAssociative} ${q.superReplacer.get}")
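  // Difftest hook: every successful translation (resp fires with no miss and no pf/af) is
  // reported below so the co-simulation reference model can check the vpn->ppn mapping.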
  if (env.EnableDifftest) {
    val l1tlbid = Wire(UInt(2.W))
    if (q.name == "itlb") {
      l1tlbid := 0.U
    } else if (q.name == "ldtlb") {
      l1tlbid := 1.U
    } else {
      l1tlbid := 2.U
    }

    for (i <- 0 until Width) {
      val pf = io.requestor(i).resp.bits.excp(0).pf.instr || io.requestor(i).resp.bits.excp(0).pf.st || io.requestor(i).resp.bits.excp(0).pf.ld
      val af = io.requestor(i).resp.bits.excp(0).af.instr || io.requestor(i).resp.bits.excp(0).af.st || io.requestor(i).resp.bits.excp(0).af.ld
      val difftest = Module(new DifftestL1TLBEvent)
      difftest.io.clock := clock
      difftest.io.coreid := p(XSCoreParamsKey).HartId.asUInt
      difftest.io.valid := RegNext(io.requestor(i).req.fire) && !RegNext(io.requestor(i).req_kill) && io.requestor(i).resp.fire && !io.requestor(i).resp.bits.miss && !pf && !af && portTranslateEnable(i)
      difftest.io.index := i.U
      difftest.io.l1tlbid := l1tlbid
      difftest.io.satp := io.csr.satp.ppn
      difftest.io.vpn := RegNext(get_pn(req_in(i).bits.vaddr))
      difftest.io.ppn := get_pn(io.requestor(i).resp.bits.paddr(0))
    }
  }

}

class TLBNonBlock(Width: Int, nRespDups: Int = 1, q: TLBParameters)(implicit p: Parameters) extends TLB(Width, nRespDups, Seq.fill(Width)(false), q)
class TLBBlock(Width: Int, nRespDups: Int = 1, q: TLBParameters)(implicit p: Parameters) extends TLB(Width, nRespDups, Seq.fill(Width)(true), q)

class TlbReplace(Width: Int, q: TLBParameters)(implicit p: Parameters) extends TlbModule {
  val io = IO(new TlbReplaceIO(Width, q))

  if (q.normalAssociative == "fa") {
    val re = ReplacementPolicy.fromString(q.normalReplacer, q.normalNWays)
    re.access(io.normalPage.access.map(_.touch_ways))
    io.normalPage.refillIdx := re.way
  } else { // set-associative && plru
    val re = ReplacementPolicy.fromString(q.normalReplacer, q.normalNSets, q.normalNWays)
    re.access(io.normalPage.access.map(_.sets), io.normalPage.access.map(_.touch_ways))
    io.normalPage.refillIdx := { if (q.normalNWays == 1) 0.U else re.way(io.normalPage.chosen_set) }
  }

  if (q.superAssociative == "fa") {
    val re = ReplacementPolicy.fromString(q.superReplacer, q.superNWays)
    re.access(io.superPage.access.map(_.touch_ways))
    io.superPage.refillIdx := re.way
  } else { // set-associative && plru
    val re = ReplacementPolicy.fromString(q.superReplacer, q.superNSets, q.superNWays)
    re.access(io.superPage.access.map(_.sets), io.superPage.access.map(_.touch_ways))
    io.superPage.refillIdx := { if (q.superNWays == 1) 0.U else re.way(io.superPage.chosen_set) }
  }
}
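// Usage sketch (illustrative only; `ldtlbParams`, `loadUnits`, `pmpCheckers`, `l2tlbRepeater`
// and the backend signals are assumptions, not defined in this file). A two-port non-blocking
// load TLB wired between the load units, the PMP checkers and the L2TLB repeater might look like:
//
//   val dtlb = Module(new TLBNonBlock(Width = 2, nRespDups = 2, q = ldtlbParams))
//   dtlb.io.requestor.zip(loadUnits).foreach { case (t, lu) => t <> lu.io.tlb } // req/resp per port
//   dtlb.io.sfence    := sfenceFromBackend
//   dtlb.io.csr       := csrFromBackend            // csr must not arrive later than flush
//   dtlb.io.flushPipe := flushPipePerPort
//   dtlb.io.ptw       <> l2tlbRepeater.io.tlb      // misses go to the page table walker
//   pmpCheckers.zip(dtlb.io.pmp).foreach { case (chk, r) => chk.io.req <> r }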