/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.experimental.{ExtModule, chiselName}
import chisel3.util._
import utils._
import utility._
import freechips.rocketchip.formal.PropertyClass
import xiangshan.backend.fu.util.HasCSRConst

import scala.math.min

class BankedAsyncDataModuleTemplateWithDup[T <: Data](
  gen: T,
  numEntries: Int,
  numRead: Int,
  numDup: Int,
  numBanks: Int
) extends Module {
  val io = IO(new Bundle {
    val raddr = Vec(numRead, Input(UInt(log2Ceil(numEntries).W)))
    val rdata = Vec(numRead, Vec(numDup, Output(gen)))
    val wen   = Input(Bool())
    val waddr = Input(UInt(log2Ceil(numEntries).W))
    val wdata = Input(gen)
  })
  require(numBanks > 1)
  require(numEntries > numBanks)

  // low bits select the entry within a bank, high bits select the bank
  // (see BankAddressingExample below for a pure-Scala model of this split)
  val numBankEntries = numEntries / numBanks
  def bankOffset(address: UInt): UInt = {
    address(log2Ceil(numBankEntries) - 1, 0)
  }

  def bankIndex(address: UInt): UInt = {
    address(log2Ceil(numEntries) - 1, log2Ceil(numBankEntries))
  }

  // the last bank absorbs the remainder when numEntries is not a multiple of numBanks
  val dataBanks = Seq.tabulate(numBanks)(i => {
    val bankEntries = if (i < numBanks - 1) numBankEntries else (numEntries - (i * numBankEntries))
    Mem(bankEntries, gen)
  })

  // asynchronous read, with the result registered (one-cycle read latency)
  for (i <- 0 until numRead) {
    val data_read = Reg(Vec(numDup, Vec(numBanks, gen)))
    val bank_index = Reg(Vec(numDup, UInt(numBanks.W)))
    for (j <- 0 until numDup) {
      bank_index(j) := UIntToOH(bankIndex(io.raddr(i)))
      for (k <- 0 until numBanks) {
        // bypass a concurrent write to the same address
        data_read(j)(k) := Mux(io.wen && (io.waddr === io.raddr(i)),
          io.wdata, dataBanks(k)(bankOffset(io.raddr(i))))
      }
    }
    // next cycle: select the bank with the registered one-hot index
    for (j <- 0 until numDup) {
      io.rdata(i)(j) := Mux1H(bank_index(j), data_read(j))
    }
  }

  // write
  for (i <- 0 until numBanks) {
    when (io.wen && (bankIndex(io.waddr) === i.U)) {
      dataBanks(i)(bankOffset(io.waddr)) := io.wdata
    }
  }
}
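
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original design): a pure-Scala model of
// the bank addressing used by BankedAsyncDataModuleTemplateWithDup above, with
// hypothetical example parameters. The low log2Ceil(numBankEntries) bits give
// the offset within a bank; the remaining high bits give the bank index,
// mirroring bankOffset/bankIndex.
object BankAddressingExample extends App {
  val numEntries = 64
  val numBanks = 8
  val numBankEntries = numEntries / numBanks
  val offsetBits = 3 // = log2Ceil(numBankEntries) for numBankEntries = 8

  def bankOffset(addr: Int): Int = addr & ((1 << offsetBits) - 1)
  def bankIndex(addr: Int): Int = addr >> offsetBits

  // Address 42 = 0b101010 lands in bank 5 at offset 2 with these parameters.
  println(s"addr 42 -> bank ${bankIndex(42)}, offset ${bankOffset(42)}")
}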
@chiselName
class TLBFA(
  parentName: String,
  ports: Int,
  nSets: Int,
  nWays: Int,
  saveLevel: Boolean = false,
  normalPage: Boolean,
  superPage: Boolean
)(implicit p: Parameters) extends TlbModule with HasPerfEvents {

  val io = IO(new TlbStorageIO(nSets, nWays, ports))
  io.r.req.map(_.ready := true.B)

  val v = RegInit(VecInit(Seq.fill(nWays)(false.B)))
  val entries = Reg(Vec(nWays, new TlbSectorEntry(normalPage, superPage)))
  val g = entries.map(_.perm.g)

  for (i <- 0 until ports) {
    val req = io.r.req(i)
    val resp = io.r.resp(i)
    val access = io.access(i)

    val vpn = req.bits.vpn
    val vpn_reg = RegEnable(vpn, req.fire())
    val vpn_gen_ppn = if (saveLevel) vpn else vpn_reg

    // a way being refilled this cycle must not report a hit
    val refill_mask = Mux(io.w.valid, UIntToOH(io.w.bits.wayIdx), 0.U(nWays.W))
    val hitVec = VecInit(entries.zip(v.zip(refill_mask.asBools)).map { case (e, (vi, refilling)) =>
      e.hit(vpn, io.csr.satp.asid) && vi && !refilling
    })

    hitVec.suggestName("hitVec")

    val hitVecReg = RegEnable(hitVec, req.fire())
    // a sector tlb may trigger a multi-hit, see def "wbhit"
    XSPerfAccumulate(s"port${i}_multi_hit", !(!resp.valid || (PopCount(hitVecReg) === 0.U || PopCount(hitVecReg) === 1.U)))
    // assert(!resp.valid || (PopCount(hitVecReg) === 0.U || PopCount(hitVecReg) === 1.U), s"${parentName} fa port${i} multi-hit")

    resp.valid := RegNext(req.valid)
    resp.bits.hit := Cat(hitVecReg).orR
    if (nWays == 1) {
      resp.bits.ppn(0) := entries(0).genPPN(saveLevel, req.valid)(vpn_gen_ppn)
      resp.bits.perm(0) := entries(0).perm
    } else {
      resp.bits.ppn(0) := ParallelMux(hitVecReg zip entries.map(_.genPPN(saveLevel, req.valid)(vpn_gen_ppn)))
      resp.bits.perm(0) := ParallelMux(hitVecReg zip entries.map(_.perm))
    }

    access.sets := get_set_idx(vpn_reg(vpn_reg.getWidth - 1, sectortlbwidth), nSets) // unused
    access.touch_ways.valid := resp.valid && Cat(hitVecReg).orR
    access.touch_ways.bits := OHToUInt(hitVecReg)

    resp.bits.hit.suggestName("hit")
    resp.bits.ppn.suggestName("ppn")
    resp.bits.perm.suggestName("perm")
  }

  when (io.w.valid) {
    v(io.w.bits.wayIdx) := true.B
    entries(io.w.bits.wayIdx).apply(io.w.bits.data, io.csr.satp.asid, io.w.bits.data_replenish)
  }
  // write sanity check: a refill must not duplicate an existing entry
  val w_hit_vec = VecInit(entries.zip(v).map { case (e, vi) => e.wbhit(io.w.bits.data, io.csr.satp.asid) && vi })
  XSError(io.w.valid && Cat(w_hit_vec).orR, s"${parentName} refill, duplicate with existing entries")

  val refill_vpn_reg = RegNext(io.w.bits.data.entry.tag)
  val refill_wayIdx_reg = RegNext(io.w.bits.wayIdx)
  when (RegNext(io.w.valid)) {
    io.access.map { access =>
      access.sets := get_set_idx(refill_vpn_reg, nSets)
      access.touch_ways.valid := true.B
      access.touch_ways.bits := refill_wayIdx_reg
    }
  }

  val sfence = io.sfence
  val sfence_vpn = sfence.bits.addr.asTypeOf(new VaBundle).vpn
  val sfenceHit = entries.map(_.hit(sfence_vpn, sfence.bits.asid))
  val sfenceHit_noasid = entries.map(_.hit(sfence_vpn, sfence.bits.asid, ignoreAsid = true))
  // an sfence flushes all sectors of an entry on a hit
  // (the four flush cases are modeled by SfenceRuleExample after this class)
  when (io.sfence.valid) {
    when (sfence.bits.rs1) { // sfence.bits.rs1 is true when rs1 === x0: flush all addresses
      when (sfence.bits.rs2) { // sfence.bits.rs2 is true when rs2 === x0: flush all ASIDs
        // all addresses and all ASIDs
        v.map(_ := false.B)
      }.otherwise {
        // all addresses, but a specific ASID; global entries survive
        v.zipWithIndex.map { case (a, i) => a := a & (g(i) | !(entries(i).asid === sfence.bits.asid)) }
      }
    }.otherwise {
      when (sfence.bits.rs2) {
        // a specific address, but all ASIDs
        v.zipWithIndex.map { case (a, i) => a := a & !sfenceHit_noasid(i) }
      }.otherwise {
        // a specific address and a specific ASID
        v.zipWithIndex.map { case (a, i) => a := a & !(sfenceHit(i) && !g(i)) }
      }
    }
  }

  val victim_idx = io.w.bits.wayIdx
  io.victim.out.valid := v(victim_idx) && io.w.valid && entries(victim_idx).is_normalentry()
  io.victim.out.bits.entry := ns_to_n(entries(victim_idx))

  // convert a sector entry (normal + super) into a plain normal-page entry
  def ns_to_n(ns: TlbSectorEntry): TlbEntry = {
    val n = Wire(new TlbEntry(pageNormal = true, pageSuper = false))
    n.perm.af := ns.perm.af
    n.perm.pf := ns.perm.pf
    n.perm.d := ns.perm.d
    n.perm.a := ns.perm.a
    n.perm.g := ns.perm.g
    n.perm.u := ns.perm.u
    n.perm.x := ns.perm.x
    n.perm.w := ns.perm.w
    n.perm.r := ns.perm.r
    n.perm.pm := ns.perm.pm(OHToUInt(ns.pteidx))
    n.ppn := Cat(ns.ppn, ns.ppn_low(OHToUInt(ns.pteidx)))
    n.tag := Cat(ns.tag, OHToUInt(ns.pteidx))
    n.asid := ns.asid
    n
  }

  XSPerfAccumulate("access", io.r.resp.map(_.valid.asUInt()).fold(0.U)(_ + _))
  XSPerfAccumulate("hit", io.r.resp.map(a => a.valid && a.bits.hit).fold(0.U)(_.asUInt() + _.asUInt()))

  for (i <- 0 until nWays) {
    XSPerfAccumulate(s"access${i}", io.r.resp.zip(io.access.map(acc => UIntToOH(acc.touch_ways.bits))).map { case (a, b) =>
      a.valid && a.bits.hit && b(i) }.fold(0.U)(_.asUInt() + _.asUInt()))
  }
  for (i <- 0 until nWays) {
    XSPerfAccumulate(s"refill${i}", io.w.valid && io.w.bits.wayIdx === i.U)
  }

  val perfEvents = Seq(
    ("tlbstore_access", io.r.resp.map(_.valid.asUInt()).fold(0.U)(_ + _)),
    ("tlbstore_hit ", io.r.resp.map(a => a.valid && a.bits.hit).fold(0.U)(_.asUInt() + _.asUInt())),
  )
  generatePerfEvent()

  println(s"${parentName} tlb_fa: nSets:${nSets} nWays:${nWays}")
}
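
// ---------------------------------------------------------------------------
// Illustrative sketch (pure Scala, hypothetical names; not part of the design):
// the SFENCE.VMA flush rules implemented at the end of TLBFA. keepEntry returns
// true when an entry survives the flush. allAddr/allAsid correspond to
// sfence.bits.rs1/rs2 (a register index of x0 means "all addresses"/"all ASIDs").
object SfenceRuleExample {
  def keepEntry(allAddr: Boolean, allAsid: Boolean,
                addrHit: Boolean, asidMatch: Boolean, global: Boolean): Boolean =
    (allAddr, allAsid) match {
      case (true, true)   => false                              // flush everything
      case (true, false)  => global || !asidMatch               // one ASID; globals survive
      case (false, true)  => !addrHit                           // one address, any ASID
      case (false, false) => !(addrHit && asidMatch && !global) // one address, one ASID
    }
}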
@chiselName
class TLBSA(
  parentName: String,
  ports: Int,
  nDups: Int,
  nSets: Int,
  nWays: Int,
  normalPage: Boolean,
  superPage: Boolean
)(implicit p: Parameters) extends TlbModule {
  require(!superPage, "super page should use reg/fa")
  require(nWays == 1, "nWays larger than 1 causes bad timing")

  // timing optimization: split the valid-bit select into two cycles
  val VPRE_SELECT = min(8, nSets)
  val VPOST_SELECT = nSets / VPRE_SELECT
  val nBanks = 8

  val io = IO(new TlbStorageIO(nSets, nWays, ports, nDups))

  io.r.req.map(_.ready := true.B)
  val v = RegInit(VecInit(Seq.fill(nSets)(VecInit(Seq.fill(nWays)(false.B)))))
  val entries = Module(new BankedAsyncDataModuleTemplateWithDup(new TlbEntry(normalPage, superPage), nSets, ports, nDups, nBanks))

  for (i <- 0 until ports) { // one read port per requestor
    val req = io.r.req(i)
    val resp = io.r.resp(i)
    val access = io.access(i)

    val vpn = req.bits.vpn
    val vpn_reg = RegEnable(vpn, req.fire())

    val ridx = get_set_idx(vpn, nSets)
    // first cycle: select a VPOST_SELECT-wide slice of the valid bits;
    // second cycle: pick the set within that slice
    // (modeled by TwoStageSelectExample after this class)
    val v_resize = v.asTypeOf(Vec(VPRE_SELECT, Vec(VPOST_SELECT, UInt(nWays.W))))
    val vidx_resize = RegNext(v_resize(get_set_idx(drop_set_idx(vpn, VPOST_SELECT), VPRE_SELECT)))
    val vidx = vidx_resize(get_set_idx(vpn_reg, VPOST_SELECT)).asBools.map(_ && RegNext(req.fire()))
    val vidx_bypass = RegNext((entries.io.waddr === ridx) && entries.io.wen)
    entries.io.raddr(i) := ridx

    val data = entries.io.rdata(i)
    val hit = data(0).hit(vpn_reg, io.csr.satp.asid, nSets) && (vidx(0) || vidx_bypass)
    resp.bits.hit := hit
    for (d <- 0 until nDups) {
      resp.bits.ppn(d) := data(d).genPPN()(vpn_reg)
      resp.bits.perm(d).pf := data(d).perm.pf
      resp.bits.perm(d).af := data(d).perm.af
      resp.bits.perm(d).d := data(d).perm.d
      resp.bits.perm(d).a := data(d).perm.a
      resp.bits.perm(d).g := data(d).perm.g
      resp.bits.perm(d).u := data(d).perm.u
      resp.bits.perm(d).x := data(d).perm.x
      resp.bits.perm(d).w := data(d).perm.w
      resp.bits.perm(d).r := data(d).perm.r
      for (j <- 0 until tlbcontiguous) {
        resp.bits.perm(d).pm(j) := data(d).perm.pm
      }
    }

    resp.valid := RegNext(req.valid)
    resp.bits.hit.suggestName("hit")
    resp.bits.ppn.suggestName("ppn")
    resp.bits.perm.suggestName("perm")

    access.sets := get_set_idx(vpn_reg, nSets) // unused
    access.touch_ways.valid := resp.valid && hit
    access.touch_ways.bits := 1.U // TODO: a set-associative storage needs no replacer when nWays is 1
  }

  // There must be exactly one write port; otherwise the bypass check above breaks.
  entries.io.wen := io.w.valid || io.victim.in.valid
  entries.io.waddr := Mux(io.w.valid,
    get_set_idx(io.w.bits.data.entry.tag, nSets),
    get_set_idx(io.victim.in.bits.entry.tag, nSets))
  entries.io.wdata := Mux(io.w.valid,
    (Wire(new TlbEntry(normalPage, superPage)).apply(io.w.bits.data, io.csr.satp.asid, io.w.bits.data_replenish(OHToUInt(io.w.bits.data.pteidx)))),
    io.victim.in.bits.entry)

  when (io.victim.in.valid) {
    v(get_set_idx(io.victim.in.bits.entry.tag, nSets))(io.w.bits.wayIdx) := true.B
  }
  // w has higher priority than victim
  when (io.w.valid) {
    v(get_set_idx(io.w.bits.data.entry.tag, nSets))(io.w.bits.wayIdx) := true.B
  }

  val refill_vpn_reg = RegNext(Mux(io.victim.in.valid, io.victim.in.bits.entry.tag, io.w.bits.data.entry.tag))
  val refill_wayIdx_reg = RegNext(io.w.bits.wayIdx)
  when (RegNext(io.w.valid || io.victim.in.valid)) {
    io.access.map { access =>
      access.sets := get_set_idx(refill_vpn_reg, nSets)
      access.touch_ways.valid := true.B
      access.touch_ways.bits := refill_wayIdx_reg
    }
  }

  val sfence = io.sfence
  val sfence_vpn = sfence.bits.addr.asTypeOf(new VaBundle).vpn
  when (io.sfence.valid) {
    when (sfence.bits.rs1) { // sfence.bits.rs1 is true when rs1 === x0: flush all addresses
      v.map(a => a.map(b => b := false.B))
    }.otherwise {
      // a specific address, but all ASIDs
      v(get_set_idx(sfence_vpn, nSets)).map(_ := false.B)
    }
  }

  io.victim.out := DontCare
  io.victim.out.valid := false.B

  XSPerfAccumulate("access", io.r.req.map(_.valid.asUInt()).fold(0.U)(_ + _))
  XSPerfAccumulate("hit", io.r.resp.map(a => a.valid && a.bits.hit).fold(0.U)(_.asUInt() + _.asUInt()))

  for (i <- 0 until nSets) {
    XSPerfAccumulate(s"refill${i}", (io.w.valid || io.victim.in.valid) &&
      (Mux(io.w.valid, get_set_idx(io.w.bits.data.entry.tag, nSets), get_set_idx(io.victim.in.bits.entry.tag, nSets)) === i.U)
    )
  }

  for (i <- 0 until nSets) {
    XSPerfAccumulate(s"hit${i}", io.r.resp.map(a => a.valid & a.bits.hit)
      .zip(io.r.req.map(a => RegNext(get_set_idx(a.bits.vpn, nSets)) === i.U))
      .map { a => (a._1 && a._2).asUInt() }
      .fold(0.U)(_ + _)
    )
  }

  for (i <- 0 until nSets) {
    XSPerfAccumulate(s"access${i}", io.r.resp.map(_.valid)
      .zip(io.r.req.map(a => RegNext(get_set_idx(a.bits.vpn, nSets)) === i.U))
      .map { a => (a._1 && a._2).asUInt() }
      .fold(0.U)(_ + _)
    )
  }

  println(s"${parentName} tlb_sa: nSets:${nSets} nWays:${nWays}")
}
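
// ---------------------------------------------------------------------------
// Illustrative sketch (pure Scala, hypothetical parameter values; not part of
// the design): the two-stage valid-bit lookup in TLBSA. Stage 1 registers one
// VPOST_SELECT-wide slice of the valid array chosen by the high index bits;
// stage 2 picks within that slice using the low bits, so the wide select is
// split across two cycles.
object TwoStageSelectExample {
  val nSets = 64
  val VPRE_SELECT = min(8, nSets)    // first-stage fan-in
  val VPOST_SELECT = nSets / VPRE_SELECT // second-stage fan-in

  // cycle 1: high bits of the set index choose a slice of the valid bits
  def stage1(valid: Seq[Boolean], setIdx: Int): Seq[Boolean] = {
    val pre = setIdx / VPOST_SELECT
    valid.slice(pre * VPOST_SELECT, (pre + 1) * VPOST_SELECT)
  }
  // cycle 2: low bits choose the set within the registered slice
  def stage2(slice: Seq[Boolean], setIdx: Int): Boolean =
    slice(setIdx % VPOST_SELECT)
}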
@chiselName
class TLBFakeSP(
  ports: Int,
  nSets: Int,
  nWays: Int,
  useDmode: Boolean = false
)(implicit p: Parameters) extends TlbModule with HasCSRConst {

  val io = IO(new TlbStorageIO(nSets, nWays, ports))
  io.r.req.map(_.ready := true.B)
  val mode = if (useDmode) io.csr.priv.dmode else io.csr.priv.imode
  val vmEnable = if (EnbaleTlbDebug) (io.csr.satp.mode === 8.U)
                 else (io.csr.satp.mode === 8.U && (mode < ModeM))

  for (i <- 0 until ports) {
    val req = io.r.req(i)
    val resp = io.r.resp(i)

    val helper = Module(new PTEHelper())
    helper.clock := clock
    helper.satp := io.csr.satp.ppn
    helper.enable := req.fire && vmEnable
    helper.vpn := req.bits.vpn

    val pte = helper.pte.asTypeOf(new PteBundle)
    val ppn = pte.ppn
    val vpn_reg = RegNext(req.bits.vpn)
    val pf = helper.pf
    val level = helper.level

    resp.valid := RegNext(req.valid)
    resp.bits.hit := true.B
    resp.bits.perm(0).pf := pf
    resp.bits.perm(0).af := false.B
    resp.bits.perm(0).d := pte.perm.d
    resp.bits.perm(0).a := pte.perm.a
    resp.bits.perm(0).g := pte.perm.g
    resp.bits.perm(0).u := pte.perm.u
    resp.bits.perm(0).x := pte.perm.x
    resp.bits.perm(0).w := pte.perm.w
    resp.bits.perm(0).r := pte.perm.r
    resp.bits.perm(0).pm := DontCare

    // compose the PPN according to the page-table level of the leaf PTE
    // (see SuperpagePpnExample after this class)
    resp.bits.ppn(0) := MuxLookup(level, 0.U, Seq(
      0.U -> Cat(ppn(ppn.getWidth - 1, vpnnLen * 2), vpn_reg(vpnnLen * 2 - 1, 0)),
      1.U -> Cat(ppn(ppn.getWidth - 1, vpnnLen), vpn_reg(vpnnLen - 1, 0)),
      2.U -> ppn)
    )
  }

  io.access := DontCare
  io.victim.out := DontCare

}
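
// ---------------------------------------------------------------------------
// Illustrative sketch (pure Scala; not part of the design): the superpage PPN
// composition done by the MuxLookup in TLBFakeSP, assuming Sv39 where each VPN
// segment is 9 bits. For a leaf found at level l, the low (2 - l) * 9 bits of
// the PPN come from the VPN (level 0 = 1 GiB page, 1 = 2 MiB, 2 = 4 KiB).
object SuperpagePpnExample {
  val vpnnLen = 9 // Sv39 VPN segment width
  def composePpn(ptePpn: Long, vpn: Long, level: Int): Long = {
    val low = (2 - level) * vpnnLen // number of PPN bits taken from the VPN
    if (low <= 0) ptePpn
    else ((ptePpn >> low) << low) | (vpn & ((1L << low) - 1))
  }
}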
@chiselName
class TLBFakeNP(
  ports: Int,
  nDups: Int,
  nSets: Int,
  nWays: Int
)(implicit p: Parameters) extends TlbModule {

  val io = IO(new TlbStorageIO(nSets, nWays, ports, nDups))

  io.r.req.map(_.ready := true.B)
  io.r.resp := DontCare
  io.access := DontCare
  io.victim.out := DontCare
}

object TlbStorage {
  def apply
  (
    parentName: String,
    associative: String,
    ports: Int,
    nDups: Int = 1,
    nSets: Int,
    nWays: Int,
    saveLevel: Boolean = false,
    normalPage: Boolean,
    superPage: Boolean,
    useDmode: Boolean,
    SoftTLB: Boolean
  )(implicit p: Parameters) = {
    // storage selection rule, summarized by StorageSelectExample below
    if (SoftTLB) {
      if (superPage) {
        val storage = Module(new TLBFakeSP(ports, nSets, nWays, useDmode))
        storage.suggestName(s"${parentName}_fakesp")
        storage.io
      } else {
        val storage = Module(new TLBFakeNP(ports, nDups, nSets, nWays))
        storage.suggestName(s"${parentName}_fakenp")
        storage.io
      }
    } else {
      if (associative == "fa") {
        val storage = Module(new TLBFA(parentName, ports, nSets, nWays, saveLevel, normalPage, superPage))
        storage.suggestName(s"${parentName}_fa")
        storage.io
      } else {
        val storage = Module(new TLBSA(parentName, ports, nDups, nSets, nWays, normalPage, superPage))
        storage.suggestName(s"${parentName}_sa")
        storage.io
      }
    }
  }
}
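
// ---------------------------------------------------------------------------
// Illustrative sketch (pure Scala, hypothetical helper; not part of the
// design): the storage-selection rule implemented by TlbStorage.apply above.
// SoftTLB substitutes simulation-only fake modules; otherwise the
// associativity string picks fully-associative or set-associative storage.
object StorageSelectExample {
  def select(softTLB: Boolean, superPage: Boolean, associative: String): String =
    if (softTLB) { if (superPage) "TLBFakeSP" else "TLBFakeNP" }
    else if (associative == "fa") "TLBFA"
    else "TLBSA"
}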
class TlbStorageWrapper(ports: Int, q: TLBParameters, nDups: Int = 1)(implicit p: Parameters) extends TlbModule {
  val io = IO(new TlbStorageWrapperIO(ports, q, nDups))

  // TODO: wrap normal-page and superpage storage together; deduplicate the declaration & refill code
  val normalPage = TlbStorage(
    parentName = q.name + "_np_storage",
    associative = q.normalAssociative,
    ports = ports,
    nDups = nDups,
    nSets = q.normalNSets,
    nWays = q.normalNWays,
    saveLevel = q.saveLevel,
    normalPage = true,
    superPage = false,
    useDmode = q.useDmode,
    SoftTLB = coreParams.softTLB
  )
  val superPage = TlbStorage(
    parentName = q.name + "_sp_storage",
    associative = q.superAssociative,
    ports = ports,
    nSets = q.superNSets,
    nWays = q.superNWays,
    normalPage = q.normalAsVictim,
    superPage = true,
    useDmode = q.useDmode,
    SoftTLB = coreParams.softTLB
  )

  for (i <- 0 until ports) {
    normalPage.r_req_apply(
      valid = io.r.req(i).valid,
      vpn = io.r.req(i).bits.vpn,
      i = i
    )
    superPage.r_req_apply(
      valid = io.r.req(i).valid,
      vpn = io.r.req(i).bits.vpn,
      i = i
    )
  }

  for (i <- 0 until ports) {
    val nq = normalPage.r.req(i)
    val np = normalPage.r.resp(i)
    val sq = superPage.r.req(i)
    val sp = superPage.r.resp(i)
    val rq = io.r.req(i)
    val rp = io.r.resp(i)
    rq.ready := nq.ready && sq.ready // not actually used
    rp.valid := np.valid && sp.valid // not actually used
    rp.bits.hit := np.bits.hit || sp.bits.hit
    // a superpage hit takes priority over a normal-page hit
    for (d <- 0 until nDups) {
      rp.bits.ppn(d) := Mux(sp.bits.hit, sp.bits.ppn(0), np.bits.ppn(d))
      rp.bits.perm(d).pf := Mux(sp.bits.hit, sp.bits.perm(0).pf, np.bits.perm(d).pf)
      rp.bits.perm(d).af := Mux(sp.bits.hit, sp.bits.perm(0).af, np.bits.perm(d).af)
      rp.bits.perm(d).d := Mux(sp.bits.hit, sp.bits.perm(0).d, np.bits.perm(d).d)
      rp.bits.perm(d).a := Mux(sp.bits.hit, sp.bits.perm(0).a, np.bits.perm(d).a)
      rp.bits.perm(d).g := Mux(sp.bits.hit, sp.bits.perm(0).g, np.bits.perm(d).g)
      rp.bits.perm(d).u := Mux(sp.bits.hit, sp.bits.perm(0).u, np.bits.perm(d).u)
      rp.bits.perm(d).x := Mux(sp.bits.hit, sp.bits.perm(0).x, np.bits.perm(d).x)
      rp.bits.perm(d).w := Mux(sp.bits.hit, sp.bits.perm(0).w, np.bits.perm(d).w)
      rp.bits.perm(d).r := Mux(sp.bits.hit, sp.bits.perm(0).r, np.bits.perm(d).r)
      rp.bits.perm(d).pm := DontCare
    }
    rp.bits.super_hit := sp.bits.hit
    rp.bits.super_ppn := sp.bits.ppn(0)
    rp.bits.spm := np.bits.perm(0).pm(0)
    // a sector tlb may trigger a multi-hit, see def "wbhit"
    XSPerfAccumulate(s"port${i}_np_sp_multi_hit", !(!np.bits.hit || !sp.bits.hit || !rp.valid))
    // assert(!np.bits.hit || !sp.bits.hit || !rp.valid, s"${q.name} storage ports${i} normal and super multi-hit")
  }

  normalPage.victim.in <> superPage.victim.out
  normalPage.victim.out <> superPage.victim.in
  normalPage.sfence <> io.sfence
  superPage.sfence <> io.sfence
  normalPage.csr <> io.csr
  superPage.csr <> io.csr

  val normal_refill_idx = if (q.outReplace) {
    io.replace.normalPage.access <> normalPage.access
    io.replace.normalPage.chosen_set := get_set_idx(io.w.bits.data.entry.tag, q.normalNSets)
    io.replace.normalPage.refillIdx
  } else if (q.normalAssociative == "fa") {
    val re = ReplacementPolicy.fromString(q.normalReplacer, q.normalNWays)
    re.access(normalPage.access.map(_.touch_ways))
    re.way
  } else { // set-associative && PLRU
    val re = ReplacementPolicy.fromString(q.normalReplacer, q.normalNSets, q.normalNWays)
    re.access(normalPage.access.map(_.sets), normalPage.access.map(_.touch_ways))
    re.way(get_set_idx(io.w.bits.data.entry.tag, q.normalNSets))
  }

  val super_refill_idx = if (q.outReplace) {
    io.replace.superPage.access <> superPage.access
    io.replace.superPage.chosen_set := DontCare
    io.replace.superPage.refillIdx
  } else {
    val re = ReplacementPolicy.fromString(q.superReplacer, q.superNWays)
    re.access(superPage.access.map(_.touch_ways))
    re.way
  }

  // a 4 KiB leaf (level 2) refills the normal-page storage unless normalAsVictim
  // routes every refill through the superpage storage first
  // (see the RefillRoutingExample sketch after this class)
  normalPage.w_apply(
    valid = { if (q.normalAsVictim) false.B
              else io.w.valid && io.w.bits.data.entry.level.get === 2.U },
    wayIdx = normal_refill_idx,
    data = io.w.bits.data,
    data_replenish = io.w.bits.data_replenish
  )
  superPage.w_apply(
    valid = { if (q.normalAsVictim) io.w.valid
              else io.w.valid && io.w.bits.data.entry.level.get =/= 2.U },
    wayIdx = super_refill_idx,
    data = io.w.bits.data,
    data_replenish = io.w.bits.data_replenish
  )

  // replacement
  def get_access(one_hot: UInt, valid: Bool): Valid[UInt] = {
    val res = Wire(Valid(UInt(log2Up(one_hot.getWidth).W)))
    res.valid := Cat(one_hot).orR && valid
    res.bits := OHToUInt(one_hot)
    res
  }
}
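
// ---------------------------------------------------------------------------
// Illustrative sketch (pure Scala, hypothetical names; not part of the design):
// the refill-routing rule used by TlbStorageWrapper.w_apply above. In Sv39 a
// level-2 leaf maps a 4 KiB page and belongs in the normal-page storage; any
// larger page goes to the superpage storage. With normalAsVictim, every refill
// enters the superpage storage first, and normal pages reach the normal-page
// storage only when evicted from it as victims.
object RefillRoutingExample {
  def writeNormal(normalAsVictim: Boolean, level: Int): Boolean =
    !normalAsVictim && level == 2
  def writeSuper(normalAsVictim: Boolean, level: Int): Boolean =
    normalAsVictim || level != 2
}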