/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

// See LICENSE.SiFive for license details.

package xiangshan.backend.fu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.internal.naming.chiselName
import chisel3.util._
import utils.MaskedRegMap.WritableMask
import xiangshan._
import xiangshan.backend.fu.util.HasCSRConst
import utils._
import xiangshan.cache.mmu.{TlbCmd, TlbExceptionBundle}

trait PMPConst extends HasPMParameters {
  val PMPOffBits = 2 // the minimal granularity is 4 bytes
  val CoarserGrain: Boolean = PlatformGrain > PMPOffBits
}

abstract class PMPBundle(implicit val p: Parameters) extends Bundle with PMPConst
abstract class PMPModule(implicit val p: Parameters) extends Module with PMPConst
abstract class PMPXSModule(implicit p: Parameters) extends XSModule with PMPConst

@chiselName
class PMPConfig(implicit p: Parameters) extends PMPBundle {
  val l = Bool()
  val c = Bool()      // res(1): unused in PMP, reused by PMA as the cacheable attribute
  val atomic = Bool() // res(0): unused in PMP, reused by PMA as the atomic attribute
  val a = UInt(2.W)
  val x = Bool()
  val w = Bool()
  val r = Bool()

  def res: UInt = Cat(c, atomic) // the reserved bits, unused in PMP
  def off = a === 0.U
  def tor = a === 1.U
  def na4 = { if (CoarserGrain) false.B else a === 2.U }
  def napot = { if (CoarserGrain) a(1).asBool else a === 3.U }
  def off_tor = !a(1)
  def na4_napot = a(1)

  def locked = l
  def addr_locked: Bool = locked
  // pmpaddr(i) is also locked when entry i+1 is locked in TOR mode,
  // because it then serves as the lower bound of region i+1
  def addr_locked(next: PMPConfig): Bool = locked || (next.locked && next.tor)
}

trait PMPReadWriteMethodBare extends PMPConst {
  def match_mask(cfg: PMPConfig, paddr: UInt) = {
    val match_mask_c_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_c_addr & ~(match_mask_c_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  def write_cfg_vec(mask: Vec[UInt], addr: Vec[UInt], index: Int)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_m_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_m_tmp
      when (!cfg_w_m_tmp.l) {
        cfgVec(i).w := cfg_w_m_tmp.w && cfg_w_m_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_m_tmp.a(1), cfg_w_m_tmp.a.orR) }
        when (cfgVec(i).na4_napot) {
          mask(index + i) := match_mask(cfgVec(i), addr(index + i))
        }
      }
    }
    cfgVec.asUInt
  }

  def read_addr(cfg: PMPConfig)(addr: UInt): UInt = {
    val G = PlatformGrain - PMPOffBits
    require(G >= 0)
    if (G == 0) {
      addr
    } else if (G >= 2) {
      Mux(cfg.na4_napot, set_low_bits(addr, G-1), clear_low_bits(addr, G))
    } else { // G is 1
      Mux(cfg.off_tor, clear_low_bits(addr, G), addr)
    }
  }
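  /* Illustrative note (added; not in the original source): how match_mask encodes a
   * NAPOT region, assuming PlatformGrain = 12 (4 KiB grain) and PMPOffBits = 2.
   * A NAPOT pmpaddr of the form y...y01...1 with k trailing ones selects a
   * 2^(k+3)-byte region; e.g. pmpaddr = 0x200003FF (10 trailing ones) places an
   * 8 KiB region at 0x80000000. match_mask appends cfg.a(0), ORs in the grain's
   * low bits (so regions are never smaller than the grain), and isolates the
   * trailing ones with x & ~(x + 1); for the example above the result has the low
   * 13 bits set (0x1FFF), i.e. the address bits to ignore during comparison.
   */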
  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt, cfg: PMPConfig, addr: UInt): UInt = {
    val locked = cfg.addr_locked(next)
    mask := Mux(!locked, match_mask(cfg, paddr), mask)
    Mux(!locked, paddr, addr)
  }

  /** set the data's low num bits (lsb) to 1 */
  def set_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    data | ((1 << num)-1).U
  }

  /** clear (zero) the data's low num bits (lsb) */
  def clear_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    // use Cat instead of AND-with-mask to avoid signal-width problems
    if (num == 0) { data }
    else { Cat(data(data.getWidth-1, num), 0.U(num.W)) }
  }
}

trait PMPReadWriteMethod extends PMPReadWriteMethodBare { this: PMPBase =>
  def write_cfg_vec(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_tmp
      when (!cfg_w_tmp.l) {
        cfgVec(i).w := cfg_w_tmp.w && cfg_w_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_tmp.a(1), cfg_w_tmp.a.orR) }
      }
    }
    cfgVec.asUInt
  }

  /** In general, the PMP grain is 2**(G+2) bytes. When G >= 1, NA4 is not selectable.
   * When G >= 2 and cfg.a(1) is set (the mode is NAPOT), the bits addr(G-2, 0) read as all ones.
   * When G >= 1 and cfg.a(1) is clear (the mode is OFF or TOR), the bits addr(G-1, 0) read as all zeros.
   * The lowest PMPOffBits bits are not stored at all.
   */
  def read_addr(): UInt = {
    read_addr(cfg)(addr)
  }

  /** addr: the stored address, with the low PMPOffBits bits dropped.
   * compare_addr: the stored address expanded for comparison.
   * paddr: the external physical address.
   */
  def write_addr(next: PMPConfig)(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked(next), paddr, addr)
  }
  def write_addr(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked, paddr, addr)
  }
}

/** PMPBase for the CSR unit,
 * with only the read and write logic
 */
@chiselName
class PMPBase(implicit p: Parameters) extends PMPBundle with PMPReadWriteMethod {
  val cfg = new PMPConfig
  val addr = UInt((PMPAddrBits - PMPOffBits).W)

  def gen(cfg: PMPConfig, addr: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
  }
}

trait PMPMatchMethod extends PMPConst { this: PMPEntry =>
  /** compare_addr is the byte address used to compare with the input paddr */
  def compare_addr: UInt = ((addr << PMPOffBits) & ~(((1 << PlatformGrain) - 1).U(PMPAddrBits.W))).asUInt
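  /* Illustrative note (added; not in the original source): compare_addr restores the
   * stored addr to a byte address and forces it onto the platform grain. With
   * PlatformGrain = 12, a stored addr of 0x20000000 (i.e. the pmpaddr value written
   * by software) yields compare_addr = 0x80000000, aligned to the 4 KiB grain. */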
  /** lgSize and lgMaxSize are both log2 of the access size.
   * For the dtlb, the maximal access is PMXLEN bits wide (8 bytes), so lgMaxSize is 3.
   * For the itlb and ptw, accesses can be as wide as a cache line (log2 of 512 bits),
   * although 64 bytes may be all that is actually needed; how do we prevent bugs there?
   * TODO: handle the special case where the itlb, ptw and dcache access a wider size than PMXLEN
   */
  def is_match(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    Mux(cfg.na4_napot, napotMatch(paddr, lgSize, lgMaxSize),
      Mux(cfg.tor, torMatch(paddr, lgSize, lgMaxSize, last_pmp), false.B))
  }

  /** generate the match mask that helps matching in NAPOT mode */
  def match_mask(paddr: UInt): UInt = {
    match_mask(cfg, paddr)
  }

  /** does the whole access [paddr, paddr + 2^lgSize) lie below compare_addr? */
  def boundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    if (lgMaxSize <= PlatformGrain) {
      (paddr < compare_addr)
    } else {
      val highLess = (paddr >> lgMaxSize) < (compare_addr >> lgMaxSize)
      val highEqual = (paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)
      val lowLess = (paddr(lgMaxSize-1, 0) | OneHot.UIntToOH1(lgSize, lgMaxSize)) < compare_addr(lgMaxSize-1, 0)
      highLess || (highEqual && lowLess)
    }
  }

  def lowerBoundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    !boundMatch(paddr, lgSize, lgMaxSize)
  }

  def higherBoundMatch(paddr: UInt, lgMaxSize: Int) = {
    boundMatch(paddr, 0.U, lgMaxSize)
  }

  def torMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    last_pmp.lowerBoundMatch(paddr, lgSize, lgMaxSize) && higherBoundMatch(paddr, lgMaxSize)
  }

  def unmaskEqual(a: UInt, b: UInt, m: UInt) = {
    (a & ~m) === (b & ~m)
  }

  def napotMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      unmaskEqual(paddr, compare_addr, mask)
    } else {
      val lowMask = mask | OneHot.UIntToOH1(lgSize, lgMaxSize)
      val highMatch = unmaskEqual(paddr >> lgMaxSize, compare_addr >> lgMaxSize, mask >> lgMaxSize)
      val lowMatch = unmaskEqual(paddr(lgMaxSize-1, 0), compare_addr(lgMaxSize-1, 0), lowMask(lgMaxSize-1, 0))
      highMatch && lowMatch
    }
  }

  def aligned(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last: PMPEntry) = {
    if (lgMaxSize <= PlatformGrain) {
      true.B
    } else {
      val lowBitsMask = OneHot.UIntToOH1(lgSize, lgMaxSize)
      val lowerBound = ((paddr >> lgMaxSize) === (last.compare_addr >> lgMaxSize)) &&
        ((~paddr(lgMaxSize-1, 0) & last.compare_addr(lgMaxSize-1, 0)) =/= 0.U)
      val upperBound = ((paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)) &&
        ((compare_addr(lgMaxSize-1, 0) & (paddr(lgMaxSize-1, 0) | lowBitsMask)) =/= 0.U)
      val torAligned = !(lowerBound || upperBound)
      val napotAligned = (lowBitsMask & ~mask(lgMaxSize-1, 0)) === 0.U
      Mux(cfg.na4_napot, napotAligned, torAligned)
    }
  }
}

/** PMPEntry for the PMP copies distributed outside the CSR unit,
 * with one more element, mask, to help NAPOT matching
 * TODO: make mask an element, not a method, for timing optimization
 */
@chiselName
class PMPEntry(implicit p: Parameters) extends PMPBase with PMPMatchMethod {
  val mask = UInt(PMPAddrBits.W) // helps to match in NAPOT mode

  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked(next), match_mask(paddr), mask)
    Mux(!cfg.addr_locked(next), paddr, addr)
  }

  def write_addr(mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked, match_mask(paddr), mask)
    Mux(!cfg.addr_locked, paddr, addr)
  }

  def gen(cfg: PMPConfig, addr: UInt, mask: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
    this.mask := mask
  }
}
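/* Illustrative example (added; not in the original source): how a distributed copy is
 * built from the backing registers, exactly as pmp_gen_mapping below does per entry:
 *   val entry = Wire(new PMPEntry())
 *   entry.gen(cfgs(i), addr(i), mask(i))
 * Writes through write_addr are silently dropped while the entry is locked, or while
 * the next entry is locked in TOR mode (see PMPConfig.addr_locked). */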
trait PMPMethod extends PMPConst {
  def pmp_init(): (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    val cfg = WireInit(0.U.asTypeOf(Vec(NumPMP/8, UInt(PMXLEN.W))))
    val addr = Wire(Vec(NumPMP, UInt((PMPAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(NumPMP, UInt(PMPAddrBits.W)))
    addr := DontCare
    mask := DontCare
    (cfg, addr, mask)
  }

  def pmp_gen_mapping
  (
    init: () => (Vec[UInt], Vec[UInt], Vec[UInt]),
    num: Int = 16,
    cfgBase: Int,
    addrBase: Int,
    entries: Vec[PMPEntry]
  ) = {
    val pmpCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmpCfgIndex(i: Int) = (PMXLEN / 32) * (i / pmpCfgPerCSR)
    val init_value = init()
    /** to fit MaskedRegMap's write interface, declare the cfgs as merged CSRs and split them into individual PMP entries */
    val cfgMerged = RegInit(init_value._1)
    val cfgs = WireInit(cfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    val addr = RegInit(init_value._2)
    val mask = RegInit(init_value._3)

    for (i <- entries.indices) {
      entries(i).gen(cfgs(i), addr(i), mask(i))
    }

    val cfg_mapping = (0 until num by pmpCfgPerCSR).map(i => {Map(
      MaskedRegMap(
        addr = cfgBase + pmpCfgIndex(i),
        reg = cfgMerged(i/pmpCfgPerCSR),
        wmask = WritableMask,
        wfn = new PMPBase().write_cfg_vec(mask, addr, i)
      ))
    }).fold(Map())((a, b) => a ++ b) // TODO: find a cleaner way to merge these maps

    val addr_mapping = (0 until num).map(i => {Map(
      MaskedRegMap(
        addr = addrBase + i,
        reg = addr(i),
        wmask = WritableMask,
        wfn = { if (i != num-1) entries(i).write_addr(entries(i+1).cfg, mask(i)) else entries(i).write_addr(mask(i)) },
        rmask = WritableMask,
        rfn = new PMPBase().read_addr(entries(i).cfg)
      ))
    }).fold(Map())((a, b) => a ++ b)

    cfg_mapping ++ addr_mapping
  }
}
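/* Illustrative note (added; not in the original source): with PMXLEN = 64 and an
 * 8-bit PMPConfig, pmpCfgPerCSR = 8 and pmpCfgIndex(i) = 2 * (i / 8), so entries
 * 0-7 live at cfgBase + 0 (pmpcfg0) and entries 8-15 at cfgBase + 2 (pmpcfg2),
 * matching the RV64 rule that odd-numbered pmpcfg CSRs do not exist. */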
@chiselName
class PMP(implicit p: Parameters) extends PMPXSModule with HasXSParameter with PMPMethod with PMAMethod with HasCSRConst {
  val io = IO(new Bundle {
    val distribute_csr = Flipped(new DistributedCSRIO())
    val pmp = Output(Vec(NumPMP, new PMPEntry()))
    val pma = Output(Vec(NumPMA, new PMPEntry()))
  })

  val w = io.distribute_csr.w

  val pmp = Wire(Vec(NumPMP, new PMPEntry()))
  val pma = Wire(Vec(NumPMA, new PMPEntry()))

  val pmpMapping = pmp_gen_mapping(pmp_init, NumPMP, PmpcfgBase, PmpaddrBase, pmp)
  val pmaMapping = pmp_gen_mapping(pma_init, NumPMA, PmacfgBase, PmaaddrBase, pma)
  val mapping = pmpMapping ++ pmaMapping

  val rdata = Wire(UInt(PMXLEN.W))
  MaskedRegMap.generate(mapping, w.bits.addr, rdata, w.valid, w.bits.data)

  io.pmp := pmp
  io.pma := pma
}

class PMPReqBundle(lgMaxSize: Int = 3)(implicit p: Parameters) extends PMPBundle {
  val addr = Output(UInt(PMPAddrBits.W))
  val size = Output(UInt(log2Ceil(lgMaxSize+1).W))
  val cmd = Output(TlbCmd())

  def apply(addr: UInt, size: UInt, cmd: UInt): Unit = {
    this.addr := addr
    this.size := size
    this.cmd := cmd
  }

  def apply(addr: UInt): Unit = { // request the minimal permission (read) at the maximal aligned size
    apply(addr, lgMaxSize.U, TlbCmd.read)
  }

  override def cloneType = (new PMPReqBundle(lgMaxSize)).asInstanceOf[this.type]
}

class PMPRespBundle(implicit p: Parameters) extends PMPBundle {
  val ld = Output(Bool())
  val st = Output(Bool())
  val instr = Output(Bool())
  val mmio = Output(Bool())

  def |(resp: PMPRespBundle): PMPRespBundle = {
    val res = Wire(new PMPRespBundle())
    res.ld := this.ld || resp.ld
    res.st := this.st || resp.st
    res.instr := this.instr || resp.instr
    res.mmio := this.mmio || resp.mmio
    res
  }
}

trait PMPCheckMethod extends PMPConst {
  def pmp_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAtom(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAtom(cmd)) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := false.B
    resp
  }

  def pmp_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmpEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmpEntries.size
    require(num == NumPMP)

    // in M mode (mode > 1), accesses that match no entry (or only unlocked entries) pass through
    val passThrough = if (pmpEntries.isEmpty) true.B else (mode > 1.U)
    val pmpDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    pmpDefault.cfg.r := passThrough
    pmpDefault.cfg.w := passThrough
    pmpDefault.cfg.x := passThrough

    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    pmpEntries.zip(pmpDefault +: pmpEntries.take(num-1)).zipWithIndex.foreach{ case ((pmp, last_pmp), i) =>
      val is_match = pmp.is_match(addr, size, lgMaxSize, last_pmp)
      val ignore = passThrough && !pmp.cfg.l
      val aligned = pmp.aligned(addr, size, lgMaxSize, last_pmp)

      val cur = WireInit(pmp)
      cur.cfg.r := aligned && (pmp.cfg.r || ignore)
      cur.cfg.w := aligned && (pmp.cfg.w || ignore)
      cur.cfg.x := aligned && (pmp.cfg.x || ignore)

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // default entry: always matches, carrying the pass-through permissions
    match_vec(num) := true.B
    cfg_vec(num) := pmpDefault

    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, init = false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
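/* Illustrative note (added; not in the original source): ParallelPriorityMux picks the
 * lowest-indexed entry whose match bit is set, implementing the PMP rule that the
 * lowest-numbered matching entry determines the permissions. The appended (num+1)-th
 * entry always matches and encodes the "no entry matched" outcome: full pass-through
 * in M mode, and no permission (hence an access fault) otherwise. */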
class PMPCheckerEnv(implicit p: Parameters) extends PMPBundle {
  val mode = UInt(2.W)
  val pmp = Vec(NumPMP, new PMPEntry())
  val pma = Vec(NumPMA, new PMPEntry())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry]): Unit = {
    this.mode := mode
    this.pmp := pmp
    this.pma := pma
  }
}

class PMPCheckIO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the request's fire signal
  val resp = new PMPRespBundle()

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
  override def cloneType: this.type = (new PMPCheckIO(lgMaxSize)).asInstanceOf[this.type]
}

class PMPCheckv2IO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the request's fire signal
  val resp = Output(new PMPConfig())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
  override def cloneType: this.type = (new PMPCheckv2IO(lgMaxSize)).asInstanceOf[this.type]
}

@chiselName
class PMPChecker
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false,
  pmpUsed: Boolean = true
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckIO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp_pmp = pmp_check(req.cmd, res_pmp.cfg)
  val resp_pma = pma_check(req.cmd, res_pma.cfg)
  val resp = if (pmpUsed) (resp_pmp | resp_pma) else resp_pma

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }
}
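/* Illustrative usage sketch (added; not in the original source — the instance and
 * signal names here are assumptions):
 *   val pmp_checker = Module(new PMPChecker(lgMaxSize = 3))
 *   pmp_checker.io.apply(priv_mode, pmpModule.io.pmp, pmpModule.io.pma, tlb_resp_fire, tlb_paddr)
 *   val loadAccessFault = pmp_checker.io.resp.ld
 * With the default sameCycle = false and leaveHitMux = false, io.resp arrives one
 * cycle after io.req.valid. */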
/* get the checked config (attributes), instead of a fault response */
@chiselName
class PMPCheckerv2
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckv2IO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp = and(res_pmp, res_pma)

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }

  /** merge the PMP and PMA results: the permissions are the intersection,
   * while the PMA-only attributes (c, atomic) come from the PMA side
   */
  def and(pmp: PMPEntry, pma: PMPEntry): PMPConfig = {
    val tmp_res = Wire(new PMPConfig)
    tmp_res.l := DontCare
    tmp_res.a := DontCare
    tmp_res.r := pmp.cfg.r && pma.cfg.r
    tmp_res.w := pmp.cfg.w && pma.cfg.w
    tmp_res.x := pmp.cfg.x && pma.cfg.x
    tmp_res.c := pma.cfg.c
    tmp_res.atomic := pma.cfg.atomic
    tmp_res
  }
}
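/* Illustrative usage sketch (added; not in the original source — the instance and
 * signal names here are assumptions): PMPCheckerv2 suits consumers that want the
 * merged attributes rather than fault bits, e.g. checking cacheability:
 *   val checker = Module(new PMPCheckerv2())
 *   checker.io.apply(priv_mode, pmpModule.io.pmp, pmpModule.io.pma, ptw_req_valid, ptw_paddr)
 *   val cacheable = checker.io.resp.c */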