/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.fu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.internal.naming.chiselName
import chisel3.util._
import utils.MaskedRegMap.WritableMask
import xiangshan._
import xiangshan.backend.fu.util.HasCSRConst
import utils._
import xiangshan.cache.mmu.{TlbCmd, TlbExceptionBundle}

trait PMPConst {
  val PMPOffBits = 2 // a PMP region is at minimum 4 bytes
}

abstract class PMPBundle(implicit p: Parameters) extends XSBundle with PMPConst {
  val CoarserGrain: Boolean = PlatformGrain > PMPOffBits
}
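// e.g. with a 4 KiB platform grain (PlatformGrain = 12) and PMPOffBits = 2,
// CoarserGrain is true and the NA4 mode is not selectable.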

abstract class PMPModule(implicit p: Parameters) extends XSModule with PMPConst with HasCSRConst

@chiselName
class PMPConfig(implicit p: Parameters) extends PMPBundle {
  val l = Bool()
  val c = Bool() // res(1), unused in PMP
  val atomic = Bool() // res(0), unused in PMP
  val a = UInt(2.W)
  val x = Bool()
  val w = Bool()
  val r = Bool()

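  // Together these fields form one pmpcfg byte, laid out as in the privileged
  // spec (MSB first): l(7), c(6), atomic(5), a(4:3), x(2), w(1), r(0).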
  def res: UInt = Cat(c, atomic) // reserved bits, unused in PMP
  def off = a === 0.U
  def tor = a === 1.U
  def na4 = { if (CoarserGrain) false.B else a === 2.U }
  def napot = { if (CoarserGrain) a(1).asBool else a === 3.U }
  def off_tor = !a(1)
  def na4_napot = a(1)

  def locked = l
  def addr_locked: Bool = locked
  // for TOR, pmpaddr(i) is also locked when entry i+1 is a locked TOR entry
  def addr_locked(next: PMPConfig): Bool = locked || (next.locked && next.tor)
}

trait PMPReadWriteMethod extends PMPConst { this: PMPBase =>
  def write_cfg_vec(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_tmp
      cfgVec(i).w := cfg_w_tmp.w && cfg_w_tmp.r // R=0/W=1 is reserved, so force W low when R is low
      if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_tmp.a(1), cfg_w_tmp.a.orR) } // promote NA4 to NAPOT when the grain is coarse
    }
    cfgVec.asUInt
  }

  def write_cfg_vec(mask: Vec[UInt], addr: Vec[UInt], index: Int)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_m_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_m_tmp
      cfgVec(i).w := cfg_w_m_tmp.w && cfg_w_m_tmp.r
      if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_m_tmp.a(1), cfg_w_m_tmp.a.orR) }
      when (cfgVec(i).na4_napot) {
        mask(index + i) := new PMPEntry().match_mask(cfgVec(i), addr(index + i))
      }
    }
    cfgVec.asUInt
  }

  /** In general, the PMP grain is 2**(G+2) bytes, where G = PlatformGrain - PMPOffBits.
   * When G >= 1, NA4 is not selectable.
   * When G >= 2 and cfg.a(1) is set (NAPOT mode), bits addr(G-2, 0) read as all ones.
   * When G >= 1 and cfg.a(1) is clear (OFF or TOR mode), bits addr(G-1, 0) read as all zeros.
   * The low PMPOffBits bits are not stored.
   */
  def read_addr(): UInt = {
    read_addr(cfg)(addr)
  }

  def read_addr(cfg: PMPConfig)(addr: UInt): UInt = {
    val G = PlatformGrain - PMPOffBits
    require(G >= 0)
    if (G == 0) {
      addr
    } else if (G >= 2) {
      Mux(cfg.na4_napot, set_low_bits(addr, G-1), clear_low_bits(addr, G))
    } else { // G == 1
      Mux(cfg.off_tor, clear_low_bits(addr, G), addr)
    }
  }
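
  // Worked example (illustrative): with PlatformGrain = 12 and PMPOffBits = 2,
  // G = 10, so a NAPOT entry reads back with addr(8, 0) forced to ones and an
  // OFF/TOR entry with addr(9, 0) forced to zeros.
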
  /** addr: the stored address, with the low PMPOffBits dropped.
   * compare_addr: the stored address expanded back to a full physical address for comparison.
   * paddr: the external physical address being written.
   */
  def write_addr(next: PMPBase)(paddr: UInt) = {
    Mux(!cfg.addr_locked(next.cfg), paddr, addr)
  }
  def write_addr(paddr: UInt) = {
    Mux(!cfg.addr_locked, paddr, addr)
  }

  /** set the data's low num bits (LSBs) to ones */
  def set_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    data | ((1 << num)-1).U
  }

  /** clear the data's low num bits (LSBs) to zeros */
  def clear_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    // use Cat instead of AND-ing with a mask to avoid signal-width inference problems
    if (num == 0) { data }
    else { Cat(data(data.getWidth-1, num), 0.U(num.W)) }
  }
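
  // e.g. set_low_bits(0b1000.U, 2) yields 0b1011;
  // clear_low_bits(0b1011.U, 2) yields 0b1000.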
}

/** PMPBase is used inside the CSR unit and carries only the read/write logic. */
@chiselName
class PMPBase(implicit p: Parameters) extends PMPBundle with PMPReadWriteMethod {
  val cfg = new PMPConfig
  val addr = UInt((PAddrBits - PMPOffBits).W)

  def gen(cfg: PMPConfig, addr: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
  }
}

trait PMPMatchMethod extends PMPConst { this: PMPEntry =>
  /** compare_addr: the full-width physical address this entry is compared against */
  def compare_addr: UInt = ((addr << PMPOffBits) & ~(((1 << PlatformGrain) - 1).U(PAddrBits.W))).asUInt

  /** lgSize and lgMaxSize are both log2 of the access size in bytes.
   * For the dtlb, the maximum access is XLEN bits, i.e. 8 bytes (lgMaxSize = 3).
   * For the itlb and ptw, the maximum may be as large as 512 bits(?),
   * though 64 bytes may be all that is needed; how do we prevent bugs here?
   * TODO: handle the special case where itlb & ptw & dcache access a wider size than XLEN.
   */
  def is_match(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    Mux(cfg.na4_napot, napotMatch(paddr, lgSize, lgMaxSize),
      Mux(cfg.tor, torMatch(paddr, lgSize, lgMaxSize, last_pmp), false.B))
  }

  /** generate the mask that speeds up matching in NAPOT mode */
  def match_mask(paddr: UInt) = {
    val match_mask_addr: UInt = Cat(paddr, cfg.a(0)).asUInt | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_addr & ~(match_mask_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  def match_mask(cfg: PMPConfig, paddr: UInt) = {
    val match_mask_c_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_c_addr & ~(match_mask_c_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }
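
  // Worked example (illustrative): a NAPOT pmpaddr with k trailing ones
  // (e.g. ...0111, k = 3) has a(0) = 1 appended, giving k+1 trailing ones;
  // extracting the trailing ones and appending PMPOffBits more ones yields a
  // mask with k+3 low bits set, i.e. a naturally aligned 2^(k+3)-byte region
  // (64 bytes here).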

  /** does the whole access lie strictly below compare_addr? */
  def boundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    if (lgMaxSize <= PlatformGrain) {
      (paddr < compare_addr)
    } else {
      val highLess = (paddr >> lgMaxSize) < (compare_addr >> lgMaxSize)
      val highEqual = (paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)
      val lowLess = (paddr(lgMaxSize-1, 0) | OneHot.UIntToOH1(lgSize, lgMaxSize)) < compare_addr(lgMaxSize-1, 0)
      highLess || (highEqual && lowLess)
    }
  }

  /** the access does not lie entirely below compare_addr (TOR lower-bound check) */
  def lowerBoundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    !boundMatch(paddr, lgSize, lgMaxSize)
  }

  /** the access starts below compare_addr (TOR upper-bound check) */
  def higherBoundMatch(paddr: UInt, lgMaxSize: Int) = {
    boundMatch(paddr, 0.U, lgMaxSize)
  }

  /** TOR: match when the access falls between the previous entry's address and this entry's address */
  def torMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    last_pmp.lowerBoundMatch(paddr, lgSize, lgMaxSize) && higherBoundMatch(paddr, lgMaxSize)
  }

  /** equality on the bits not covered by mask m */
  def unmaskEqual(a: UInt, b: UInt, m: UInt) = {
    (a & ~m) === (b & ~m)
  }

  def napotMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      unmaskEqual(paddr, compare_addr, mask)
    } else {
      val lowMask = mask | OneHot.UIntToOH1(lgSize, lgMaxSize)
      val highMatch = unmaskEqual(paddr >> lgMaxSize, compare_addr >> lgMaxSize, mask >> lgMaxSize)
      val lowMatch = unmaskEqual(paddr(lgMaxSize-1, 0), compare_addr(lgMaxSize-1, 0), lowMask(lgMaxSize-1, 0))
      highMatch && lowMatch
    }
  }

  def aligned(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last: PMPEntry) = {
    if (lgMaxSize <= PlatformGrain) {
      true.B
    } else {
      val lowBitsMask = OneHot.UIntToOH1(lgSize, lgMaxSize)
      val lowerBound = ((paddr >> lgMaxSize) === (last.compare_addr >> lgMaxSize)) &&
        ((~paddr(lgMaxSize-1, 0) & last.compare_addr(lgMaxSize-1, 0)) =/= 0.U)
      val upperBound = ((paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)) &&
        ((compare_addr(lgMaxSize-1, 0) & (paddr(lgMaxSize-1, 0) | lowBitsMask)) =/= 0.U)
      val torAligned = !(lowerBound || upperBound)
      val napotAligned = (lowBitsMask & ~mask(lgMaxSize-1, 0)) === 0.U
      Mux(cfg.na4_napot, napotAligned, torAligned)
    }
  }
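
  // Note: "aligned" here means the access does not straddle a region boundary:
  // for TOR the access must not partially overlap either bound; for NAPOT the
  // access size must not exceed the granularity implied by mask.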
}

/** PMPEntry is used by the PMP copies outside the CSR unit.
  * It carries one extra element, mask, to speed up NAPOT matching.
  * TODO: make mask an element, not a method, for timing optimization.
  */
@chiselName
class PMPEntry(implicit p: Parameters) extends PMPBase with PMPMatchMethod {
  val mask = UInt(PAddrBits.W) // helps to match in NAPOT mode

  def write_addr(next: PMPBase, mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked(next.cfg), match_mask(paddr), mask)
    Mux(!cfg.addr_locked(next.cfg), paddr, addr)
  }

  def write_addr(mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked, match_mask(paddr), mask)
    Mux(!cfg.addr_locked, paddr, addr)
  }

  def gen(cfg: PMPConfig, addr: UInt, mask: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
    this.mask := mask
  }
}

trait PMPMethod extends HasXSParameter with PMPConst { this: XSModule =>
  def pmp_init(): (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    val cfg = WireInit(0.U.asTypeOf(Vec(NumPMP/8, UInt(XLEN.W))))
    val addr = Wire(Vec(NumPMP, UInt((PAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(NumPMP, UInt(PAddrBits.W)))
    addr := DontCare
    mask := DontCare
    (cfg, addr, mask)
  }

  def pmp_gen_mapping
  (
    init: () => (Vec[UInt], Vec[UInt], Vec[UInt]),
    num: Int = 16,
    cfgBase: Int,
    addrBase: Int,
    entries: Vec[PMPEntry]
  ) = {
    val pmpCfgPerCSR = XLEN / new PMPConfig().getWidth
    def pmpCfgIndex(i: Int) = (XLEN / 32) * (i / pmpCfgPerCSR)
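    // e.g. on RV64, pmpCfgPerCSR = 8 and pmpCfgIndex(i) yields 0, 2, 4, ...,
    // matching the spec, where only even-numbered pmpcfg CSRs exist.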
    val init_value = init()
    /** to fit MaskedRegMap's write interface, declare the cfgs as merged XLEN-wide CSRs and split them per PMP entry */
    val cfgMerged = RegInit(init_value._1) // Vec(num / pmpCfgPerCSR, UInt(XLEN.W))
    val cfgs = WireInit(cfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    val addr = RegInit(init_value._2) // Vec(num, UInt((PAddrBits-PMPOffBits).W))
    val mask = RegInit(init_value._3) // Vec(num, UInt(PAddrBits.W))

    for (i <- entries.indices) {
      entries(i).gen(cfgs(i), addr(i), mask(i))
    }

    val cfg_mapping = (0 until num by pmpCfgPerCSR).map(i => {Map(
      MaskedRegMap(
        addr = cfgBase + pmpCfgIndex(i),
        reg = cfgMerged(i/pmpCfgPerCSR),
        wmask = WritableMask,
        wfn = new PMPBase().write_cfg_vec(mask, addr, i)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code; suggestions for a cleaner mapping are welcome

    val addr_mapping = (0 until num).map(i => {Map(
      MaskedRegMap(
        addr = addrBase + i,
        reg = addr(i),
        wmask = WritableMask,
        wfn = { if (i != num-1) entries(i).write_addr(entries(i+1), mask(i)) else entries(i).write_addr(mask(i)) },
        rmask = WritableMask,
        rfn = new PMPBase().read_addr(entries(i).cfg)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code; suggestions for a cleaner mapping are welcome

    cfg_mapping ++ addr_mapping
  }
}

@chiselName
class PMP(implicit p: Parameters) extends PMPModule with PMPMethod with PMAMethod {
  val io = IO(new Bundle {
    val distribute_csr = Flipped(new DistributedCSRIO())
    val pmp = Output(Vec(NumPMP, new PMPEntry()))
    val pma = Output(Vec(NumPMA, new PMPEntry()))
  })

  val w = io.distribute_csr.w

  val pmp = Wire(Vec(NumPMP, new PMPEntry()))
  val pma = Wire(Vec(NumPMA, new PMPEntry()))

  val pmpMapping = pmp_gen_mapping(pmp_init, NumPMP, PmpcfgBase, PmpaddrBase, pmp)
  val pmaMapping = pmp_gen_mapping(pma_init, NumPMA, PmacfgBase, PmaaddrBase, pma)
  val mapping = pmpMapping ++ pmaMapping

  val rdata = Wire(UInt(XLEN.W))
  MaskedRegMap.generate(mapping, w.bits.addr, rdata, w.valid, w.bits.data)

  io.pmp := pmp
  io.pma := pma
}

class PMPReqBundle(lgMaxSize: Int = 3)(implicit p: Parameters) extends PMPBundle {
  val addr = Output(UInt(PAddrBits.W))
  val size = Output(UInt(log2Ceil(lgMaxSize+1).W))
  val cmd = Output(TlbCmd())

  override def cloneType = (new PMPReqBundle(lgMaxSize)).asInstanceOf[this.type]
}

class PMPRespBundle(implicit p: Parameters) extends TlbExceptionBundle {
  val mmio = Output(Bool())

  /** merge two check results by ORing every fault flag and the mmio attribute */
  def |(resp: PMPRespBundle): PMPRespBundle = {
    val res = Wire(new PMPRespBundle())
    res.ld := this.ld || resp.ld
    res.st := this.st || resp.st
    res.instr := this.instr || resp.instr
    res.mmio := this.mmio || resp.mmio
    res
  }
}

trait PMPCheckMethod extends HasXSParameter with HasCSRConst { this: PMPChecker =>
  /** raise the corresponding access fault when the request type is not permitted by cfg */
  def pmp_check(cmd: UInt, cfg: PMPConfig)(implicit p: Parameters) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAtom(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAtom(cmd)) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := false.B
    resp
  }

  def pmp_match_res(addr: UInt, size: UInt, pmpEntries: Vec[PMPEntry], mode: UInt, lgMaxSize: Int) = {
    val num = pmpEntries.size
    require(num == NumPMP)

    val passThrough = if (pmpEntries.isEmpty) true.B else (mode > ModeS)
    // default result when no entry matches: pass through in M-mode, deny otherwise
    val pmpMinusOne = WireInit(0.U.asTypeOf(new PMPEntry()))
    pmpMinusOne.cfg.r := passThrough
    pmpMinusOne.cfg.w := passThrough
    pmpMinusOne.cfg.x := passThrough

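    // Fold from the highest-numbered entry down to entry 0 so that the
    // lowest-numbered matching entry takes priority, as the spec requires.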
    val res = pmpEntries.zip(pmpMinusOne +: pmpEntries.take(num-1)).zipWithIndex
      .reverse.foldLeft(pmpMinusOne) { case (prev, ((pmp, last_pmp), i)) =>
      val is_match = pmp.is_match(addr, size, lgMaxSize, last_pmp)
      val ignore = passThrough && !pmp.cfg.l
      val aligned = pmp.aligned(addr, size, lgMaxSize, last_pmp)

      val cur = WireInit(pmp)
      cur.cfg.r := aligned && (pmp.cfg.r || ignore)
      cur.cfg.w := aligned && (pmp.cfg.w || ignore)
      cur.cfg.x := aligned && (pmp.cfg.x || ignore)

      Mux(is_match, cur, prev)
    }
    res
  }
}

@chiselName
class PMPChecker
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false
)(implicit p: Parameters)
  extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  val io = IO(new Bundle{
    val env = Input(new Bundle {
      val mode = Input(UInt(2.W))
      val pmp = Input(Vec(NumPMP, new PMPEntry()))
      val pma = Input(Vec(NumPMA, new PMPEntry()))
    })
    val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the request's fire signal
    val resp = new PMPRespBundle()
  })

  val req = io.req.bits

  val res_pmp = pmp_match_res(req.addr, req.size, io.env.pmp, io.env.mode, lgMaxSize)
  val res_pma = pma_match_res(req.addr, req.size, io.env.pma, io.env.mode, lgMaxSize)

  val resp_pmp = pmp_check(req.cmd, res_pmp.cfg)
  val resp_pma = pma_check(req.cmd, res_pma.cfg)
  val resp = resp_pmp | resp_pma

  if (sameCycle) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }
}
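
// A minimal usage sketch (illustrative; `privMode`, `reqFire`, `pmpReq`, and
// `pmpModule` are hypothetical names, not part of this file):
//
//   val checker = Module(new PMPChecker(lgMaxSize = 3, sameCycle = false))
//   checker.io.env.mode := privMode          // current privilege mode from the CSR
//   checker.io.env.pmp  := pmpModule.io.pmp  // from the PMP module above
//   checker.io.env.pma  := pmpModule.io.pma
//   checker.io.req.valid := reqFire          // drive valid with the request's fire signal
//   checker.io.req.bits  := pmpReq           // addr/size/cmd of the access to check
//   // checker.io.resp then carries the ld/st/instr fault flags and the mmio attribute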
429b6982e83SLemover}