xref: /XiangShan/src/main/scala/xiangshan/backend/fu/PMP.scala (revision 5cf62c1ad0b87eb1f5673fe7b697faa43e4a6790)
1b6982e83SLemover/***************************************************************************************
2b6982e83SLemover* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3b6982e83SLemover* Copyright (c) 2020-2021 Peng Cheng Laboratory
4b6982e83SLemover*
5b6982e83SLemover* XiangShan is licensed under Mulan PSL v2.
6b6982e83SLemover* You can use this software according to the terms and conditions of the Mulan PSL v2.
7b6982e83SLemover* You may obtain a copy of Mulan PSL v2 at:
8b6982e83SLemover*          http://license.coscl.org.cn/MulanPSL2
9b6982e83SLemover*
10b6982e83SLemover* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11b6982e83SLemover* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12b6982e83SLemover* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13b6982e83SLemover*
14b6982e83SLemover* See the Mulan PSL v2 for more details.
15b6982e83SLemover***************************************************************************************/
16b6982e83SLemover
17a15116bdSLemover// See LICENSE.SiFive for license details.
18a15116bdSLemover
19b6982e83SLemoverpackage xiangshan.backend.fu
20b6982e83SLemover
21b6982e83SLemoverimport chipsalliance.rocketchip.config.Parameters
22b6982e83SLemoverimport chisel3._
23b6982e83SLemoverimport chisel3.internal.naming.chiselName
24b6982e83SLemoverimport chisel3.util._
25b6982e83SLemoverimport utils.MaskedRegMap.WritableMask
26b6982e83SLemoverimport xiangshan._
27b6982e83SLemoverimport xiangshan.backend.fu.util.HasCSRConst
28b6982e83SLemoverimport utils._
29b6982e83SLemoverimport xiangshan.cache.mmu.{TlbCmd, TlbExceptionBundle}
30b6982e83SLemover
/** Elaboration-time constants shared by all PMP bundles and modules. */
trait PMPConst {
  val PMPOffBits = 2 // minimal grain is 4 bytes, so pmpaddr drops the low 2 offset bits
}
34b6982e83SLemover
/** Base bundle for PMP structures.
  * CoarserGrain is true when the platform grain (2^PlatformGrain bytes) is
  * larger than the minimal 4-byte grain; in that case NA4 mode is not selectable.
  */
abstract class PMPBundle(implicit p: Parameters) extends XSBundle with PMPConst {
  val CoarserGrain: Boolean = PlatformGrain > PMPOffBits
}
38b6982e83SLemover
/** Base module for PMP hardware; mixes in the CSR address constants (pmpcfg/pmpaddr bases). */
abstract class PMPModule(implicit p: Parameters) extends XSModule with PMPConst with HasCSRConst
40b6982e83SLemover
/** One 8-bit pmpNcfg field (a single byte of a pmpcfg CSR), laid out
  * MSB-to-LSB as: L | res(1:0) | A(1:0) | X | W | R.
  * The reserved bits are carried here as named fields (c, atomic) but are
  * unused for PMP proper — presumably PMA attributes; confirm against PMA code.
  */
@chiselName
class PMPConfig(implicit p: Parameters) extends PMPBundle {
  val l = Bool()      // lock: entry is read-only (see addr_locked for the addr side)
  val c = Bool()      // res(1), unused in pmp
  val atomic = Bool() // res(0), unused in pmp
  val a = UInt(2.W)   // address-matching mode: 0=OFF, 1=TOR, 2=NA4, 3=NAPOT
  val x = Bool()      // execute permission
  val w = Bool()      // write permission
  val r = Bool()      // read permission

  def res: UInt = Cat(c, atomic) // in pmp, unused
  // Address-matching mode decodes.
  def off = a === 0.U
  def tor = a === 1.U
  // NA4 is never selectable when the grain is coarser than 4 bytes (spec: G >= 1).
  def na4 = { if (CoarserGrain) false.B else a === 2.U }
  // With a coarse grain, a(1) alone implies NAPOT (NA4 writes are remapped in write_cfg_vec).
  def napot = { if (CoarserGrain) a(1).asBool else a === 3.U }
  def off_tor = !a(1)  // mode is OFF or TOR
  def na4_napot = a(1) // mode is NA4 or NAPOT

  def locked = l
  // pmpaddr(i) is locked when entry i is locked...
  def addr_locked: Bool = locked
  // ...or when entry i+1 is locked in TOR mode (pmpaddr(i) is its lower bound).
  def addr_locked(next: PMPConfig): Bool = locked || (next.locked && next.tor)
}
63b6982e83SLemover
/** CSR read/write conversion logic mixed into PMPBase: legalizes pmpcfg
  * writes and formats pmpaddr reads according to the platform grain.
  */
trait PMPReadWriteMethod extends PMPConst { this: PMPBase =>
  /** Write function for one whole pmpcfg CSR: splits the XLEN-wide write
    * data into 8-bit PMPConfig fields and legalizes each one.
    * - W is forced low unless R is also written (R=0/W=1 is reserved).
    * - With a coarse grain, an NA4 write (a=2) is remapped to NAPOT:
    *   Cat(a(1), a.orR) maps 2 -> 3 and keeps 0/1/3 unchanged.
    * NOTE(review): the lock bit is not consulted here — a write to a locked
    * pmpcfg field appears to take effect (the addr path does check locks in
    * write_addr). Confirm whether lock enforcement for cfg happens elsewhere.
    */
  def write_cfg_vec(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_tmp
      cfgVec(i).w := cfg_w_tmp.w && cfg_w_tmp.r
      if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_tmp.a(1), cfg_w_tmp.a.orR) }
    }
    cfgVec.asUInt
  }

  /** Same legalization as above, but additionally refreshes the NAPOT match
    * mask of every entry whose newly written mode is NA4/NAPOT.
    * `index` is the number of the first pmp entry covered by this pmpcfg CSR.
    */
  def write_cfg_vec(mask: Vec[UInt], addr: Vec[UInt], index: Int)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_m_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_m_tmp
      cfgVec(i).w := cfg_w_m_tmp.w && cfg_w_m_tmp.r
      if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_m_tmp.a(1), cfg_w_m_tmp.a.orR) }
      when (cfgVec(i).na4_napot) {
        mask(index + i) := new PMPEntry().match_mask(cfgVec(i), addr(index + i))
      }
    }
    cfgVec.asUInt
  }

  /** In general, the PMP grain is 2**{G+2} bytes. When G >= 1, na4 is not selectable.
   * When G >= 2 and cfg.a(1) is set (then the mode is napot), the bits addr(G-2, 0) read as ones.
   * When G >= 1 and cfg.a(1) is clear (the mode is off or tor), the bits addr(G-1, 0) read as zeros.
   * The low OffBits are already dropped from the stored addr.
   */
  def read_addr(): UInt = {
    read_addr(cfg)(addr)
  }

  // Mode-dependent read view of pmpaddr (see comment above for the G cases).
  def read_addr(cfg: PMPConfig)(addr: UInt): UInt = {
    val G = PlatformGrain - PMPOffBits
    require(G >= 0)
    if (G == 0) {
      addr
    } else if (G >= 2) {
      Mux(cfg.na4_napot, set_low_bits(addr, G-1), clear_low_bits(addr, G))
    } else { // G is 1
      Mux(cfg.off_tor, clear_low_bits(addr, G), addr)
    }
  }
  /** addr is the stored (inside) addr with OffBits dropped.
   * compare_addr is the inside addr expanded for comparing.
   * paddr is the outside (full physical) addr.
   */
  // pmpaddr write with a TOR successor: ignored while locked (directly or via next).
  def write_addr(next: PMPBase)(paddr: UInt) = {
    Mux(!cfg.addr_locked(next.cfg), paddr, addr)
  }
  // pmpaddr write for the last entry (no successor): ignored while this entry is locked.
  def write_addr(paddr: UInt) = {
    Mux(!cfg.addr_locked, paddr, addr)
  }

  /** set the data's low num bits (lsb) to ones */
  def set_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    data | ((1 << num)-1).U
  }

  /** mask the data's low num bits (lsb) to zeros */
  def clear_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    // use Cat instead of & with mask to avoid "Signal Width" problem
    if (num == 0) { data }
    else { Cat(data(data.getWidth-1, num), 0.U(num.W)) }
  }
}
134ca2f90a6SLemover
/** PMPBase for the CSR unit:
  * one entry's architectural state (cfg byte + pmpaddr) with only the
  * read/write legalization logic, no match logic.
  */
@chiselName
class PMPBase(implicit p: Parameters) extends PMPBundle with PMPReadWriteMethod {
  val cfg = new PMPConfig                     // the 8-bit pmpNcfg field
  val addr = UInt((PAddrBits - PMPOffBits).W) // pmpaddrN: physical addr with the low OffBits dropped

  /** Elaboration helper: connect this bundle from existing cfg/addr signals. */
  def gen(cfg: PMPConfig, addr: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
  }
}
149b6982e83SLemover
/** Address-match logic mixed into PMPEntry (used by the distributed checker copies). */
trait PMPMatchMethod extends PMPConst { this: PMPEntry =>
  /** compare_addr is used to compare with the input addr:
    * the stored word addr restored to a byte address, with sub-grain bits
    * cleared so all comparisons are grain-aligned.
    */
  def compare_addr: UInt = ((addr << PMPOffBits) & ~(((1 << PlatformGrain) - 1).U(PAddrBits.W))).asUInt

  /** size and maxSize are both log2 of the access size.
   * for dtlb, the maxSize is bXLEN which is 8
   * for itlb and ptw, the maxSize is log2(512) ?
   * but we may only need the 64 bytes? how to prevent the bugs?
   * TODO: handle the special case that itlb & ptw & dcache access wider size than XLEN
   */
  def is_match(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    // OFF entries (neither na4_napot nor tor) never match.
    Mux(cfg.na4_napot, napotMatch(paddr, lgSize, lgMaxSize),
      Mux(cfg.tor, torMatch(paddr, lgSize, lgMaxSize, last_pmp), false.B))
  }

  /** generate match mask to help match in napot mode.
    * Appending cfg.a(0) supplies the NAPOT-encoded extra trailing one;
    * OR-ing the grain constant forces at least grain-sized regions.
    * `x & ~(x + 1)` then isolates the trailing run of ones, which — padded
    * with the OffBits ones — is the byte-address region mask.
    */
  def match_mask(paddr: UInt) = {
    val match_mask_addr: UInt = Cat(paddr, cfg.a(0)).asUInt() | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_addr & ~(match_mask_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  /** Same mask derivation, but for an explicitly supplied cfg (used while
    * writing pmpcfg, before this entry's own cfg register is updated). */
  def match_mask(cfg: PMPConfig, paddr: UInt) = {
    val match_mask_c_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_c_addr & ~(match_mask_c_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  /** True when the whole access [paddr, paddr + 2^lgSize) lies strictly below
    * this entry's compare_addr. The comparison is split around lgMaxSize —
    * presumably for timing; the low half ORs in the (size-1) mask so the
    * access's last byte is what gets compared.
    */
  def boundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    if (lgMaxSize <= PlatformGrain) {
      (paddr < compare_addr)
    } else {
      val highLess = (paddr >> lgMaxSize) < (compare_addr >> lgMaxSize)
      val highEqual = (paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)
      val lowLess = (paddr(lgMaxSize-1, 0) | OneHot.UIntToOH1(lgSize, lgMaxSize))  < compare_addr(lgMaxSize-1, 0)
      highLess || (highEqual && lowLess)
    }
  }

  // TOR lower bound: the access is NOT entirely below this entry's addr.
  def lowerBoundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    !boundMatch(paddr, lgSize, lgMaxSize)
  }

  // TOR upper bound: the access's first byte is below this entry's addr.
  // (Partial overlap past the top is caught by `aligned`, not here.)
  def higherBoundMatch(paddr: UInt, lgMaxSize: Int) = {
    boundMatch(paddr, 0.U, lgMaxSize)
  }

  // TOR match: last entry's addr <= access < this entry's addr.
  def torMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    last_pmp.lowerBoundMatch(paddr, lgSize, lgMaxSize) && higherBoundMatch(paddr, lgMaxSize)
  }

  // Equality ignoring the masked (don't-care) bits.
  def unmaskEqual(a: UInt, b: UInt, m: UInt) = {
    (a & ~m) === (b & ~m)
  }

  // NAPOT match: paddr equals compare_addr outside the region mask.
  // For wide accesses the low compare also masks out the (size-1) bits.
  def napotMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      unmaskEqual(paddr, compare_addr, mask)
    } else {
      val lowMask = mask | OneHot.UIntToOH1(lgSize, lgMaxSize)
      val highMatch = unmaskEqual(paddr >> lgMaxSize, compare_addr >> lgMaxSize, mask >> lgMaxSize)
      val lowMatch = unmaskEqual(paddr(lgMaxSize-1, 0), compare_addr(lgMaxSize-1, 0), lowMask(lgMaxSize-1, 0))
      highMatch && lowMatch
    }
  }

  /** True when the access does not straddle this entry's region boundary:
    * - TOR: not partially below `last`'s base and not crossing this top;
    * - NAPOT: the (size-1) bits all fall inside the region mask.
    * Accesses no wider than the grain can never straddle.
    */
  def aligned(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last: PMPEntry) = {
    if (lgMaxSize <= PlatformGrain) {
      true.B
    } else {
      val lowBitsMask = OneHot.UIntToOH1(lgSize, lgMaxSize)
      val lowerBound = ((paddr >> lgMaxSize) === (last.compare_addr >> lgMaxSize)) &&
        ((~paddr(lgMaxSize-1, 0) & last.compare_addr(lgMaxSize-1, 0)) =/= 0.U)
      val upperBound = ((paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)) &&
        ((compare_addr(lgMaxSize-1, 0) & (paddr(lgMaxSize-1, 0) | lowBitsMask)) =/= 0.U)
      val torAligned = !(lowerBound || upperBound)
      val napotAligned = (lowBitsMask & ~mask(lgMaxSize-1, 0)) === 0.U
      Mux(cfg.na4_napot, napotAligned, torAligned)
    }
  }
}
229ca2f90a6SLemover
/** PMPEntry for the distributed pmp copies outside the CSR unit:
  * PMPBase plus one extra element, the precomputed NAPOT match mask.
  * TODO: make mask an element, not a method, for timing opt
  */
@chiselName
class PMPEntry(implicit p: Parameters) extends PMPBase with PMPMatchMethod {
  val mask = UInt(PAddrBits.W) // precomputed region mask to help match in napot mode

  /** pmpaddr write with a TOR successor: ignored while locked (directly, or
    * because `next` is a locked TOR entry). Also refreshes the NAPOT mask.
    */
  def write_addr(next: PMPBase, mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked(next.cfg), match_mask(paddr), mask)
    Mux(!cfg.addr_locked(next.cfg), paddr, addr)
  }

  /** Variant for the last entry, which has no TOR successor. */
  def write_addr(mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked, match_mask(paddr), mask)
    Mux(!cfg.addr_locked, paddr, addr)
  }

  /** Elaboration helper: connect this entry from existing cfg/addr/mask registers. */
  def gen(cfg: PMPConfig, addr: UInt, mask: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
    this.mask := mask
  }
}
255b6982e83SLemover
/** Generates the register file and MaskedRegMap entries for a set of
  * PMP-style CSRs; shared by the PMP and PMA mappings.
  */
trait PMPMethod extends HasXSParameter with PMPConst { this: XSModule =>
  /** Reset state: all cfg bytes zero (every entry OFF); addr/mask left
    * DontCare since an OFF entry never matches.
    */
  def pmp_init() : (Vec[UInt], Vec[UInt], Vec[UInt])= {
    val cfg = WireInit(0.U.asTypeOf(Vec(NumPMP/8, UInt(XLEN.W))))
    val addr = Wire(Vec(NumPMP, UInt((PAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(NumPMP, UInt(PAddrBits.W)))
    addr := DontCare
    mask := DontCare
    (cfg, addr, mask)
  }

  /** Builds the MaskedRegMap for `num` entries:
    * cfg CSRs at cfgBase.., addr CSRs at addrBase.., and drives `entries`
    * (the decoded per-entry view) from the backing registers.
    * @param init supplies the reset values (pmp_init or pma_init)
    */
  def pmp_gen_mapping
  (
    init: () => (Vec[UInt], Vec[UInt], Vec[UInt]),
    num: Int = 16,
    cfgBase: Int,
    addrBase: Int,
    entries: Vec[PMPEntry]
  ) = {
    // Entries per pmpcfg CSR: 8 per register on RV64 (PMPConfig is 8 bits wide).
    val pmpCfgPerCSR = XLEN / new PMPConfig().getWidth
    // CSR-number offset of the cfg register holding entry i; the (XLEN/32)
    // stride skips the odd pmpcfg numbers, which do not exist on RV64.
    def pmpCfgIndex(i: Int) = (XLEN / 32) * (i / pmpCfgPerCSR)
    val init_value = init()
    /** to fit MaskedRegMap's write, declare cfgs as Merged CSRs and split them into each pmp */
    val cfgMerged = RegInit(init_value._1) //(Vec(num / pmpCfgPerCSR, UInt(XLEN.W))) // RegInit(VecInit(Seq.fill(num / pmpCfgPerCSR)(0.U(XLEN.W))))
    val cfgs = WireInit(cfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    val addr = RegInit(init_value._2) // (Vec(num, UInt((PAddrBits-PMPOffBits).W)))
    val mask = RegInit(init_value._3) // (Vec(num, UInt(PAddrBits.W)))

    // Expose the decoded view consumed by the checkers.
    for (i <- entries.indices) {
      entries(i).gen(cfgs(i), addr(i), mask(i))
    }



    // One map entry per cfg CSR; writes go through the legalizing write_cfg_vec,
    // which also refreshes the NAPOT masks for this CSR's entries.
    val cfg_mapping = (0 until num by pmpCfgPerCSR).map(i => {Map(
      MaskedRegMap(
        addr = cfgBase + pmpCfgIndex(i),
        reg = cfgMerged(i/pmpCfgPerCSR),
        wmask = WritableMask,
        wfn = new PMPBase().write_cfg_vec(mask, addr, i)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code, hit me if u have better codes

    // One map entry per addr CSR; writes respect locks (the last entry has no
    // TOR successor), reads go through the grain-dependent read_addr view.
    val addr_mapping = (0 until num).map(i => {Map(
      MaskedRegMap(
        addr = addrBase + i,
        reg = addr(i),
        wmask = WritableMask,
        wfn = { if (i != num-1) entries(i).write_addr(entries(i+1), mask(i)) else entries(i).write_addr(mask(i)) },
        rmask = WritableMask,
        rfn = new PMPBase().read_addr(entries(i).cfg)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code, hit me if u have better codes.



    cfg_mapping ++ addr_mapping
  }
}
314b6982e83SLemover
/** Architectural PMP/PMA CSR file.
  * Holds the pmpcfg/pmpaddr (and pmacfg/pmaaddr) registers, services writes
  * arriving over the distributed-CSR channel, and broadcasts the decoded
  * entries to the checkers.
  */
@chiselName
class PMP(implicit p: Parameters) extends PMPModule with PMPMethod with PMAMethod {
  val io = IO(new Bundle {
    val distribute_csr = Flipped(new DistributedCSRIO())
    val pmp = Output(Vec(NumPMP, new PMPEntry()))
    val pma = Output(Vec(NumPMA, new PMPEntry()))
  })

  val w = io.distribute_csr.w

  val pmp = Wire(Vec(NumPMP, new PMPEntry()))
  val pma = Wire(Vec(NumPMA, new PMPEntry()))

  // Build both register files with the shared generator; they differ only in
  // reset values and CSR base addresses.
  val pmpMapping = pmp_gen_mapping(pmp_init, NumPMP, PmpcfgBase, PmpaddrBase, pmp)
  val pmaMapping = pmp_gen_mapping(pma_init, NumPMA, PmacfgBase, PmaaddrBase, pma)
  val mapping = pmpMapping ++ pmaMapping

  // rdata is generated but unused here — reads are served by the CSR unit;
  // this module only consumes the write channel.
  val rdata = Wire(UInt(XLEN.W))
  MaskedRegMap.generate(mapping, w.bits.addr, rdata, w.valid, w.bits.data)

  io.pmp := pmp
  io.pma := pma
}
338b6982e83SLemover
/** Request to a PMPChecker.
  * @param lgMaxSize log2 of the widest supported access; bounds the size field.
  */
class PMPReqBundle(lgMaxSize: Int = 3)(implicit p: Parameters) extends PMPBundle {
  val addr = Output(UInt(PAddrBits.W))             // physical address of the access
  val size = Output(UInt(log2Ceil(lgMaxSize+1).W)) // log2 of the access size in bytes
  val cmd = Output(TlbCmd())                       // access type (read/write/exec/atomic)

  // Required because of the lgMaxSize constructor parameter (pre-auto-cloneType Chisel).
  override def cloneType = (new PMPReqBundle(lgMaxSize)).asInstanceOf[this.type]
}
346b6982e83SLemover
/** Result of a PMP/PMA check: the per-access-type fault bits inherited from
  * TlbExceptionBundle (ld/st/instr) plus an mmio attribute.
  */
class PMPRespBundle(implicit p: Parameters) extends TlbExceptionBundle {
  val mmio = Output(Bool())

  /** Field-wise OR of two responses: a fault or mmio attribute reported by
    * either side is kept in the combined result.
    */
  def |(resp: PMPRespBundle): PMPRespBundle = {
    val merged = Wire(new PMPRespBundle())
    merged.mmio := this.mmio || resp.mmio
    merged.instr := this.instr || resp.instr
    merged.st := this.st || resp.st
    merged.ld := this.ld || resp.ld
    merged
  }
}
359b6982e83SLemover
/** Permission-check and entry-match logic mixed into PMPChecker. */
trait PMPCheckMethod extends HasXSParameter with HasCSRConst { this: PMPChecker =>
  /** Turn the matched entry's cfg into fault flags for this command.
    * Atomics are checked against W (not R): the atomic case is excluded from
    * the load check and included in the store check below.
    */
  def pmp_check(cmd: UInt, cfg: PMPConfig)(implicit p: Parameters) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAtom(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAtom(cmd)) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := false.B
    resp
  }

  /** Priority-select the lowest-numbered matching pmp entry and return its
    * permission-adjusted copy.
    * - Entry 0 uses a zeroed default entry as its TOR predecessor.
    * - Slot `num` is an always-matching default whose r/w/x equal the
    *   pass-through condition (mode > ModeS, i.e. above supervisor), so
    *   unmatched accesses succeed only at that privilege.
    * - Unlocked entries are ignored (treated as full-permission) when the
    *   pass-through condition holds; straddling accesses lose all permissions
    *   via `aligned`.
    * @param leaveHitMux register the per-entry hit bits and cfg copies
    *        (enabled by `valid`) and do the priority mux one cycle later.
    */
  def pmp_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmpEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmpEntries.size
    require(num == NumPMP)

    val passThrough = if (pmpEntries.isEmpty) true.B else (mode > ModeS)
    // Fallback entry: full permissions iff running above S-mode.
    val pmpDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    pmpDefault.cfg.r := passThrough
    pmpDefault.cfg.w := passThrough
    pmpDefault.cfg.x := passThrough

    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    // Each entry is paired with its predecessor (the TOR lower bound).
    pmpEntries.zip(pmpDefault +: pmpEntries.take(num-1)).zipWithIndex.foreach{ case ((pmp, last_pmp), i) =>
      val is_match = pmp.is_match(addr, size, lgMaxSize, last_pmp)
      // Unlocked entries do not constrain the pass-through privilege level.
      val ignore = passThrough && !pmp.cfg.l
      val aligned = pmp.aligned(addr, size, lgMaxSize, last_pmp)

      // Copy of the entry with effective permissions for this access.
      val cur = WireInit(pmp)
      cur.cfg.r := aligned && (pmp.cfg.r || ignore)
      cur.cfg.w := aligned && (pmp.cfg.w || ignore)
      cur.cfg.x := aligned && (pmp.cfg.x || ignore)

//      Mux(is_match, cur, prev)
      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // default value
    match_vec(num) := true.B
    cfg_vec(num) := pmpDefault

    if (leaveHitMux) {
      // Register hits and cfgs first, mux next cycle — presumably for timing.
      ParallelPriorityMux(match_vec.map(RegEnable(_, init = false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
415b6982e83SLemover
/** Combined PMP + PMA checker for one access port.
  * @param lgMaxSize   log2 of the widest access to check (default 8 bytes)
  * @param sameCycle   respond combinationally (no output register)
  * @param leaveHitMux register the per-entry hit results inside the match
  *                    logic instead of registering the final response;
  *                    mutually exclusive with sameCycle
  */
@chiselName
class PMPChecker
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false
)(implicit p: Parameters)
  extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  val io = IO(new Bundle{
    val env = Input(new Bundle {
      val mode = Input(UInt(2.W))
      val pmp = Input(Vec(NumPMP, new PMPEntry()))
      val pma = Input(Vec(NumPMA, new PMPEntry()))
    })
    val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: assign the valid to fire signal
    val resp = new PMPRespBundle()
  })
  // leaveHitMux already adds one cycle inside pmp_match_res; combining it with
  // the zero-cycle sameCycle path would be contradictory.
  require(!(leaveHitMux && sameCycle))

  val req = io.req.bits

  // Highest-priority matching PMP and PMA entries for this access.
  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.env.pmp, io.env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.env.pma, io.env.mode, lgMaxSize)

  // A fault from either checker faults the access (field-wise OR).
  val resp_pmp = pmp_check(req.cmd, res_pmp.cfg)
  val resp_pma = pma_check(req.cmd, res_pma.cfg)
  val resp = resp_pmp | resp_pma

  // leaveHitMux has already been registered inside the match logic; otherwise
  // register here unless a same-cycle response was requested.
  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }
}