// xref: /XiangShan/src/main/scala/xiangshan/backend/fu/PMA.scala (revision 57bb43b5f11c3f1e89ac52f232fe73056b35d9bd)
/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/
package xiangshan.backend.fu

import chisel3._
import chisel3.util._
import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegReadFn, RegWriteFn}
import utils.{ParallelPriorityMux, ValidHold, ZeroExt}
import xiangshan.cache.mmu.TlbCmd
/** Parameters of the memory-mapped PMA register group.
  *
  * @param address   base address of the register group on the bus
  * @param mask      address mask selecting the group's aperture
  * @param lgMaxSize log2 of the widest access the port accepts
  * @param sameCycle whether the check result is produced in the same cycle
  * @param num       number of PMA entries exposed through this mapping
  */
case class MMPMAConfig(
  address: BigInt,
  mask: BigInt,
  lgMaxSize: Int,
  sameCycle: Boolean,
  num: Int
)
34
// PMA reuses the PMP sizing constants (PMXLEN, NumPMA, PlatformGrain, PMPAddrBits, PMPOffBits, ...) via PMPConst.
trait PMAConst extends PMPConst
36
/* Memory-mapped access path for the PMA registers: describes how the PMA
 * cfg/addr CSRs appear on the bus register map. */
trait MMPMAMethod extends PMAConst with PMAMethod with PMPReadWriteMethodBare {
  /** Build the register-map description for `num` PMA entries.
    *
    * @return a triple of
    *   - cfg_map : RegFields for the packed config CSRs (one slot per 4
    *               entries; slots with no backing CSR read as 0),
    *   - addr_map: RegFields for the per-entry address CSRs,
    *   - pma     : live PMPEntry wires generated from the backing registers.
    */
  def gen_mmpma_mapping(num: Int) = {
    // number of per-entry configs packed into one PMXLEN-wide CSR
    val pmaCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmaCfgLogicIndex(i: Int) = (PMXLEN / 32) * (i / pmaCfgPerCSR) // NOTE(review): defined but unused in this method
    def pmaCfgIndex(i: Int) = (i / pmaCfgPerCSR)

    val pma = Wire(Vec(num, new PMPEntry))

    /* pma init value */
    val init_value = pma_init()

    // Backing state: packed cfg CSR images, raw entry addresses, napot masks.
    val pmaCfgMerged = RegInit(init_value._1)
    val addr = RegInit(init_value._2)
    val mask = RegInit(init_value._3)
    // Unpack the merged cfg registers into one PMPConfig view per entry.
    val cfg = WireInit(pmaCfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    //  pmaMask are implicit regs that just used for timing optimization
    for (i <- pma.indices) {
      pma(i).gen(cfg(i), addr(i), mask(i))
    }

    // On PMXLEN == 32 every 4-entry slot has a backing CSR; on wider PMXLEN
    // only slots aligned to pmaCfgPerCSR exist and the others are padding
    // (cf. the RISC-V pmpcfg layout, where odd cfg CSRs are absent on RV64).
    val blankCfg = PMXLEN == 32
    val cfg_index_wrapper = (0 until num by 4).zip((0 until num by 4).map(a => blankCfg || (a % pmaCfgPerCSR == 0)))
    val cfg_map = (cfg_index_wrapper).map{ case(i, notempty) => {
//      println(s"tlbpma i:$i notempty:$notempty")
      // Read side is a one-deep response buffer: v_reg is set when a request
      // is accepted (r_ready && ivalid) and cleared when the response is
      // consumed (o_valid && oready). NOTE(review): relies on ValidHold(set,
      // clear, init) holding until cleared -- confirm in utils.ValidHold.
      RegField.apply(n = PMXLEN, r = RegReadFn{(ivalid, oready) =>
        val r_ready = Wire(Bool())
        val o_valid = Wire(Bool())
        val v_reg = ValidHold(r_ready && ivalid, o_valid && oready, false.B)
        r_ready := !v_reg // accept a new read only when no response is pending
        o_valid := v_reg  // present a response while one is buffered

        if (notempty) { (r_ready, o_valid, pmaCfgMerged(pmaCfgIndex(i))) }
        else { (r_ready, o_valid, 0.U) }
      }, w = RegWriteFn((valid, data) => {
        // Writes to padding slots are accepted and silently dropped.
        if (notempty) { when (valid) { pmaCfgMerged(pmaCfgIndex(i)) := write_cfg_vec(mask, addr, i)(data) } }
        true.B
      }), desc = RegFieldDesc(s"MMPMA_config_${i}", s"pma config register #${i}"))
    }}

    // Address CSRs: reads go through read_addr (mode-dependent encoding of the
    // stored address); writes go through write_addr, which is also given the
    // previous entry's cfg (TOR entries use the previous address as their
    // base -- NOTE(review): confirm exact write_addr semantics in the PMP code).
    val addr_map = (0 until num).map{ i => {
      val next_cfg = if (i == 0) 0.U.asTypeOf(new PMPConfig()) else cfg(i-1)
      RegField(
        n = PMXLEN,
        r = ZeroExt(read_addr(cfg(i))(addr(i)), PMXLEN),
        w = RegWriteFn((valid, data) => {
          when (valid) { addr(i) := write_addr(next_cfg, mask(i))(data(addr(0).getWidth-1, 0), cfg(i), addr(i))}
          true.B
        }),
        desc = RegFieldDesc(s"MMPMA_addr_${i}", s"pma addr register #${i}")
      )
    }}

    (cfg_map, addr_map, pma)
  }

}
93
trait PMAMethod extends PMAConst {
  /**
  def SimpleMemMapList = List(
      //     Base address      Top address       Width  Description    Mode (RWXIDSAC)
      MemMap("h00_0000_0000", "h00_0FFF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_1000_0000", "h00_1FFF_FFFF",   "h0", "QSPI_Flash",  "RWX"),
      MemMap("h00_2000_0000", "h00_2FFF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_3000_0000", "h00_3000_FFFF",   "h0", "DMA",         "RW"),
      MemMap("h00_3001_0000", "h00_3004_FFFF",   "h0", "GPU",         "RWC"),
      MemMap("h00_3005_0000", "h00_3006_FFFF",   "h0", "USB/SDMMC",   "RW"),
      MemMap("h00_3007_0000", "h00_30FF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_3100_0000", "h00_3111_FFFF",   "h0", "MMIO",        "RW"),
      MemMap("h00_3112_0000", "h00_37FF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_3800_0000", "h00_3800_FFFF",   "h0", "CLINT",       "RW"),
      MemMap("h00_3801_0000", "h00_3801_FFFF",   "h0", "BEU",         "RW"),
      MemMap("h00_3802_0000", "h00_3802_0FFF",   "h0", "DebugModule", "RWX"),
      MemMap("h00_3802_1000", "h00_3802_1FFF",   "h0", "MMPMA",       "RW"),
      MemMap("h00_3802_2000", "h00_3900_0000",   "h0", "Reserved",    ""),
      MemMap("h00_3900_0000", "h00_3900_1FFF",   "h0", "L3CacheCtrl", "RW"),
      MemMap("h00_3900_2000", "h00_39FF_FFFF",   "h0", "Reserved",    ""),
      MemMap("h00_3A00_0000", "h00_3A00_0FFF",   "h0", "PLL0",        "RW"),
      MemMap("h00_3A00_1000", "h00_3BFF_FFFF",   "h0", "Reserved",    ""),
      MemMap("h00_3C00_0000", "h00_3FFF_FFFF",   "h0", "PLIC",        "RW"),
      MemMap("h00_4000_0000", "h00_7FFF_FFFF",   "h0", "PCIe",        "RW"),
      MemMap("h00_8000_0000", "h0F_FFFF_FFFF",   "h0", "DDR",         "RWXIDSA"),
    )
   */

  /** Reset (power-on) values for the PMA registers, encoding the map above.
    *
    * Mostly TOR entries: each entry's range runs from the previous entry's
    * address up to its own, and lower-indexed entries take priority on a
    * match. Entries are filled from index num-2 downwards; entries 0..num-2
    * are used and entry num-1 is deliberately left OFF.
    *
    * @return (packed cfg CSR reset images,
    *          entry addresses in pmpaddr format (byte address >> PMPOffBits),
    *          napot match masks -- only meaningful for NAPOT entries)
    */
  def pma_init() : (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    val num = NumPMA
    require(num >= 16)
    val cfg = WireInit(0.U.asTypeOf(Vec(num, new PMPConfig())))

    // addr/mask default to DontCare: an OFF entry never matches, and TOR
    // entries do not use `mask` at all.
    val addr = Wire(Vec(num, UInt((PMPAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(num, UInt(PMPAddrBits.W)))
    addr := DontCare
    mask := DontCare

    var idx = num-1

    // TODO: turn to napot to save entries
    // use tor instead of napot, for napot may be confusing and hard to understand
    // NOTE: the whole address space defaults to DDR attributes (RWXCA)
    idx = idx - 1 // entry num-1 is deliberately left unused (OFF)
    addr(idx) := shift_addr(0xFFFFFFFFFL) // lowest-priority catch-all: DDR, which means rwxca
    cfg(idx).a := 3.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B; cfg(idx).c := true.B; cfg(idx).atomic := true.B
    mask(idx) := match_mask(addr(idx), cfg(idx))
    idx = idx - 1

    // NOTE: (0x0_0000_0000L, 0x0_8000_0000L) are default set to MMIO, only RW
    addr(idx) := get_napot(0x00000000L, 0x80000000L)
    cfg(idx).a := 3.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    mask(idx) := match_mask(addr(idx), cfg(idx))
    idx = idx - 1

    // TOR entries below carve up the low 2 GiB, highest address first. An
    // entry with address X and the next-lower entry at address Y covers
    // [Y, X); PLIC/PCIe above 0x3C00_0000 fall through to the RW napot entry.

    // [0x3A00_1000, 0x3C00_0000): Reserved, no permissions
    addr(idx) := shift_addr(0x3C000000)
    cfg(idx).a := 1.U
    idx = idx - 1

    // [0x3A00_0000, 0x3A00_1000): PLL0, RW
    addr(idx) := shift_addr(0x3A001000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    // [0x3900_2000, 0x3A00_0000): Reserved, no permissions
    addr(idx) := shift_addr(0x3A000000)
    cfg(idx).a := 1.U
    idx = idx - 1

    // [0x3900_0000, 0x3900_2000): L3CacheCtrl, RW
    addr(idx) := shift_addr(0x39002000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    // [0x3802_2000, 0x3900_0000): Reserved, no permissions
    addr(idx) := shift_addr(0x39000000)
    cfg(idx).a := 1.U
    idx = idx - 1

    // [0x3802_1000, 0x3802_2000): MMPMA, RW
    addr(idx) := shift_addr(0x38022000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    // [0x3802_0000, 0x3802_1000): DebugModule, RWX
    addr(idx) := shift_addr(0x38021000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B
    idx = idx - 1

    // [0x3005_0000, 0x3802_0000): USB/SDMMC .. BEU lumped together, RW
    addr(idx) := shift_addr(0x38020000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    // [0x3001_0000, 0x3005_0000): GPU, RW
    addr(idx) := shift_addr(0x30050000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    // [0x2000_0000, 0x3001_0000): Reserved + DMA, RW
    addr(idx) := shift_addr(0x30010000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    // [0x1000_0000, 0x2000_0000): QSPI Flash, RWX
    addr(idx) := shift_addr(0x20000000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B
    idx = idx - 1

    // [0x0000_0000, 0x1000_0000): Reserved, RW
    addr(idx) := shift_addr(0x10000000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    require(idx >= 0)
    // Lowest entry stays OFF (a = 0) and only supplies the TOR base (0) for
    // the entry above it.
    addr(idx) := shift_addr(0)

    // Pack the per-entry configs into PMXLEN-wide CSR reset images. The old
    // code hard-coded Vec(num/8, ...), which only type-checks when PMXLEN is
    // 8x the config width; derive the count so PMXLEN == 32 also elaborates.
    val cfgPerCSR = PMXLEN / new PMPConfig().getWidth
    val cfgInitMerge = cfg.asTypeOf(Vec(num / cfgPerCSR, UInt(PMXLEN.W)))
    (cfgInitMerge, addr, mask)
  }

  /** pmpaddr-style NAPOT encoding of the region [base, base + range)
    * (NAPOT requires a power-of-two `range`). Both arguments must be aligned
    * to the platform grain; the encoding is (base + range/2 - 1) >> PMPOffBits,
    * i.e. base >> 2 with the low bits set to mark the region size.
    */
  def get_napot(base: BigInt, range: BigInt): UInt = {
    val PlatformGrainBytes = (1 << PlatformGrain)
    // Print the offending value before require() aborts elaboration.
    // (The old `println("base:%x", base)` printed a tuple: Scala's println is
    // not printf -- it takes a single argument.)
    if ((base % PlatformGrainBytes) != 0) {
      println(s"PMA get_napot: base 0x${base.toString(16)} is not grain-aligned")
    }
    if ((range % PlatformGrainBytes) != 0) {
      println(s"PMA get_napot: range 0x${range.toString(16)} is not grain-aligned")
    }
    require((base % PlatformGrainBytes) == 0)
    require((range % PlatformGrainBytes) == 0)

    ((base + (range/2 - 1)) >> PMPOffBits).U
  }

  /** NAPOT match mask for a pmpaddr-style `paddr`: the trailing ones of
    * Cat(paddr, a(0)) (grain-extended) mark the region size; the PMPOffBits
    * low bits are always masked.
    */
  def match_mask(paddr: UInt, cfg: PMPConfig) = {
    val match_mask_addr: UInt = Cat(paddr, cfg.a(0)).asUInt() | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_addr & ~(match_mask_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  /** Convert a byte address to the pmpaddr register format (address >> 2). */
  def shift_addr(addr: BigInt) = {
    (addr >> 2).U
  }
}
231
/* Attribute checking: turns a matched PMA entry into access-fault/MMIO flags. */
trait PMACheckMethod extends PMPConst {
  /** Check one access of kind `cmd` against the attributes in `cfg` and
    * produce the per-access-type fault flags plus the mmio classification.
    */
  def pma_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    // load access fault: a plain (non-atomic) read from a non-readable region
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAtom(cmd) && !cfg.r
    // store access fault: a write -- or an atomic in an atomic-capable region
    // -- to a non-writable region. Precedence note: && binds tighter than ||,
    // so this reads (isWrite || (isAtom && cfg.atomic)) && !cfg.w.
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAtom(cmd) && cfg.atomic) && !cfg.w
    // instruction access fault: fetch from a non-executable region
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    // non-cacheable regions are classified as MMIO
    resp.mmio := !cfg.c
    resp
  }

  /** Priority-select the attributes of the PMA entry matching (addr, size).
    *
    * Entry 0 has the highest priority. A synthetic default entry (all-zero
    * cfg: no permissions, c = 0 => MMIO) is appended at index `num`, so the
    * mux always produces a result -- unlike PMP, PMA must classify every
    * address.
    *
    * @param leaveHitMux register the per-entry hit bits and cfgs (enabled by
    *        `valid`) and mux one cycle later, trading a cycle for timing.
    * @param valid       enable for those staging registers.
    * @param addr        physical address to classify.
    * @param size        access size (presumably log2 bytes, as in PMP -- confirm).
    * @param pmaEntries  the PMA entry wires; must be exactly NumPMA of them.
    * @param mode        unused here; kept for signature parity with the PMP checker.
    * @param lgMaxSize   log2 of the widest access this port can issue.
    */
  def pma_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmaEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmaEntries.size
    require(num == NumPMA)
    // pma should always be checked, could not be ignored
    // like amo and cached, it is the attribute not protection
    // so it must have initialization.
    require(!pmaEntries.isEmpty)

    val pmaDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    // Each entry is checked against its predecessor (TOR entries use the
    // previous entry's address as the range base); entry 0 pairs with the
    // all-zero default.
    pmaEntries.zip(pmaDefault +: pmaEntries.take(num-1)).zipWithIndex.foreach{ case ((pma, last_pma), i) =>
      val is_match = pma.is_match(addr, size, lgMaxSize, last_pma)
      val aligned = pma.aligned(addr, size, lgMaxSize, last_pma)

      // A hit that fails the alignment check forfeits every permission bit
      // (and cacheability), so pma_check will report it as a fault / MMIO.
      val cur = WireInit(pma)
      cur.cfg.r := aligned && pma.cfg.r
      cur.cfg.w := aligned && pma.cfg.w
      cur.cfg.x := aligned && pma.cfg.x
      cur.cfg.atomic := aligned && pma.cfg.atomic
      cur.cfg.c := aligned && pma.cfg.c

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // Fallback slot: always matches, yields the zero-permission default.
    match_vec(num) := true.B
    cfg_vec(num) := pmaDefault
    if (leaveHitMux) {
      // Stage the hit bits and cfgs one cycle (gated by `valid`) before muxing.
      ParallelPriorityMux(match_vec.map(RegEnable(_, init = false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
284