/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.backend.fu

import chisel3._
import chisel3.util._
import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegReadFn, RegWriteFn}
import utility.{ParallelPriorityMux, ValidHold, ZeroExt}
import xiangshan.cache.mmu.TlbCmd

import scala.collection.mutable.ListBuffer

/* Memory-mapped PMA (Physical Memory Attribute) configuration */
case class MMPMAConfig
(
  address: BigInt,
  mask: BigInt,
  lgMaxSize: Int,
  sameCycle: Boolean,
  num: Int
)

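/* A usage sketch (illustrative assumptions only, not definitions from this
 * file): a platform might describe its memory-mapped PMA register block as
 *
 *   val mmpmaParam = MMPMAConfig(
 *     address   = 0x38021000L, // MMPMA window base in the map below
 *     mask      = 0xfffL,      // address-decode mask for a 4 KiB window
 *     lgMaxSize = 3,           // widest single access: 2^3 = 8 bytes
 *     sameCycle = true,        // combinational (same-cycle) check
 *     num       = 16           // number of PMA entries exposed
 *   )
 *
 * and pass it to the register node that uses gen_mmpma_mapping below.
 */
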
trait PMAConst extends PMPConst

trait MMPMAMethod extends PMAConst with PMAMethod with PMPReadWriteMethodBare {
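  /** Generate the register-mapper description of the memory-mapped PMA window.
   *  Returns (cfg_map, addr_map, pma): the config-register fields, the
   *  address-register fields, and the decoded PMA entries used by the checker.
   */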
  def gen_mmpma_mapping(num: Int) = {
    val pmaCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmaCfgLogicIndex(i: Int) = (PMXLEN / 32) * (i / pmaCfgPerCSR)
    def pmaCfgIndex(i: Int) = (i / pmaCfgPerCSR)

    val pma = Wire(Vec(num, new PMPEntry))

    /* pma init value */
    val init_value = pma_init()

    val pmaCfgMerged = RegInit(init_value._1)
    val addr = RegInit(init_value._2)
    val mask = RegInit(init_value._3)
    val cfg = WireInit(pmaCfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    // the pma masks are implicit regs, kept only for timing optimization
    for (i <- pma.indices) {
      pma(i).gen(cfg(i), addr(i), mask(i))
    }

    val blankCfg = PMXLEN == 32
    val cfg_index_wrapper = (0 until num by 4).zip((0 until num by 4).map(a => blankCfg || (a % pmaCfgPerCSR == 0)))
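    // Each mapped CSR word packs pmaCfgPerCSR config entries. The read side
    // uses a ValidHold handshake so the value is presented once per read
    // transaction; "empty" aliases (notempty == false) read back as zero.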
    val cfg_map = (cfg_index_wrapper).map{ case(i, notempty) => {
      RegField.apply(n = PMXLEN, r = RegReadFn{(ivalid, oready) =>
        val r_ready = Wire(Bool())
        val o_valid = Wire(Bool())
        val v_reg = ValidHold(r_ready && ivalid, o_valid && oready, false.B)
        r_ready := !v_reg
        o_valid := v_reg

        if (notempty) { (r_ready, o_valid, pmaCfgMerged(pmaCfgIndex(i))) }
        else { (r_ready, o_valid, 0.U) }
      }, w = RegWriteFn((valid, data) => {
        if (notempty) { when (valid) { pmaCfgMerged(pmaCfgIndex(i)) := write_cfg_vec(mask, addr, i, pmaCfgMerged(pmaCfgIndex(i)))(data) } }
        true.B
      }), desc = RegFieldDesc(s"MMPMA_config_${i}", s"pma config register #${i}"))
    }}

    val addr_map = (0 until num).map{ i => {
      val next_cfg = if (i == 0) 0.U.asTypeOf(new PMPConfig()) else cfg(i-1)
      RegField(
        n = PMXLEN,
        r = ZeroExt(read_addr(cfg(i))(addr(i)), PMXLEN),
        w = RegWriteFn((valid, data) => {
          when (valid) { addr(i) := write_addr(next_cfg, mask(i))(data(addr(0).getWidth-1, 0), cfg(i), addr(i)) }
          true.B
        }),
        desc = RegFieldDesc(s"MMPMA_addr_${i}", s"pma addr register #${i}")
      )
    }}

    (cfg_map, addr_map, pma)
  }

}

trait PMAMethod extends PMAConst {
  /**
  def SimpleMemMapList = List(
      //     Base address      Top address       Width  Description    Mode (RWXIDSAC)
      MemMap("h00_0000_0000", "h00_0FFF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_1000_0000", "h00_1FFF_FFFF",   "h0", "QSPI_Flash",  "RWX"),
      MemMap("h00_2000_0000", "h00_2FFF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_3000_0000", "h00_3000_FFFF",   "h0", "DMA",         "RW"),
      MemMap("h00_3001_0000", "h00_3004_FFFF",   "h0", "GPU",         "RWC"),
      MemMap("h00_3005_0000", "h00_3006_FFFF",   "h0", "USB/SDMMC",   "RW"),
      MemMap("h00_3007_0000", "h00_30FF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_3100_0000", "h00_3111_FFFF",   "h0", "MMIO",        "RW"),
      MemMap("h00_3112_0000", "h00_37FF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_3800_0000", "h00_3800_FFFF",   "h0", "CLINT",       "RW"),
      MemMap("h00_3801_0000", "h00_3801_FFFF",   "h0", "BEU",         "RW"),
      MemMap("h00_3802_0000", "h00_3802_0FFF",   "h0", "DebugModule", "RWX"),
      MemMap("h00_3802_1000", "h00_3802_1FFF",   "h0", "MMPMA",       "RW"),
      MemMap("h00_3802_2000", "h00_3900_0000",   "h0", "Reserved",    ""),
      MemMap("h00_3900_0000", "h00_3900_1FFF",   "h0", "L3CacheCtrl", "RW"),
      MemMap("h00_3900_2000", "h00_39FF_FFFF",   "h0", "Reserved",    ""),
      MemMap("h00_3A00_0000", "h00_3FFF_FFFF",   "h0", "",            "RW"),
         Sub("h00_3A00_0000", "h00_3A00_0FFF",   "h0", "PLL0",        "RW"),
         Sub("h00_3A00_1000", "h00_3A7F_FFFF",   "h0", "Reserved",    "RW"),
         Sub("h00_3A80_0000", "h00_3AFF_FFFF",   "h0", "IMSIC(M)",    "RW"),
         Sub("h00_3B00_0000", "h00_3BFF_FFFF",   "h0", "IMSIC(S/VS)", "RW"),
         Sub("h00_3C00_0000", "h00_3FFF_FFFF",   "h0", "PLIC",        "RW"),
      MemMap("h00_4000_0000", "h00_7FFF_FFFF",   "h0", "PCIe",        "RW"),
      MemMap("h00_8000_0000", " MAX_ADDRESS ",   "h0", "DDR",         "RWXIDSA"),
    )
   */

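  /** Build the reset values for the PMA registers from the memory map above.
   *  Returns (cfg, addr, mask) init vectors; several PMPConfig fields are
   *  packed into each PMXLEN-wide cfg word.
   */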
  def pma_init() : (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    def genAddr(init_addr: BigInt) = {
      init_addr.U((PMPAddrBits - PMPOffBits).W)
    }
    def genMask(init_addr: BigInt, a: BigInt) = {
      val match_mask_addr = (init_addr << 1) | (a & 0x1) | (((1 << PlatformGrain) - 1) >> PMPOffBits)
      val mask = ((match_mask_addr & ~(match_mask_addr + 1)) << PMPOffBits) | ((1 << PMPOffBits) - 1)
      mask.U(PMPAddrBits.W)
    }

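    /* Worked example (assuming PMPOffBits = 2 and PlatformGrain = 12, i.e. a
     * 4 KiB grain): the 2 GiB NAPOT region [0x0, 0x8000_0000) used below has
     * addr = get_napot(0x0, 0x8000_0000) = 0xFFF_FFFF, so
     * match_mask_addr = (0xFFF_FFFF << 1) | 1 | 0x3FF = 0x1FFF_FFFF and
     * mask = ((0x1FFF_FFFF & ~0x2000_0000) << 2) | 0x3 = 0x7FFF_FFFF:
     * the low 31 bits are "don't care" when matching the 2^31-byte region.
     */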
    val num = NumPMA
    require(num >= 16)

    val cfg_list = ListBuffer[UInt]()
    val addr_list = ListBuffer[UInt]()
    val mask_list = ListBuffer[UInt]()
    def addPMA(base_addr: BigInt,
               range: BigInt = 0L, // only used for NAPOT mode
               l: Boolean = false,
               c: Boolean = false,
               atomic: Boolean = false,
               a: Int = 0,
               x: Boolean = false,
               w: Boolean = false,
               r: Boolean = false) = {
      val addr = if (a < 2) { shift_addr(base_addr) }
        else { get_napot(base_addr, range) }
      cfg_list.append(PMPConfigUInt(l, c, atomic, a, x, w, r))
      addr_list.append(genAddr(addr))
      mask_list.append(genMask(addr, a))
    }

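    // Entries are appended from the top of the address space downward and the
    // lists are reversed below, so entry 0 ends up at the lowest addresses and
    // each TOR (a = 1) entry uses the previous entry's address as its lower bound.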
    addPMA(0x0L, range = 0x1000000000000L, c = true, atomic = true, a = 3, x = true, w = true, r = true)
    addPMA(0x0L, range = 0x80000000L, a = 3, w = true, r = true)
    addPMA(0x3A000000L, a = 1)
    addPMA(0x39002000L, a = 1, w = true, r = true)
    addPMA(0x39000000L, a = 1)
    addPMA(0x38022000L, a = 1, w = true, r = true)
    addPMA(0x38021000L, a = 1, x = true, w = true, r = true)
    addPMA(0x38020000L, a = 1, w = true, r = true)
    addPMA(0x30050000L, a = 1, w = true, r = true) // FIXME: GPU space is cacheable?
    addPMA(0x30010000L, a = 1, w = true, r = true)
    addPMA(0x20000000L, a = 1, x = true, w = true, r = true)
    addPMA(0x10000000L, a = 1, w = true, r = true)
    addPMA(0)
    while (cfg_list.length < 16) {
      addPMA(0)
    }

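    // Merge the per-entry configs into PMXLEN-wide reset words: with foldRight,
    // entry 8*i lands in the least-significant PMPConfig-width slice of word i
    // (eight 8-bit fields per word when PMXLEN = 64).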
    val cfgInitMerge = Seq.tabulate(num / 8)(i => {
      cfg_list.reverse.drop(8 * i).take(8).foldRight(BigInt(0L)) { case (a, result) =>
        (result << a.getWidth) | a.litValue
      }.U(PMXLEN.W)
    })
    val addr = addr_list.reverse
    val mask = mask_list.reverse
    (VecInit(cfgInitMerge), VecInit(addr.toSeq), VecInit(mask.toSeq))
  }

  def get_napot(base: BigInt, range: BigInt): BigInt = {
    val PlatformGrainBytes = (1 << PlatformGrain)
    if ((base % PlatformGrainBytes) != 0) {
      println(s"base: 0x${base.toString(16)}")
    }
    if ((range % PlatformGrainBytes) != 0) {
      println(s"range: 0x${range.toString(16)}")
    }
    require((base % PlatformGrainBytes) == 0)
    require((range % PlatformGrainBytes) == 0)

    ((base + (range/2 - 1)) >> PMPOffBits)
  }

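  /* Example (with PMPOffBits = 2): get_napot(0x0, 0x8000_0000) returns
   * (0 + 0x4000_0000 - 1) >> 2 = 0xFFF_FFFF, the usual pmpaddr-style NAPOT
   * encoding where the run of trailing ones encodes the region size.
   */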
  def match_mask(paddr: UInt, cfg: PMPConfig) = {
    val match_mask_addr: UInt = Cat(paddr, cfg.a(0)).asUInt | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_addr & ~(match_mask_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  def shift_addr(addr: BigInt) = {
    addr >> 2
  }
}

trait PMACheckMethod extends PMPConst {
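  /** Turn a matched entry's attributes into a PMPRespBundle: ld/st/instr flag
   *  accesses that the attributes do not permit, mmio reports a non-cacheable
   *  region, and atomic reports whether the region supports AMOs.
   */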
  def pma_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd) && cfg.atomic) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := !cfg.c
    resp.atomic := cfg.atomic
    resp
  }

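  /** Check an access against every PMA entry in parallel and select the
   *  lowest-indexed hit. With leaveHitMux, the hit bits and attributes are
   *  registered first so the priority mux runs one cycle later.
   */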
  def pma_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmaEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmaEntries.size
    require(num == NumPMA)
    // PMA must always be checked and can never be ignored: properties such as
    // atomic and cacheable are attributes of the address, not protections,
    // so the entries must be initialized.
    require(pmaEntries.nonEmpty)

    val pmaDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    pmaEntries.zip(pmaDefault +: pmaEntries.take(num-1)).zipWithIndex.foreach{ case ((pma, last_pma), i) =>
      val is_match = pma.is_match(addr, size, lgMaxSize, last_pma)
      val aligned = pma.aligned(addr, size, lgMaxSize, last_pma)

      val cur = WireInit(pma)
      cur.cfg.r := aligned && pma.cfg.r
      cur.cfg.w := aligned && pma.cfg.w
      cur.cfg.x := aligned && pma.cfg.x
      cur.cfg.atomic := aligned && pma.cfg.atomic
      cur.cfg.c := aligned && pma.cfg.c

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

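    // Entry num is an always-matching fallback with all-zero attributes, so an
    // access that hits no real entry resolves to the default: non-cacheable
    // (mmio) and no permissions. ParallelPriorityMux favors the lowest index.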
    match_vec(num) := true.B
    cfg_vec(num) := pmaDefault
    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}