/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.backend.fu

import chisel3._
import chisel3.util._
import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegReadFn, RegWriteFn}
import utility.{ParallelPriorityMux, ValidHold, ZeroExt}
import xiangshan.cache.mmu.TlbCmd

import scala.collection.mutable.ListBuffer

/* Memory Mapped PMA */
case class MMPMAConfig
(
  address: BigInt,
  mask: BigInt,
  lgMaxSize: Int,
  sameCycle: Boolean,
  num: Int
)

trait PMAConst extends PMPConst

trait MMPMAMethod extends PMAConst with PMAMethod with PMPReadWriteMethodBare {
  def gen_mmpma_mapping(num: Int) = {
    val pmaCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmaCfgLogicIndex(i: Int) = (PMXLEN / 32) * (i / pmaCfgPerCSR)
    def pmaCfgIndex(i: Int) = i / pmaCfgPerCSR

    val pma = Wire(Vec(num, new PMPEntry))

    /* pma init value */
    val init_value = pma_init()

    val pmaCfgMerged = RegInit(init_value._1)
    val addr = RegInit(init_value._2)
    val mask = RegInit(init_value._3)
    val cfg = WireInit(pmaCfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    // pmaMask registers are implicit regs, kept only for timing optimization
    for (i <- pma.indices) {
      pma(i).gen(cfg(i), addr(i), mask(i))
    }

    // Config registers are mapped at every 4th entry (the RV32 CSR layout).
    // With PMXLEN == 64, only every other slot is backed by a real register
    // and the odd slots read as zero, mirroring how RV64 pmpcfg CSRs exist
    // only at even indices.
    val blankCfg = PMXLEN == 32
    val cfg_index_wrapper = (0 until num by 4).zip((0 until num by 4).map(a => blankCfg || (a % pmaCfgPerCSR == 0)))
    val cfg_map = cfg_index_wrapper.map { case (i, notempty) =>
      RegField.apply(n = PMXLEN, r = RegReadFn { (ivalid, oready) =>
        // One-entry skid buffer: accept a read request when idle, then hold
        // the response valid until the bus consumes it.
        val r_ready = Wire(Bool())
        val o_valid = Wire(Bool())
        val v_reg = ValidHold(r_ready && ivalid, o_valid && oready, false.B)
        r_ready := !v_reg
        o_valid := v_reg

        if (notempty) { (r_ready, o_valid, pmaCfgMerged(pmaCfgIndex(i))) }
        else { (r_ready, o_valid, 0.U) }
      }, w = RegWriteFn((valid, data) => {
        if (notempty) { when (valid) { pmaCfgMerged(pmaCfgIndex(i)) := write_cfg_vec(mask, addr, i, pmaCfgMerged(pmaCfgIndex(i)))(data) } }
        true.B
      }), desc = RegFieldDesc(s"MMPMA_config_${i}", s"pma config register #${i}"))
    }

    val addr_map = (0 until num).map { i =>
      val next_cfg = if (i == 0) 0.U.asTypeOf(new PMPConfig()) else cfg(i - 1)
      RegField(
        n = PMXLEN,
        r = ZeroExt(read_addr(cfg(i))(addr(i)), PMXLEN),
        w = RegWriteFn((valid, data) => {
          when (valid) { addr(i) := write_addr(next_cfg, mask(i))(data(addr(0).getWidth - 1, 0), cfg(i), addr(i)) }
          true.B
        }),
        desc = RegFieldDesc(s"MMPMA_addr_${i}", s"pma addr register #${i}")
      )
    }

    (cfg_map, addr_map, pma)
  }
}
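
/* A minimal usage sketch for gen_mmpma_mapping (illustrative only: the real
 * wiring lives in the SoC integration code, and the register node, device
 * name, and offsets below are assumptions, not part of this file; imports
 * from rocket-chip's diplomacy/tilelink packages are assumed):
 *
 *   val node = TLRegisterNode(
 *     address   = Seq(AddressSet(mmpma.address, mmpma.mask)),
 *     device    = new SimpleDevice("mmpma", Nil),
 *     beatBytes = PMXLEN / 8)
 *   val (cfg_map, addr_map, pma) = gen_mmpma_mapping(NumPMA)
 *   node.regmap(
 *     0x0000 -> cfg_map,
 *     0x0100 -> addr_map)
 */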

trait PMAMethod extends PMAConst {
  /**
  def SimpleMemMapList = List(
    //     Base address       Top address        Width  Description    Mode (RWXIDSAC)
    MemMap("h00_0000_0000",   "h00_0FFF_FFFF",   "h0",  "Reserved",    "RW"),
    MemMap("h00_1000_0000",   "h00_1FFF_FFFF",   "h0",  "QSPI_Flash",  "RWX"),
    MemMap("h00_2000_0000",   "h00_2FFF_FFFF",   "h0",  "Reserved",    "RW"),
    MemMap("h00_3000_0000",   "h00_3000_FFFF",   "h0",  "DMA",         "RW"),
    MemMap("h00_3001_0000",   "h00_3004_FFFF",   "h0",  "GPU",         "RWC"),
    MemMap("h00_3005_0000",   "h00_3006_FFFF",   "h0",  "USB/SDMMC",   "RW"),
    MemMap("h00_3007_0000",   "h00_30FF_FFFF",   "h0",  "Reserved",    "RW"),
    MemMap("h00_3100_0000",   "h00_3111_FFFF",   "h0",  "MMIO",        "RW"),
    MemMap("h00_3112_0000",   "h00_37FF_FFFF",   "h0",  "Reserved",    "RW"),
    MemMap("h00_3800_0000",   "h00_3800_FFFF",   "h0",  "CLINT",       "RW"),
    MemMap("h00_3801_0000",   "h00_3801_FFFF",   "h0",  "BEU",         "RW"),
    MemMap("h00_3802_0000",   "h00_3802_0FFF",   "h0",  "DebugModule", "RWX"),
    MemMap("h00_3802_1000",   "h00_3802_1FFF",   "h0",  "MMPMA",       "RW"),
    MemMap("h00_3802_2000",   "h00_3900_0000",   "h0",  "Reserved",    ""),
    MemMap("h00_3900_0000",   "h00_3900_1FFF",   "h0",  "L3CacheCtrl", "RW"),
    MemMap("h00_3900_2000",   "h00_39FF_FFFF",   "h0",  "Reserved",    ""),
    MemMap("h00_3A00_0000",   "h00_3A00_0FFF",   "h0",  "PLL0",        "RW"),
    MemMap("h00_3A00_1000",   "h00_3BFF_FFFF",   "h0",  "Reserved",    ""),
    MemMap("h00_3C00_0000",   "h00_3FFF_FFFF",   "h0",  "PLIC",        "RW"),
    MemMap("h00_4000_0000",   "h00_7FFF_FFFF",   "h0",  "PCIe",        "RW"),
    MemMap("h00_8000_0000",   "h0F_FFFF_FFFF",   "h0",  "DDR",         "RWXIDSA"),
  )
  */
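
  /* A worked example of the NAPOT encoding produced below, taken from the
   * first addPMA entry and assuming PMPOffBits == 2 (the 4-byte PMP granule,
   * consistent with shift_addr's `>> 2`):
   *
   *   base = 0x0, range = 0x10_0000_0000 (64 GiB)
   *   addr = get_napot(base, range) = (0x0 + 0x7_FFFF_FFFF) >> 2 = 0x1_FFFF_FFFF
   *
   * i.e. the standard pmpaddr NAPOT form (base >> 2) | ((range/2 - 1) >> 2).
   * genMask(addr, 3) then recovers the byte-level match mask 0xF_FFFF_FFFF,
   * so this entry matches any paddr in [0x0, 0x10_0000_0000).
   */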
  def pma_init(): (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    def genAddr(init_addr: BigInt) = {
      init_addr.U((PMPAddrBits - PMPOffBits).W)
    }
    def genMask(init_addr: BigInt, a: BigInt) = {
      val match_mask_addr = (init_addr << 1) | (a & 0x1) | (((1 << PlatformGrain) - 1) >> PMPOffBits)
      val mask = ((match_mask_addr & ~(match_mask_addr + 1)) << PMPOffBits) | ((1 << PMPOffBits) - 1)
      mask.U(PMPAddrBits.W)
    }

    val num = NumPMA
    require(num >= 16)

    val cfg_list = ListBuffer[UInt]()
    val addr_list = ListBuffer[UInt]()
    val mask_list = ListBuffer[UInt]()
    def addPMA(base_addr: BigInt,
               range: BigInt = 0L, // only used for NAPOT (a == 3) mode
               l: Boolean = false,
               c: Boolean = false,
               atomic: Boolean = false,
               a: Int = 0,
               x: Boolean = false,
               w: Boolean = false,
               r: Boolean = false) = {
      val addr = if (a < 2) { shift_addr(base_addr) }
        else { get_napot(base_addr, range) }
      cfg_list.append(PMPConfigUInt(l, c, atomic, a, x, w, r))
      addr_list.append(genAddr(addr))
      mask_list.append(genMask(addr, a))
    }

    // The lists are reversed below, so the priority mux checks the most
    // specific (last-added) entry first. For a TOR (a = 1) entry, base_addr
    // is the exclusive upper bound of its region; the lower bound is the
    // base_addr of the call on the next line.
    addPMA(0x0L, range = 0x1000000000L, c = true, atomic = true, a = 3, x = true, w = true, r = true)
    addPMA(0x0L, range = 0x80000000L, a = 3, w = true, r = true)
    addPMA(0x3C000000L, a = 1)
    addPMA(0x3A001000L, a = 1, w = true, r = true)
    addPMA(0x3A000000L, a = 1)
    addPMA(0x39002000L, a = 1, w = true, r = true)
    addPMA(0x39000000L, a = 1)
    addPMA(0x38022000L, a = 1, w = true, r = true)
    addPMA(0x38021000L, a = 1, x = true, w = true, r = true)
    addPMA(0x38020000L, a = 1, w = true, r = true)
    addPMA(0x30050000L, a = 1, w = true, r = true) // FIXME: should GPU space be cacheable?
    addPMA(0x30010000L, a = 1, w = true, r = true)
    addPMA(0x20000000L, a = 1, x = true, w = true, r = true)
    addPMA(0x10000000L, a = 1, w = true, r = true)
    addPMA(0)
    // pad the remaining entries with OFF so the vectors match NumPMA
    while (cfg_list.length < num) {
      addPMA(0)
    }

    val cfgInitMerge = Seq.tabulate(num / 8)(i => {
      cfg_list.reverse.drop(8 * i).take(8).foldRight(BigInt(0L)) { case (a, result) =>
        (result << a.getWidth) | a.litValue
      }.U(PMXLEN.W)
    })
    val addr = addr_list.reverse
    val mask = mask_list.reverse
    (VecInit(cfgInitMerge), VecInit(addr.toSeq), VecInit(mask.toSeq))
  }

  def get_napot(base: BigInt, range: BigInt): BigInt = {
    val PlatformGrainBytes = 1 << PlatformGrain
    if ((base % PlatformGrainBytes) != 0) {
      println(s"PMA: NAPOT base is not aligned: 0x${base.toString(16)}")
    }
    if ((range % PlatformGrainBytes) != 0) {
      println(s"PMA: NAPOT range is not aligned: 0x${range.toString(16)}")
    }
    require((base % PlatformGrainBytes) == 0)
    require((range % PlatformGrainBytes) == 0)

    (base + (range / 2 - 1)) >> PMPOffBits
  }

  def match_mask(paddr: UInt, cfg: PMPConfig) = {
    val match_mask_addr: UInt = Cat(paddr, cfg.a(0)).asUInt | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_addr & ~(match_mask_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  // convert a byte address into the word-granular pmpaddr register format
  def shift_addr(addr: BigInt) = {
    addr >> 2
  }
}

trait PMACheckMethod extends PMPConst {
  def pma_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd) && cfg.atomic) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := !cfg.c
    resp.atomic := cfg.atomic
    resp
  }

  def pma_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmaEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmaEntries.size
    require(num == NumPMA)
    // PMA must always be checked and can never be ignored: unlike PMP, fields
    // such as atomic and cacheable are attributes rather than protection, so
    // the entries must carry a valid initialization.
    require(pmaEntries.nonEmpty)

    val pmaDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    val match_vec = Wire(Vec(num + 1, Bool()))
    val cfg_vec = Wire(Vec(num + 1, new PMPEntry()))

    pmaEntries.zip(pmaDefault +: pmaEntries.take(num - 1)).zipWithIndex.foreach { case ((pma, last_pma), i) =>
      val is_match = pma.is_match(addr, size, lgMaxSize, last_pma)
      val aligned = pma.aligned(addr, size, lgMaxSize, last_pma)

      // strip the permission/attribute bits when the access is not aligned
      val cur = WireInit(pma)
      cur.cfg.r := aligned && pma.cfg.r
      cur.cfg.w := aligned && pma.cfg.w
      cur.cfg.x := aligned && pma.cfg.x
      cur.cfg.atomic := aligned && pma.cfg.atomic
      cur.cfg.c := aligned && pma.cfg.c

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // the lowest-indexed matching entry wins; fall through to the default
    match_vec(num) := true.B
    cfg_vec(num) := pmaDefault
    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
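
/* A minimal sketch of how the two methods above compose in a caller
 * (hypothetical code: the actual users are the TLB/PMP check pipelines
 * elsewhere in the backend):
 *
 *   val entry = pma_match_res(leaveHitMux = false, valid = true.B)(
 *     addr, size, pma, mode, lgMaxSize)
 *   val resp  = pma_check(cmd, entry.cfg)
 *   // resp.mmio and resp.atomic report attributes of the matched region;
 *   // resp.ld / resp.st / resp.instr flag permission violations.
 */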