/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.backend.fu

import chisel3._
import chisel3.util._
import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegReadFn, RegWriteFn}
import utils.{ParallelPriorityMux, ZeroExt}
import xiangshan.cache.mmu.TlbCmd

/** Parameters of the memory-mapped PMA register window.
  *
  * @param address   base address of the register window
  * @param mask      address mask used by the bus to match the window
  * @param lgMaxSize log2 of the maximum access size to check
  * @param sameCycle whether the check result is produced combinationally
  * @param num       number of PMA entries exposed through the window
  */
case class MMPMAConfig
(
  address: BigInt,
  mask: BigInt,
  lgMaxSize: Int,
  sameCycle: Boolean,
  num: Int
)

trait PMAConst extends PMPConst

/** Generates the register-map entries (cfg + addr CSilhouettes) that expose the PMA
  * table over a memory-mapped bus (e.g. for configuration by a remote agent).
  */
trait MMPMAMethod extends PMAConst with PMAMethod with PMPReadWriteMethodBare {
  /** Builds the registers backing `num` PMA entries and their RegField mappings.
    *
    * @param num number of PMA entries
    * @return (cfg RegFields, addr RegFields, generated PMPEntry vector)
    */
  def gen_mmpma_mapping(num: Int) = {
    // How many PMPConfig records pack into one PMXLEN-wide CSR.
    val pmaCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmaCfgLogicIndex(i: Int) = (PMXLEN / 32) * (i / pmaCfgPerCSR)
    def pmaCfgIndex(i: Int) = (i / pmaCfgPerCSR)

    val pma = Wire(Vec(num, new PMPEntry))

    /* pma init value */
    val init_value = pma_init()

    val pmaCfgMerged = RegInit(init_value._1)
    val addr = RegInit(init_value._2)
    val mask = RegInit(init_value._3)
    val cfg = WireInit(pmaCfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    // pmaMask are implicit regs that are just used for timing optimization
    for (i <- pma.indices) {
      pma(i).gen(cfg(i), addr(i), mask(i))
    }

    // When PMXLEN == 32, every 4-entry stride maps to a real cfg CSR; otherwise
    // only the strides aligned to pmaCfgPerCSR are backed by storage, the rest
    // read as zero and ignore writes.
    val blankCfg = PMXLEN == 32
    val cfg_index_wrapper = (0 until num by 4).zip((0 until num by 4).map(a => blankCfg || (a % pmaCfgPerCSR == 0)))
    val cfg_map = (cfg_index_wrapper).map{ case(i, notempty) => {
      RegField.apply(n = PMXLEN, r = RegReadFn((ivalid, oready) =>
        if (notempty) { (true.B, ivalid, pmaCfgMerged(pmaCfgIndex(i))) }
        else { (true.B, ivalid, 0.U) }
      ), w = RegWriteFn((valid, data) => {
        if (notempty) { when (valid) { pmaCfgMerged(pmaCfgIndex(i)) := write_cfg_vec(mask, addr, i)(data) } }
        true.B
      }), desc = RegFieldDesc(s"MMPMA_config_${i}", s"pma config register #${i}"))
    }}

    val addr_map = (0 until num).map{ i => {
      // Entry 0 has no predecessor; use an all-zero config as its "previous" cfg
      // (relevant for TOR-mode address interpretation).
      val next_cfg = if (i == 0) 0.U.asTypeOf(new PMPConfig()) else cfg(i-1)
      RegField(
        n = PMXLEN,
        r = ZeroExt(read_addr(cfg(i))(addr(i)), PMXLEN),
        w = RegWriteFn((valid, data) => {
          when (valid) { addr(i) := write_addr(next_cfg, mask(i))(data(addr(0).getWidth-1, 0), cfg(i), addr(i)) }
          true.B
        }),
        desc = RegFieldDesc(s"MMPMA_addr_${i}", s"pma addr register #${i}")
      )
    }}

    (cfg_map, addr_map, pma)
  }

}

/** Builds the reset-time contents of the PMA table from the SoC memory map below. */
trait PMAMethod extends PMAConst {
  /**
  def SimpleMemMapList = List(
    //     Base address      Top address       Width  Description    Mode (RWXIDSAC)
    MemMap("h00_0000_0000",  "h00_0FFF_FFFF",   "h0", "Reserved",    "RW"),
    MemMap("h00_1000_0000",  "h00_1FFF_FFFF",   "h0", "QSPI_Flash",  "RWX"),
    MemMap("h00_2000_0000",  "h00_2FFF_FFFF",   "h0", "Reserved",    "RW"),
    MemMap("h00_3000_0000",  "h00_3000_FFFF",   "h0", "DMA",         "RW"),
    MemMap("h00_3001_0000",  "h00_3004_FFFF",   "h0", "GPU",         "RWC"),
    MemMap("h00_3005_0000",  "h00_3006_FFFF",   "h0", "USB/SDMMC",   "RW"),
    MemMap("h00_3007_0000",  "h00_30FF_FFFF",   "h0", "Reserved",    "RW"),
    MemMap("h00_3100_0000",  "h00_3111_FFFF",   "h0", "MMIO",        "RW"),
    MemMap("h00_3112_0000",  "h00_37FF_FFFF",   "h0", "Reserved",    "RW"),
    MemMap("h00_3800_0000",  "h00_3800_FFFF",   "h0", "CLINT",       "RW"),
    MemMap("h00_3801_0000",  "h00_3801_FFFF",   "h0", "BEU",         "RW"),
    MemMap("h00_3802_0000",  "h00_3802_0FFF",   "h0", "DebugModule", "RWX"),
    MemMap("h00_3802_1000",  "h00_3802_11FF",   "h0", "MMPMA",       "RW"),
    MemMap("h00_3802_1200",  "h00_3900_0FFF",   "h0", "Reserved",    ""),
    MemMap("h00_3900_1000",  "h00_3900_103F",   "h0", "Core_reset",  "RW"),
    MemMap("h00_3900_1040",  "h00_39FF_FFFF",   "h0", "Reserved",    ""),
    MemMap("h00_3A00_0000",  "h00_3A00_003F",   "h0", "PLL0",        "RW"),
    MemMap("h00_3A00_0040",  "h00_3BFF_FFFF",   "h0", "Reserved",    ""),
    MemMap("h00_3C00_0000",  "h00_3FFF_FFFF",   "h0", "PLIC",        "RW"),
    MemMap("h00_4000_0000",  "h00_7FFF_FFFF",   "h0", "PCIe",        "RW"),
    MemMap("h00_8000_0000",  "h0F_FFFF_FFFF",   "h0", "DDR",         "RWXIDSA"),
  )
  */

  /** Returns the reset values of (merged cfg CSRs, addr regs, mask regs)
    * encoding the memory map above. Entries are filled from index num-1
    * downward, so earlier map rows sit at lower indices with lower priority.
    */
  def pma_init() : (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    // the init value is zero
    // from 0 to num(default 16) - 1, lower priority
    // according to simple map, 9 entries is needed, pick 6-14, leave 0-5 & 15 unused
    val num = NumPMA
    require(num >= 16)
    val cfg = WireInit(0.U.asTypeOf(Vec(num, new PMPConfig())))

    val addr = Wire(Vec(num, UInt((PMPAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(num, UInt(PMPAddrBits.W)))
    addr := DontCare
    mask := DontCare

    var idx = num-1

    // TODO: turn to napot to save entries
    // use tor instead of napot, for napot may be confusing and hard to understand
    // NOTE: all the addr space are default set to DDR, RWXCA
    idx = idx - 1
    addr(idx) := shift_addr(0xFFFFFFFFFL) // all the addr are default ddr, which means rwxca
    cfg(idx).a := 3.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B; cfg(idx).c := true.B; cfg(idx).atomic := true.B
    mask(idx) := match_mask(addr(idx), cfg(idx))
    idx = idx - 1

    // NOTE: (0x0_0000_0000L, 0x0_8000_0000L) are default set to MMIO, only RW
    addr(idx) := get_napot(0x00000000L, 0x80000000L)
    cfg(idx).a := 3.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    mask(idx) := match_mask(addr(idx), cfg(idx))
    idx = idx - 1

    // The remaining entries are TOR mode: each addr is a region's top boundary,
    // and the cfg bits describe the region below it down to the previous entry.
    addr(idx) := shift_addr(0x3C000000)
    cfg(idx).a := 1.U
    idx = idx - 1

    addr(idx) := shift_addr(0x3A000040)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x3A000000)
    cfg(idx).a := 1.U
    idx = idx - 1

    addr(idx) := shift_addr(0x39001040)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x39001000)
    cfg(idx).a := 1.U
    idx = idx - 1

    addr(idx) := shift_addr(0x38021200)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x38021000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x38020000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr( 0x30050000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).c := true.B
    idx = idx - 1

    addr(idx) := shift_addr( 0x30010000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr( 0x20000000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B
    idx = idx - 1

    addr(idx) := shift_addr( 0x10000000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0)

    require(idx >= 0)

    val cfgInitMerge = cfg.asTypeOf(Vec(num/8, UInt(PMXLEN.W)))
    (cfgInitMerge, addr, mask)
  }

  /** Encodes a NAPOT region [base, base+range) into pmpaddr format.
    * Both base and range must be aligned to the platform grain.
    */
  def get_napot(base: BigInt, range: BigInt) = {
    val PlatformGrainBytes = (1 << PlatformGrain)
    if ((base % PlatformGrainBytes) != 0) {
      println(s"base: 0x${base.toString(16)}")
    }
    if ((range % PlatformGrainBytes) != 0) {
      println(s"range: 0x${range.toString(16)}")
    }
    require((base % PlatformGrainBytes) == 0)
    require((range % PlatformGrainBytes) == 0)

    ((base + (range/2 - 1)) >> PMPOffBits).U
  }

  /** Computes the NAPOT match mask for an entry (low bits below the grain
    * are always masked; cfg.a(0) extends the trailing-ones run for NAPOT).
    */
  def match_mask(paddr: UInt, cfg: PMPConfig) = {
    val match_mask_addr: UInt = Cat(paddr, cfg.a(0)).asUInt() | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_addr & ~(match_mask_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  /** Converts a byte address to pmpaddr register format (address >> 2). */
  def shift_addr(addr: BigInt) = {
    (addr >> 2).U
  }
}

/** Turns a matched PMA entry's attribute bits into an access-check response. */
trait PMACheckMethod extends PMPConst {
  /** Derives load/store/fetch fault bits and the mmio attribute for `cmd`
    * from the matched entry's cfg.
    */
  def pma_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAtom(cmd) && !cfg.r
    // NOTE: && binds tighter than ||, so an atomic op only counts as a store
    // here when the region is atomic-capable.
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAtom(cmd) && cfg.atomic) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := !cfg.c
    resp
  }

  /** Priority-matches `addr` against the PMA entries and returns the winning
    * entry with its permission bits gated by alignment.
    *
    * @param leaveHitMux register the match vector / cfgs before the final mux
    *                    (one extra cycle, better timing)
    * @param valid       enable for the optional pipeline registers
    */
  def pma_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmaEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmaEntries.size
    require(num == NumPMA)
    // pma should always be checked, could not be ignored
    // like amo and cached, it is the attribute not protection
    // so it must have initialization.
    require(!pmaEntries.isEmpty)

    // Extra slot `num` is an all-zero default that always matches with lowest priority.
    val pmaDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    pmaEntries.zip(pmaDefault +: pmaEntries.take(num-1)).zipWithIndex.foreach{ case ((pma, last_pma), i) =>
      val is_match = pma.is_match(addr, size, lgMaxSize, last_pma)
      val aligned = pma.aligned(addr, size, lgMaxSize, last_pma)

      // A misaligned access forfeits all permissions of the matched entry.
      val cur = WireInit(pma)
      cur.cfg.r := aligned && pma.cfg.r
      cur.cfg.w := aligned && pma.cfg.w
      cur.cfg.x := aligned && pma.cfg.x
      cur.cfg.atomic := aligned && pma.cfg.atomic
      cur.cfg.c := aligned && pma.cfg.c

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    match_vec(num) := true.B
    cfg_vec(num) := pmaDefault
    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, init = false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}