/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.fu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.internal.naming.chiselName
import chisel3.util._
import utils.ParallelPriorityMux
import xiangshan.{HasXSParameter, XSModule}
import xiangshan.backend.fu.util.HasCSRConst
import xiangshan.cache.mmu.TlbCmd

trait PMAMethod extends HasXSParameter with PMPConst { this: XSModule =>
  /**
  def SimpleMemMapList = List(
    //     Base address      Top address      Width  Description    Mode (RWXIDSAC)
    MemMap("h00_0000_0000", "h00_0FFF_FFFF",  "h0",  "Reserved",    "RW"),
    MemMap("h00_1000_0000", "h00_1FFF_FFFF",  "h0",  "QSPI_Flash",  "RWX"),
    MemMap("h00_2000_0000", "h00_2FFF_FFFF",  "h0",  "Reserved",    "RW"),
    MemMap("h00_3000_0000", "h00_3000_FFFF",  "h0",  "DMA",         "RW"),
    MemMap("h00_3001_0000", "h00_3004_FFFF",  "h0",  "GPU",         "RWC"),
    MemMap("h00_3005_0000", "h00_3006_FFFF",  "h0",  "USB/SDMMC",   "RW"),
    MemMap("h00_3007_0000", "h00_30FF_FFFF",  "h0",  "Reserved",    "RW"),
    MemMap("h00_3100_0000", "h00_3111_FFFF",  "h0",  "MMIO",        "RW"),
    MemMap("h00_3112_0000", "h00_37FF_FFFF",  "h0",  "Reserved",    "RW"),
    MemMap("h00_3800_0000", "h00_3800_FFFF",  "h0",  "CLINT",       "RW"),
    MemMap("h00_3801_0000", "h00_3801_FFFF",  "h0",  "BEU",         "RW"),
    MemMap("h00_3802_0000", "h00_3802_0FFF",  "h0",  "DebugModule", "RWX"),
    MemMap("h00_3802_1000", "h00_3900_0FFF",  "h0",  "Reserved",    ""),
    MemMap("h00_3900_1000", "h00_3900_101F",  "h0",  "Core_reset",  "RW"),
    MemMap("h00_3900_1020", "h00_39FF_FFFF",  "h0",  "Reserved",    ""),
    MemMap("h00_3A00_0000", "h00_3A00_0020",  "h0",  "PLL0",        "RW"),
    MemMap("h00_3A00_0020", "h00_3BFF_FFFF",  "h0",  "Reserved",    ""),
    MemMap("h00_3C00_0000", "h00_3FFF_FFFF",  "h0",  "PLIC",        "RW"),
    MemMap("h00_4000_0000", "h00_7FFF_FFFF",  "h0",  "PCIe",        "RW"),
    MemMap("h00_8000_0000", "h0F_FFFF_FFFF",  "h0",  "DDR",         "RWXIDSA"),
  )
  */
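
  // Illustrative note (added commentary, not from the original source): pma_init below
  // turns the region boundaries of the map above into pmpaddr-style words. As in the
  // RISC-V PMP encoding, an address register holds paddr >> 2, so the DDR base
  // h00_8000_0000 is written as shift_addr(0x80000000L) = 0x2000_0000. A TOR entry i
  // then matches paddr in [addr(i-1) << 2, addr(i) << 2), while a NAPOT entry encodes
  // its size in the trailing ones of the register, e.g. get_napot(0x0L, 0x80000000L)
  // = 0x0FFF_FFFF covers the 2 GiB window starting at address 0.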

  def pma_init() : (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    // the init value is zero
    // entries are indexed from 0 to num (default 16) - 1; a larger index means lower priority
    // according to the map above, 14 entries are needed: pick 1-14, leave 0 and 15 unused

    val num = NumPMA
    require(num >= 16)
    val cfg = WireInit(0.U.asTypeOf(Vec(num, new PMPConfig())))

    val addr = Wire(Vec(num, UInt((PAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(num, UInt(PAddrBits.W)))
    addr := DontCare
    mask := DontCare

    var idx = num-1

    // TODO: turn to napot to save entries
    // use tor instead of napot, for napot may be confusing and hard to understand
    // NOTE: the whole address space defaults to DDR, i.e. RWXCA
    idx = idx - 1
    addr(idx) := shift_addr(0xFFFFFFFFFL) // everything not covered below is treated as DDR, which means rwxca
    cfg(idx).a := 3.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B; cfg(idx).c := true.B; cfg(idx).atomic := true.B
    mask(idx) := match_mask(addr(idx), cfg(idx))
    idx = idx - 1

    // NOTE: [0x0_0000_0000, 0x0_8000_0000) defaults to MMIO, RW only
    addr(idx) := get_napot(0x00000000L, 0x80000000L)
    cfg(idx).a := 3.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    mask(idx) := match_mask(addr(idx), cfg(idx))
    idx = idx - 1

    addr(idx) := shift_addr(0x3C000000)
    cfg(idx).a := 1.U
    idx = idx - 1

    addr(idx) := shift_addr(0x3A000040)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x3A000000)
    cfg(idx).a := 1.U
    idx = idx - 1

    addr(idx) := shift_addr(0x39001040)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x39001000)
    cfg(idx).a := 1.U
    idx = idx - 1

    addr(idx) := shift_addr(0x38021000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x38020000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x30050000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).c := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x30010000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x20000000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x10000000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0)

    require(idx >= 0)

    val cfgInitMerge = cfg.asTypeOf(Vec(num/8, UInt(XLEN.W)))
    (cfgInitMerge, addr, mask)
  }

  def get_napot(base: BigInt, range: BigInt) = {
    val PlatformGrainBytes = (1 << PlatformGrain)
    if ((base % PlatformGrainBytes) != 0) {
      println("base: 0x" + base.toString(16) + " is not aligned to the platform grain")
    }
    if ((range % PlatformGrainBytes) != 0) {
      println("range: 0x" + range.toString(16) + " is not aligned to the platform grain")
    }
    require((base % PlatformGrainBytes) == 0)
    require((range % PlatformGrainBytes) == 0)

    ((base + (range/2 - 1)) >> PMPOffBits).U
  }

  def match_mask(paddr: UInt, cfg: PMPConfig) = {
    val match_mask_addr: UInt = Cat(paddr, cfg.a(0)).asUInt() | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_addr & ~(match_mask_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  def shift_addr(addr: BigInt) = {
    (addr >> 2).U
  }
}

trait PMACheckMethod extends HasXSParameter with HasCSRConst { this: PMPChecker =>
  def pma_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAtom(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAtom(cmd) && cfg.atomic) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := !cfg.c
    resp
  }

  def pma_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmaEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmaEntries.size
    require(num == NumPMA)
    // PMA must always be checked and can not be ignored:
    // fields like amo and cached describe attributes rather than protection,
    // so the PMA entries must always be initialized.
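    // Illustrative note (added commentary, not from the original source): the lookup below
    // builds num+1 candidate slots. Slot i carries entry i's attributes, gated by the
    // alignment check; slot num is an all-zero default that always matches, so a request
    // hitting no configured entry falls back to "no r/w/x/c/atomic", i.e. it is reported
    // as MMIO and faults on any access. ParallelPriorityMux returns the lowest-index hit.
    // With leaveHitMux, the per-entry hit bits and attributes are registered (enabled by
    // valid) before the mux, adding one cycle of latency to the lookup.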
    require(!pmaEntries.isEmpty)

    val pmaDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    pmaEntries.zip(pmaDefault +: pmaEntries.take(num-1)).zipWithIndex.foreach{ case ((pma, last_pma), i) =>
      val is_match = pma.is_match(addr, size, lgMaxSize, last_pma)
      val aligned = pma.aligned(addr, size, lgMaxSize, last_pma)

      val cur = WireInit(pma)
      cur.cfg.r := aligned && pma.cfg.r
      cur.cfg.w := aligned && pma.cfg.w
      cur.cfg.x := aligned && pma.cfg.x
      cur.cfg.atomic := aligned && pma.cfg.atomic
      cur.cfg.c := aligned && pma.cfg.c

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    match_vec(num) := true.B
    cfg_vec(num) := pmaDefault
    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, init = false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
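
// A minimal, illustrative sketch (added for this write-up, not part of the XiangShan build):
// plain-Scala arithmetic mirroring shift_addr/get_napot above, showing how the byte addresses
// of SimpleMemMapList become the pmpaddr-style words that pma_init writes. The object name,
// helper names and printed values are assumptions for demonstration only.
object PMAEncodingExample extends App {
  // pmpaddr-style registers drop the low two address bits (PMPOffBits = 2 assumed here)
  def shiftAddr(addr: BigInt): BigInt = addr >> 2
  // NAPOT: a power-of-two region [base, base+range) is encoded as (base + range/2 - 1) >> 2,
  // which equals (base | (range/2 - 1)) >> 2 when base is aligned to range
  def napotAddr(base: BigInt, range: BigInt): BigInt = (base + (range / 2 - 1)) >> 2

  // TOR top for the DDR base h00_8000_0000 -> 0x20000000
  println("shift_addr(0x8000_0000)     = 0x" + shiftAddr(BigInt("80000000", 16)).toString(16))
  // NAPOT word covering the 2 GiB MMIO window [0x0, 0x8000_0000) -> 0xfffffff (28 trailing ones)
  println("get_napot(0x0, 0x8000_0000) = 0x" + napotAddr(0, BigInt("80000000", 16)).toString(16))
}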