/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.backend.fu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.internal.naming.chiselName
import chisel3.util._
import xiangshan.{HasXSParameter, XSModule}
import xiangshan.backend.fu.util.HasCSRConst
import xiangshan.cache.mmu.TlbCmd

trait PMAMethod extends HasXSParameter with PMPConst { this: XSModule =>
  /**
  def SimpleMemMapList = List(
    //     Base address       Top address        Width  Description    Mode (RWXIDSAC)
    MemMap("h00_0000_0000",   "h00_0FFF_FFFF",   "h0",  "Reserved",    "RW"),
    MemMap("h00_1000_0000",   "h00_1FFF_FFFF",   "h0",  "QSPI_Flash",  "RWX"),
    MemMap("h00_2000_0000",   "h00_2FFF_FFFF",   "h0",  "Reserved",    "RW"),
    MemMap("h00_3000_0000",   "h00_3000_FFFF",   "h0",  "DMA",         "RW"),
    MemMap("h00_3001_0000",   "h00_3004_FFFF",   "h0",  "GPU",         "RWC"),
    MemMap("h00_3005_0000",   "h00_3006_FFFF",   "h0",  "USB/SDMMC",   "RW"),
    MemMap("h00_3007_0000",   "h00_30FF_FFFF",   "h0",  "Reserved",    "RW"),
    MemMap("h00_3100_0000",   "h00_3111_FFFF",   "h0",  "MMIO",        "RW"),
    MemMap("h00_3112_0000",   "h00_37FF_FFFF",   "h0",  "Reserved",    "RW"),
    MemMap("h00_3800_0000",   "h00_3800_FFFF",   "h0",  "CLINT",       "RW"),
    MemMap("h00_3801_0000",   "h00_3801_FFFF",   "h0",  "BEU",         "RW"),
    MemMap("h00_3802_0000",   "h00_3802_0FFF",   "h0",  "DebugModule", "RWX"),
    MemMap("h00_3802_1000",   "h00_3BFF_FFFF",   "h0",  "Reserved",    ""),
    MemMap("h00_3C00_0000",   "h00_3FFF_FFFF",   "h0",  "PLIC",        "RW"),
    MemMap("h00_4000_0000",   "h00_7FFF_FFFF",   "h0",  "PCIe",        "RW"),
    MemMap("h00_8000_0000",   "h1F_FFFF_FFFF",   "h0",  "DDR",         "RWXIDSA"),
  )
  */

  def pma_init() : (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    // the init value is zero
    // entries are indexed from 0 to num (default 16) - 1; a larger index means lower priority
    // according to the simple map above, 9 entries are needed: pick 6-14, leave 0-5 & 15 unused
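    // TOR encoding reminder: with cfg(i).a = 1 (TOR), entry i matches physical addresses
    // in [addr(i-1) << 2, addr(i) << 2); each addr holds the physical address shifted
    // right by 2 (see shift_addr below). addr(5) is set to 0 only to serve as the lower
    // bound of entry 6.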

    val num = NumPMA
    require(num >= 16)
    val cfg = WireInit(0.U.asTypeOf(Vec(num, new PMPConfig())))

    val addr = Wire(Vec(num, UInt((PAddrBits - PMPOffBits).W)))
    val mask = Wire(Vec(num, UInt(PAddrBits.W)))
    addr := DontCare
    mask := DontCare

    // use tor instead of napot, for napot may be confusing and hard to understand
    addr(14) := shift_addr(0x2000000000L)
    cfg(14).a := 1.U; cfg(14).r := true.B; cfg(14).w := true.B; cfg(14).x := true.B; cfg(14).c := true.B; cfg(14).atomic := true.B

    addr(13) := shift_addr(0x80000000L)
    cfg(13).a := 1.U; cfg(13).r := true.B; cfg(13).w := true.B

    addr(12) := shift_addr(0x3C000000)
    cfg(12).a := 1.U

    addr(11) := shift_addr(0x38021000)
    cfg(11).a := 1.U; cfg(11).r := true.B; cfg(11).w := true.B; cfg(11).x := true.B

    addr(10) := shift_addr(0x38020000)
    cfg(10).a := 1.U; cfg(10).r := true.B; cfg(10).w := true.B

    addr(9) := shift_addr(0x30050000)
    cfg(9).a := 1.U; cfg(9).r := true.B; cfg(9).w := true.B; cfg(9).c := true.B

    addr(8) := shift_addr(0x30010000)
    cfg(8).a := 1.U; cfg(8).r := true.B; cfg(8).w := true.B

    addr(7) := shift_addr(0x20000000)
    cfg(7).a := 1.U; cfg(7).r := true.B; cfg(7).w := true.B; cfg(7).x := true.B

    addr(6) := shift_addr(0x10000000)
    cfg(6).a := 1.U; cfg(6).r := true.B; cfg(6).w := true.B

    addr(5) := shift_addr(0)

    val cfgInitMerge = cfg.asTypeOf(Vec(num/8, UInt(XLEN.W)))
    (cfgInitMerge, addr, mask)
  }

  def shift_addr(addr: BigInt) = {
    (addr >> 2).U
  }
}

trait PMACheckMethod extends HasXSParameter with HasCSRConst { this: PMPChecker =>
  def pma_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAtom(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAtom(cmd) && cfg.atomic) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := !cfg.c
    resp
  }

  def pma_match_res(addr: UInt, size: UInt, pmaEntries: Vec[PMPEntry], mode: UInt, lgMaxSize: Int) = {
    val num = pmaEntries.size
    require(num == NumPMA)
    // PMA must always be checked and can never be ignored:
    // fields like amo and cached are attributes of the region rather than protection,
    // so the entries must have an initial value.
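    // The fold below mirrors PMP priority: it walks the entries from the highest index
    // down to the lowest, so when several entries match, the lowest-indexed one wins.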
    require(!pmaEntries.isEmpty)
    val default = if (pmaEntries.isEmpty) true.B else (mode > ModeS)
    val pmpMinusOne = WireInit(0.U.asTypeOf(new PMPEntry()))

    val res = pmaEntries.zip(pmpMinusOne +: pmaEntries.take(num - 1)).zipWithIndex
      .reverse.foldLeft(pmpMinusOne) { case (prev, ((pma, last_pma), i)) =>
        val is_match = pma.is_match(addr, size, lgMaxSize, last_pma)
        val aligned = pma.aligned(addr, size, lgMaxSize, last_pma)

        val cur = WireInit(pma)
        cur.cfg.r := aligned && pma.cfg.r
        cur.cfg.w := aligned && pma.cfg.w
        cur.cfg.x := aligned && pma.cfg.x
        cur.cfg.atomic := aligned && pma.cfg.atomic
        cur.cfg.c := aligned && pma.cfg.c

        Mux(is_match, cur, prev)
      }
    res
  }
}
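
// Worked example with the pma_init map above: a load at 0x3900_0000 falls in entry 12's
// TOR range [0x3802_1000, 0x3C00_0000), whose cfg has only a = 1 (no r/w/x/c) set.
// pma_match_res therefore returns that entry, and pma_check reports resp.ld (read not
// permitted) together with resp.mmio (region is not cacheable) for the access.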