package xiangshan.mem

import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, TlbRequestIO, MemoryOpConstants}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr


// Data module definitions
// These data modules are like SyncDataModuleTemplate, but support CAM-like ops
class SQPaddrModule(numEntries: Int, numRead: Int, numWrite: Int, numForward: Int) extends XSModule with HasDCacheParameters {
  val io = IO(new Bundle {
    val raddr = Input(Vec(numRead, UInt(log2Up(numEntries).W)))
    val rdata = Output(Vec(numRead, UInt((PAddrBits).W)))
    val wen   = Input(Vec(numWrite, Bool()))
    val waddr = Input(Vec(numWrite, UInt(log2Up(numEntries).W)))
    val wdata = Input(Vec(numWrite, UInt((PAddrBits).W)))
    val forwardMdata = Input(Vec(numForward, UInt((PAddrBits).W)))
    val forwardMmask = Output(Vec(numForward, Vec(numEntries, Bool())))
  })

  val data = Reg(Vec(numEntries, UInt((PAddrBits).W)))

  // read ports: raddr is registered, so rdata is available one cycle after the request
  for (i <- 0 until numRead) {
    io.rdata(i) := data(RegNext(io.raddr(i)))
  }

  // write ports (with priorities)
  for (i <- 0 until numWrite) {
    when (io.wen(i)) {
      data(io.waddr(i)) := io.wdata(i)
    }
  }

  // content-addressed match: compare paddr at 8-byte (double-word) granularity
  for (i <- 0 until numForward) {
    for (j <- 0 until numEntries) {
      io.forwardMmask(i)(j) := io.forwardMdata(i)(PAddrBits-1, 3) === data(j)(PAddrBits-1, 3)
    }
  }

  // DataModuleTemplate should not be used when there are write conflicts
  for (i <- 0 until numWrite) {
    for (j <- i+1 until numWrite) {
      assert(!(io.wen(i) && io.wen(j) && io.waddr(i) === io.waddr(j)))
    }
  }
}

class SQDataEntry extends XSBundle {
  // val paddr = UInt(PAddrBits.W)
  val mask = UInt(8.W)
  val data = UInt(XLEN.W)
}

class StoreQueueData(size: Int, numRead: Int, numWrite: Int, numForward: Int) extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val raddr = Vec(numRead,  Input(UInt(log2Up(size).W)))
    val rdata = Vec(numRead,  Output(new SQDataEntry))
    val wen   = Vec(numWrite, Input(Bool()))
    val waddr = Vec(numWrite, Input(UInt(log2Up(size).W)))
    val wdata = Vec(numWrite, Input(new SQDataEntry))
    val debug = Vec(size, Output(new SQDataEntry))

    val needForward = Input(Vec(numForward, Vec(2, UInt(size.W))))
    val forwardMask = Vec(numForward, Output(Vec(8, Bool())))
    val forwardData = Vec(numForward, Output(Vec(8, UInt(8.W))))
  })

  io := DontCare

  val data = Reg(Vec(size, new SQDataEntry))

  // write ports: store data/mask written back to the store queue
  (0 until numWrite).map(i => {
    when (io.wen(i)) {
      data(io.waddr(i)) := io.wdata(i)
    }
  })

  // store queue read data
  (0 until numRead).map(i => {
    io.rdata(i) := data(RegNext(io.raddr(i)))
  })

  // DataModuleTemplate should not be used when there are write conflicts
  for (i <- 0 until numWrite) {
    for (j <- i+1 until numWrite) {
      assert(!(io.wen(i) && io.wen(j) && io.waddr(i) === io.waddr(j)))
    }
  }

  // forwarding
  // Compare ringBufferTail (deqPtr) and forward.sqIdx; there are two cases:
  // (1) if they have the same flag, we need to check range(tail, sqIdx)
  // (2) if they have different flags, we need to check range(tail, size) and range(0, sqIdx)
  // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, size))
  // Forward2: Mux(same_flag, 0.U,                range(0, sqIdx)  )
  // i.e. forward1 covers the entries that share deqPtr's flag bit, forward2 covers the rest

  // an entry with a larger index should have higher priority since its data is younger

  (0 until numForward).map(i => {
    // parallel forwarding logic
    val matchResultVec = Wire(Vec(size * 2, new FwdEntry))

    // reduction tree: when both operands have a valid byte, the right (younger) one wins
    def parallelFwd(xs: Seq[Data]): Data = {
      ParallelOperation(xs, (a: Data, b: Data) => {
        val l = a.asTypeOf(new FwdEntry)
        val r = b.asTypeOf(new FwdEntry)
        val res = Wire(new FwdEntry)
        (0 until 8).map(p => {
          res.mask(p) := l.mask(p) || r.mask(p)
          res.data(p) := Mux(r.mask(p), r.data(p), l.data(p))
        })
        res
      })
    }

    // paddrMatch is now included in io.needForward
    // for (j <- 0 until size) {
    //   paddrMatch(j) := io.forward(i).paddr(PAddrBits - 1, 3) === data(j).paddr(PAddrBits - 1, 3)
    // }

    for (j <- 0 until size) {
      val needCheck0 = RegNext(io.needForward(i)(0)(j))
      val needCheck1 = RegNext(io.needForward(i)(1)(j))
      (0 until XLEN / 8).foreach(k => {
        matchResultVec(j).mask(k)        := needCheck0 && data(j).mask(k)
        matchResultVec(j).data(k)        := data(j).data(8 * (k + 1) - 1, 8 * k)
        matchResultVec(size + j).mask(k) := needCheck1 && data(j).mask(k)
        matchResultVec(size + j).data(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
      })
    }

    val parallelFwdResult = parallelFwd(matchResultVec).asTypeOf(new FwdEntry)

    io.forwardMask(i) := parallelFwdResult.mask
    io.forwardData(i) := parallelFwdResult.data

  })

  io.debug := data
}
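
// ---------------------------------------------------------------------------
// Illustrative sketch (an assumption, not part of the original design): one way
// the two needForward vectors consumed above could be derived from the dequeue
// pointer (tail) and a load's forward.sqIdx, following the range() description
// in the forwarding comment. The object name and the `tail` / `sqIdx` /
// `sameFlag` arguments are hypothetical; in the real design these masks are
// produced by the store queue control logic before they reach this data module.
// ---------------------------------------------------------------------------
object SQNeedForwardSketch {
  // Mask with bits [0, idx) set, e.g. lowerMask(3.U, 8) = 0b00000111.
  private def lowerMask(idx: UInt, size: Int): UInt = {
    ((1.U((size + 1).W) << idx) - 1.U)(size - 1, 0)
  }

  //   same flag:      check range(tail, sqIdx)                    -> forward1 only
  //   different flag: check range(tail, size) and range(0, sqIdx) -> forward1 and forward2
  def apply(tail: UInt, sqIdx: UInt, sameFlag: Bool, size: Int): (UInt, UInt) = {
    val tailMask  = lowerMask(tail, size)  // bits [0, tail)
    val sqIdxMask = lowerMask(sqIdx, size) // bits [0, sqIdx)
    val needForward1 = Mux(sameFlag, sqIdxMask & ~tailMask, ~tailMask)
    val needForward2 = Mux(sameFlag, 0.U(size.W), sqIdxMask)
    (needForward1, needForward2)
  }
}

// Hypothetical wiring example (names assumed, shown for illustration only):
//   val (nf1, nf2) = SQNeedForwardSketch(deqPtr.value, forward.sqIdx.value,
//                                        deqPtr.flag === forward.sqIdx.flag, size)
//   dataModule.io.needForward(i)(0) := nf1
//   dataModule.io.needForward(i)(1) := nf2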