xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueueRAR.scala (revision 94aa21c6009c2f39c5c5dae9c87260c78887efcc)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
package xiangshan.mem

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.cache._
import utils._
import utility._
import xiangshan.backend.Bundles.DynInst

class LoadQueueRAR(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasPerfEvents
{
  val io = IO(new Bundle() {
    // control
    val redirect = Flipped(Valid(new Redirect))
    val vecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))

    // violation query
    val query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO))

    // release cacheline
    val release = Flipped(Valid(new Release))

    // from VirtualLoadQueue
    val ldWbPtr = Input(new LqPtr)

    // global
    val lqFull = Output(Bool())
  })

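  // The RAR queue does not store full physical addresses: bits above the
  // cacheline offset are XOR-folded down to PartialPAddrBits bits before
  // being written into the CAM, which narrows the CAM at the cost of rare
  // false-positive matches.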
  private val PartialPAddrStride: Int = 6
  private val PartialPAddrBits: Int = 16
  private val PartialPAddrLowBits: Int = (PartialPAddrBits - PartialPAddrStride) / 2 // avoid overlap
  private val PartialPAddrHighBits: Int = PartialPAddrBits - PartialPAddrLowBits
  private def boundary(x: Int, h: Int) = if (x < h) Some(x) else None
  private def lowMapping = (0 until PartialPAddrLowBits).map(i => Seq(
      boundary(PartialPAddrStride + i  , PartialPAddrBits),
      boundary(PartialPAddrBits - i - 1, PartialPAddrBits)
    )
  )
  private def highMapping = (0 until PartialPAddrHighBits).map(i => Seq(
      boundary(i + PartialPAddrStride     , PAddrBits),
      boundary(i + PartialPAddrStride + 11, PAddrBits),
      boundary(i + PartialPAddrStride + 22, PAddrBits),
      boundary(i + PartialPAddrStride + 33, PAddrBits)
    )
  )
  private def genPartialPAddr(paddr: UInt) = {
    val ppaddr_low = Wire(Vec(PartialPAddrLowBits, Bool()))
    ppaddr_low.zip(lowMapping).foreach {
      case (bit, mapping) =>
        bit := mapping.filter(_.isDefined).map(x => paddr(x.get)).reduce(_^_)
    }

    val ppaddr_high = Wire(Vec(PartialPAddrHighBits, Bool()))
    ppaddr_high.zip(highMapping).foreach {
      case (bit, mapping) =>
        bit := mapping.filter(_.isDefined).map(x => paddr(x.get)).reduce(_^_)
    }
    Cat(ppaddr_high.asUInt, ppaddr_low.asUInt)
  }
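  // For example, with the constants above: ppaddr_low(0) = paddr(6) ^ paddr(15),
  // ..., ppaddr_low(4) = paddr(10) ^ paddr(11), and high bit i XOR-folds
  // paddr(6+i), paddr(17+i), paddr(28+i) and paddr(39+i), dropping any index
  // at or beyond PAddrBits. Equal addresses always hash equal, so the fold can
  // only introduce false positives, never hide a real address match.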

  println("LoadQueueRAR: size: " + LoadQueueRARSize)
  //  LoadQueueRAR entry fields
  //  +-------+-------+-------+----------+
  //  | Valid |  Uop  | PAddr | Released |
  //  +-------+-------+-------+----------+
  //
  //  Field descriptions:
  //  Valid     : the entry is allocated.
  //  Uop       : micro-op of the recorded load.
  //  PAddr     : partial physical address of the load.
  //  Released  : the cacheline accessed by this load has been released by the DCache.
  val allocated = RegInit(VecInit(List.fill(LoadQueueRARSize)(false.B))) // control signals must carry an explicit initial value
  val uop = Reg(Vec(LoadQueueRARSize, new DynInst))
  val paddrModule = Module(new LqPAddrModule(
    gen = UInt(PartialPAddrBits.W),
    numEntries = LoadQueueRARSize,
    numRead = LoadPipelineWidth,
    numWrite = LoadPipelineWidth,
    numWBank = LoadQueueNWriteBanks,
    numWDelay = 2,
    numCamPort = LoadPipelineWidth
  ))
  paddrModule.io := DontCare
  val released = RegInit(VecInit(List.fill(LoadQueueRARSize)(false.B)))

  // freelist: stores the indices of currently free entries.
  // +---+---+--------------+-----+-----+
  // | 0 | 1 |      ......  | n-2 | n-1 |
  // +---+---+--------------+-----+-----+
  val freeList = Module(new FreeList(
    size = LoadQueueRARSize,
    allocWidth = LoadPipelineWidth,
    freeWidth = 4,
    enablePreAlloc = true,
    moduleName = "LoadQueueRAR freelist"
  ))
  freeList.io := DontCare

  // Real allocation happens in load_s2.
  // A PAddr write takes 2 cycles, so the release signal is also held for one
  // extra cycle; this lets a load that enqueues concurrently with a release
  // still observe that release.
  val release1Cycle = io.release
  // val release2Cycle = RegNext(io.release)
  // val release2Cycle_dup_lsu = RegNext(io.release)
  val release2Cycle = RegEnable(io.release, io.release.valid)
  release2Cycle.valid := RegNext(io.release.valid)
  //val release2Cycle_dup_lsu = RegEnable(io.release, io.release.valid)
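  // RegEnable keeps the release payload stable unless a new valid release
  // arrives; only the valid bit itself is pipelined unconditionally.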

  // LoadQueueRAR enqueue condition:
  // there is at least one not-yet-completed load older than the current load
  // ("completed" means the load has obtained its data or raised an exception).
  val canEnqueue = io.query.map(_.req.valid)
  val cancelEnqueue = io.query.map(_.req.bits.uop.robIdx.needFlush(io.redirect))
  val hasNotWritebackedLoad = io.query.map(_.req.bits.uop.lqIdx).map(lqIdx => isAfter(lqIdx, io.ldWbPtr))
  val needEnqueue = canEnqueue.zip(hasNotWritebackedLoad).zip(cancelEnqueue).map { case ((v, r), c) => v && r && !c }
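  // If every older load has already written back, no older load can execute
  // after this one, so there is no RAR hazard to record and enqueue is skipped.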

  // Allocate logic
  val acceptedVec = Wire(Vec(LoadPipelineWidth, Bool()))
  val enqIndexVec = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueRARSize).W)))

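  // Each load pipe requests the offset-th free slot, where offset counts how
  // many lower-numbered pipes also enqueue this cycle, so simultaneous
  // requests are compacted onto consecutive freelist allocation ports.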
  for ((enq, w) <- io.query.map(_.req).zipWithIndex) {
    acceptedVec(w) := false.B
    paddrModule.io.wen(w) := false.B
    freeList.io.doAllocate(w) := false.B

    freeList.io.allocateReq(w) := true.B

    //  Allocate ready
    val offset = PopCount(needEnqueue.take(w))
    val canAccept = freeList.io.canAllocate(offset)
    val enqIndex = freeList.io.allocateSlot(offset)
    enq.ready := Mux(needEnqueue(w), canAccept, true.B)

    enqIndexVec(w) := enqIndex
    when (needEnqueue(w) && enq.ready) {
      acceptedVec(w) := true.B

      freeList.io.doAllocate(w) := true.B
      //  Allocate new entry
      allocated(enqIndex) := true.B

      //  Write paddr
      paddrModule.io.wen(w) := true.B
      paddrModule.io.waddr(w) := enqIndex
      paddrModule.io.wdata(w) := genPartialPAddr(enq.bits.paddr)

      //  Fill info
      uop(enqIndex) := enq.bits.uop
      //  NC is uncacheable and will never be explicitly released by the DCache,
      //  so NC loads are not allowed to reorder: their entries are marked
      //  released up front so that any later address match reports a violation.
      released(enqIndex) := enq.bits.is_nc || (
        enq.bits.data_valid &&
        (release2Cycle.valid &&
        enq.bits.paddr(PAddrBits-1, DCacheLineOffset) === release2Cycle.bits.paddr(PAddrBits-1, DCacheLineOffset) ||
        release1Cycle.valid &&
        enq.bits.paddr(PAddrBits-1, DCacheLineOffset) === release1Cycle.bits.paddr(PAddrBits-1, DCacheLineOffset))
      )
    }
    val debug_robIdx = enq.bits.uop.robIdx.asUInt
    XSError(
      needEnqueue(w) && enq.ready && allocated(enqIndex),
      p"LoadQueueRAR: You can not write a valid entry! check: ldu $w, robIdx $debug_robIdx")
  }

  //  LoadQueueRAR deallocate
  val freeMaskVec = Wire(Vec(LoadQueueRARSize, Bool()))

  // init
  freeMaskVec.foreach(e => e := false.B)

  // An entry is freed once all loads older than it have written back, or when
  // it is flushed by a redirect or cancelled by vector-load feedback.
  val vecLdCanceltmp = Wire(Vec(LoadQueueRARSize, Vec(VecLoadPipelineWidth, Bool())))
  val vecLdCancel = Wire(Vec(LoadQueueRARSize, Bool()))
  for (i <- 0 until LoadQueueRARSize) {
    val deqNotBlock = !isBefore(io.ldWbPtr, uop(i).lqIdx)
    val needFlush = uop(i).robIdx.needFlush(io.redirect)
    val fbk = io.vecFeedback
    for (j <- 0 until VecLoadPipelineWidth) {
      vecLdCanceltmp(i)(j) := allocated(i) && fbk(j).valid && fbk(j).bits.isFlush && uop(i).robIdx === fbk(j).bits.robidx && uop(i).uopIdx === fbk(j).bits.uopidx
    }
    vecLdCancel(i) := vecLdCanceltmp(i).reduce(_ || _)

    when (allocated(i) && (deqNotBlock || needFlush || vecLdCancel(i))) {
      allocated(i) := false.B
      freeMaskVec(i) := true.B
    }
  }

  // If a load needs to be replayed, revoke the entry it allocated last cycle.
  val lastCanAccept = GatedRegNext(acceptedVec)
  val lastAllocIndex = GatedRegNext(enqIndexVec)

  for ((revoke, w) <- io.query.map(_.revoke).zipWithIndex) {
    val revokeValid = revoke && lastCanAccept(w)
    val revokeIndex = lastAllocIndex(w)

    when (allocated(revokeIndex) && revokeValid) {
      allocated(revokeIndex) := false.B
      freeMaskVec(revokeIndex) := true.B
    }
  }

  freeList.io.free := freeMaskVec.asUInt

  // LoadQueueRAR query
  // A load-to-load violation is reported when, for some recorded load:
  // 1. its (partial) physical address matches the query via the CAM port,
  // 2. its released flag is set (cacheline released, or NC with data), and
  // 3. it is younger than the querying load.
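  // The check takes two cycles: the CAM is probed and the per-entry match
  // mask is computed in the request cycle, then the registered mask is
  // OR-reduced into the response one cycle later.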
  val ldLdViolation = Wire(Vec(LoadPipelineWidth, Bool()))
  //val allocatedUInt = RegNext(allocated.asUInt)
  for ((query, w) <- io.query.zipWithIndex) {
    ldLdViolation(w) := false.B
    paddrModule.io.releaseViolationMdata(w) := genPartialPAddr(query.req.bits.paddr)

    query.resp.valid := RegNext(query.req.valid)
    // Generate real violation mask
    val robIdxMask = VecInit(uop.map(_.robIdx).map(isAfter(_, query.req.bits.uop.robIdx)))
    val matchMaskReg = Wire(Vec(LoadQueueRARSize, Bool()))
    for (i <- 0 until LoadQueueRARSize) {
      matchMaskReg(i) := (allocated(i) &&
                         paddrModule.io.releaseViolationMmask(w)(i) &&
                         robIdxMask(i) &&
                         released(i))
    }
    val matchMask = GatedValidRegNext(matchMaskReg)
    //  Load-to-Load violation check result
    val ldLdViolationMask = matchMask
    ldLdViolationMask.suggestName("ldLdViolationMask_" + w)
    query.resp.bits.rep_frm_fetch := ParallelORR(ldLdViolationMask)
  }
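  // rep_frm_fetch asks the core to re-fetch from the querying load, since a
  // younger load to the same line may have observed stale data after the
  // line was released (and possibly modified by another core).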

  // When io.release.valid (release1Cycle.valid), the last ld-ld paddr CAM port
  // is reused to update the released flags within one cycle.
  val releaseVioMask = Reg(Vec(LoadQueueRARSize, Bool()))
  when (release1Cycle.valid) {
    paddrModule.io.releaseMdata.takeRight(1)(0) := genPartialPAddr(release1Cycle.bits.paddr)
  }

  (0 until LoadQueueRARSize).foreach(i => {
    when (RegNext((paddrModule.io.releaseMmask.takeRight(1)(0)(i)) && allocated(i) && release1Cycle.valid)) {
      // Note: if a load has missed in the dcache and is waiting for a refill in the load queue,
      // its released flag still needs to be set as true if the address matches.
      released(i) := true.B
    }
  })
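  // The CAM compare result is registered, so a matching entry's released flag
  // is set one cycle after the release request arrives.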

  io.lqFull := freeList.io.empty

  // perf cnt
  val canEnqCount = PopCount(io.query.map(_.req.fire))
  val validCount = freeList.io.validCount
  val allowEnqueue = validCount <= (LoadQueueRARSize - LoadPipelineWidth).U
  val ldLdViolationCount = PopCount(io.query.map(_.resp).map(resp => resp.valid && resp.bits.rep_frm_fetch))

  QueuePerf(LoadQueueRARSize, validCount, !allowEnqueue)
  XSPerfAccumulate("enq", canEnqCount)
  XSPerfAccumulate("ld_ld_violation", ldLdViolationCount)
  val perfEvents: Seq[(String, UInt)] = Seq(
    ("enq", canEnqCount),
    ("ld_ld_violation", ldLdViolationCount)
  )
  generatePerfEvent()
  // End
}