xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/StoreMisalignBuffer.scala (revision 94aa21c6009c2f39c5c5dae9c87260c78887efcc)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.mem
18
19import org.chipsalliance.cde.config.Parameters
20import chisel3._
21import chisel3.util._
22import utils._
23import utility._
24import xiangshan._
25import xiangshan.backend.fu.FuConfig._
26import xiangshan.backend.fu.fpu.FPU
27import xiangshan.backend.rob.RobLsqIO
28import xiangshan.cache._
29import xiangshan.frontend.FtqPtr
30import xiangshan.ExceptionNO._
31import xiangshan.cache.wpu.ReplayCarry
32import xiangshan.backend.rob.RobPtr
33import xiangshan.backend.Bundles._
34import xiangshan.backend.fu.FuConfig.StaCfg
35import xiangshan.backend.fu.FuType.isVStore
36
/**
  * StoreMisalignBuffer: a single-entry buffer that handles misaligned store accesses.
  *
  * A misaligned store that crosses a 16-byte boundary is captured from one of the
  * store pipelines (`io.req`), split into at most `maxSplitNum` (= 2) aligned store
  * accesses, replayed through the store pipeline via `splitStoreReq`/`splitStoreResp`,
  * and finally written back either to the ROB (scalar path, `writeBack`) or to the
  * vector store merge buffer (vector path, `vecWriteBack`).
  *
  * Stores that additionally cross a 4KB page boundary are coordinated with the store
  * queue through `sqControl`: they wait until they reach the head of the ROB
  * (`io.rob.pendingPtr` match) before splitting, and after writeback they are held in
  * `s_block` until the store queue signals dequeue (`doDeq`).
  *
  * If an exception occurs or any split access reaches MMIO space, the split results
  * are discarded and the instruction is committed with an exception (MMIO delegates a
  * storeAddrMisaligned exception to software).
  */
class StoreMisalignBuffer(implicit p: Parameters) extends XSModule
  with HasCircularQueuePtrHelper
{
  // one enqueue port per store pipeline
  private val enqPortNum = StorePipelineWidth
  // a misaligned store is split into at most two aligned stores
  private val maxSplitNum = 2

  // the split logic below (low/high halves, 2-entry request/response vectors,
  // 1-bit curPtr) is written specifically for a 2-way split
  require(maxSplitNum == 2)

  // store size encodings; same 2-bit encoding as fuOpType(1, 0)
  private val SB = "b00".U(2.W)
  private val SH = "b01".U(2.W)
  private val SW = "b10".U(2.W)
  private val SD = "b11".U(2.W)

  // encode of how many bytes to shift or truncate
  private val BYTE0 = "b000".U(3.W)
  private val BYTE1 = "b001".U(3.W)
  private val BYTE2 = "b010".U(3.W)
  private val BYTE3 = "b011".U(3.W)
  private val BYTE4 = "b100".U(3.W)
  private val BYTE5 = "b101".U(3.W)
  private val BYTE6 = "b110".U(3.W)
  private val BYTE7 = "b111".U(3.W)

  /** Byte-mask for a store of the given size encoding (SB/SH/SW/SD -> 1/2/4/8 bytes). */
  def getMask(sizeEncode: UInt) = LookupTree(sizeEncode, List(
    SB -> 0x1.U,
    SH -> 0x3.U,
    SW -> 0xf.U,
    SD -> 0xff.U
  ))

  /**
    * Recursively select the oldest valid request among `valid`/`bits`/`index`.
    *
    * Age is decided first by robIdx (isAfter), then by uopIdx for uops of the same
    * instruction. Returns singleton Seqs carrying the selected valid, bundle and
    * port index.
    *
    * NOTE(review): the `oldest` mux uses `isNotBefore` where `oldestIndex` uses
    * `===`; because both appear under an `isAfter(...) || ...` disjunct the two
    * predicates are logically equivalent, but the asymmetry looks accidental.
    */
  def selectOldest[T <: LsPipelineBundle](valid: Seq[Bool], bits: Seq[T], index: Seq[UInt]): (Seq[Bool], Seq[T], Seq[UInt]) = {
    assert(valid.length == bits.length)
    if (valid.length == 0 || valid.length == 1) {
      // zero or one candidate: nothing to compare
      (valid, bits, index)
    } else if (valid.length == 2) {
      // base case: pick the older of two candidates
      val res = Seq.fill(2)(Wire(ValidIO(chiselTypeOf(bits(0)))))
      val resIndex = Seq.fill(2)(Wire(chiselTypeOf(index(0))))
      for (i <- res.indices) {
        res(i).valid := valid(i)
        res(i).bits := bits(i)
        resIndex(i) := index(i)
      }
      val oldest = Mux(valid(0) && valid(1),
        Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx) ||
          (isNotBefore(bits(0).uop.robIdx, bits(1).uop.robIdx) && bits(0).uop.uopIdx > bits(1).uop.uopIdx), res(1), res(0)),
        Mux(valid(0) && !valid(1), res(0), res(1)))

      val oldestIndex = Mux(valid(0) && valid(1),
        Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx) ||
          (bits(0).uop.robIdx === bits(1).uop.robIdx && bits(0).uop.uopIdx > bits(1).uop.uopIdx), resIndex(1), resIndex(0)),
        Mux(valid(0) && !valid(1), resIndex(0), resIndex(1)))
      (Seq(oldest.valid), Seq(oldest.bits), Seq(oldestIndex))
    } else {
      // divide and conquer: select oldest in each half, then compare the two winners
      val left = selectOldest(valid.take(valid.length / 2), bits.take(bits.length / 2), index.take(index.length / 2))
      val right = selectOldest(valid.takeRight(valid.length - (valid.length / 2)), bits.takeRight(bits.length - (bits.length / 2)), index.takeRight(index.length - (index.length / 2)))
      selectOldest(left._1 ++ right._1, left._2 ++ right._2, left._3 ++ right._3)
    }
  }

  val io = IO(new Bundle() {
    val redirect        = Flipped(Valid(new Redirect))
    // misaligned store requests from the store pipelines
    val req             = Vec(enqPortNum, Flipped(Decoupled(new LsPipelineBundle)))
    val rob             = Flipped(new RobLsqIO)
    // replay one split (aligned) store into the store pipeline ...
    val splitStoreReq   = Decoupled(new LsPipelineBundle)
    // ... and receive its address-translation result
    val splitStoreResp  = Flipped(Valid(new SqWriteBundle))
    // scalar writeback to ROB
    val writeBack       = Decoupled(new MemExuOutput)
    // vector writeback (feedback) to the vector store merge buffer
    val vecWriteBack    = Vec(VecStorePipelineWidth, Decoupled(new VecPipelineFeedbackIO(isVStore = true)))
    // writeback-port contention: suppress our writeback this cycle when set
    val storeOutValid    = Input(Bool())
    val storeVecOutValid = Input(Bool())
    // overwrite info for the store exception buffer (currently valid is tied low below)
    val overwriteExpBuf = Output(new XSBundle {
      val valid = Bool()
      val vaddr = UInt(XLEN.W)
      val isHyper = Bool()
      val gpaddr = UInt(XLEN.W)
      val isForVSnonLeafPTE = Bool()
    })
    // handshake with the store queue for cross-page stores
    val sqControl       = new StoreMaBufToSqControlIO

    val toVecStoreMergeBuffer = Vec(VecStorePipelineWidth, new StoreMaBufToVecStoreMergeBufferIO)
    val full = Bool()
  })

  // unused ROB inputs tied off
  // NOTE(review): widths use LoadPipelineWidth although this is the store buffer —
  // presumably follows the shared RobLsqIO definition; confirm against RobLsqIO
  io.rob.mmio := 0.U.asTypeOf(Vec(LoadPipelineWidth, Bool()))
  io.rob.uop  := 0.U.asTypeOf(Vec(LoadPipelineWidth, new DynInst))

  // buffer entry: the pipeline bundle plus the enqueue port it came from
  class StoreMisalignBufferEntry(implicit p: Parameters) extends LsPipelineBundle {
    val portIndex = UInt(log2Up(enqPortNum).W)
  }
  val req_valid = RegInit(false.B)
  val req = Reg(new StoreMisalignBufferEntry)

  val cross4KBPageBoundary = Wire(Bool())
  // set when a younger in-flight store was replaced by an older one (cross-page case);
  // the writeback then flushes the pipeline to refetch the replaced store
  val needFlushPipe = RegInit(false.B)

  // buffer control:
  //  - s_idle:  Idle
  //  - s_split: Split miss-aligned store into aligned stores
  //  - s_req:   Send split store to sta and get result from sta
  //  - s_resp:  Responds to a split store access request
  //  - s_wb:    writeback to rob/vecMergeBuffer
  //  - s_block: Wait for this instr to reach the head of Rob.
  val s_idle :: s_split :: s_req :: s_resp :: s_wb :: s_block :: Nil = Enum(6)
  val bufferState    = RegInit(s_idle)

  // enqueue
  // s1: pick the oldest valid request among the store pipelines
  val s1_req = VecInit(io.req.map(_.bits))
  val s1_valid = VecInit(io.req.map(x => x.valid))

  val s1_index = (0 until io.req.length).map(_.asUInt)
  val reqSel = selectOldest(s1_valid, s1_req, s1_index)

  val reqSelValid = reqSel._1(0)
  val reqSelBits  = reqSel._2(0)
  val reqSelPort  = reqSel._3(0)

  val reqRedirect = reqSelBits.uop.robIdx.needFlush(io.redirect)

  val canEnq = !req_valid && !reqRedirect && reqSelValid
  // the buffered (cross-page) store has reached the head of the ROB
  val robMatch = req_valid && io.rob.pendingst && (io.rob.pendingPtr === req.uop.robIdx)

  when(canEnq) {
    connectSamePort(req, reqSelBits)
    req.portIndex := reqSelPort
    req_valid := true.B
  }
  // cross-4KB-page special case: while still idle, an incoming request that is
  // OLDER than the buffered one replaces it (the buffered younger store will be
  // refetched after the pipeline flush triggered by needFlushPipe)
  val cross4KBPageEnq = WireInit(false.B)
  when (cross4KBPageBoundary && !reqRedirect) {
    when(
      reqSelValid &&
      (isAfter(req.uop.robIdx, reqSelBits.uop.robIdx) || (isNotBefore(req.uop.robIdx, reqSelBits.uop.robIdx) && req.uop.uopIdx > reqSelBits.uop.uopIdx)) &&
      bufferState === s_idle
    ) {
      connectSamePort(req, reqSelBits)
      req.portIndex := reqSelPort
      cross4KBPageEnq := true.B
      needFlushPipe   := true.B
    } .otherwise {
      req := req
      cross4KBPageEnq := false.B
    }
  }

  // only the selected (oldest) port may be accepted this cycle
  val reqSelCanEnq = UIntToOH(reqSelPort)

  io.req.zipWithIndex.map{
    case (reqPort, index) => reqPort.ready := reqSelCanEnq(index) && (!req_valid || cross4KBPageBoundary && cross4KBPageEnq)
  }

  // tell the vector merge buffer to flush the entry that was displaced by the
  // cross-page replacement above
  io.toVecStoreMergeBuffer.zipWithIndex.map{
    case (toStMB, index) => {
      toStMB.flush   := req_valid && cross4KBPageBoundary && cross4KBPageEnq && UIntToOH(req.portIndex)(index)
      toStMB.mbIndex := req.mbIndex
    }
  }
  io.full := req_valid

  //logic
  val splitStoreReqs = RegInit(VecInit(List.fill(maxSplitNum)(0.U.asTypeOf(new LsPipelineBundle))))
  val splitStoreResp = RegInit(VecInit(List.fill(maxSplitNum)(0.U.asTypeOf(new SqWriteBundle))))
  val isCrossPage    = RegInit(false.B)
  val exceptionVec   = RegInit(0.U.asTypeOf(ExceptionVec()))
  // one-hot bitmaps over the two split accesses
  val unSentStores   = RegInit(0.U(maxSplitNum.W))
  val unWriteStores  = RegInit(0.U(maxSplitNum.W))
  // which split access (0 = low, 1 = high) is currently being issued
  val curPtr = RegInit(0.U(log2Ceil(maxSplitNum).W))

  // if there is exception or mmio in split store
  val globalException = RegInit(false.B)
  val globalMMIO = RegInit(false.B)

  val hasException = ExceptionNO.selectByFu(io.splitStoreResp.bits.uop.exceptionVec, StaCfg).asUInt.orR && !io.splitStoreResp.bits.need_rep
  val isMMIO = io.splitStoreResp.bits.mmio && !io.splitStoreResp.bits.need_rep

  // store-queue handshake for the cross-page case
  io.sqControl.toStoreQueue.crossPageWithHit := io.sqControl.toStoreMisalignBuffer.sqPtr === req.uop.sqIdx && isCrossPage
  io.sqControl.toStoreQueue.crossPageCanDeq := !isCrossPage || bufferState === s_block
  // 8-byte-aligned paddr of the high (second) split access
  io.sqControl.toStoreQueue.paddr := Cat(splitStoreResp(1).paddr(splitStoreResp(1).paddr.getWidth - 1, 3), 0.U(3.W))

  io.sqControl.toStoreQueue.withSameUop := io.sqControl.toStoreMisalignBuffer.uop.robIdx === req.uop.robIdx && io.sqControl.toStoreMisalignBuffer.uop.uopIdx === req.uop.uopIdx && req.isvec && robMatch && isCrossPage

  //state transition
  switch(bufferState) {
    is (s_idle) {
      when(cross4KBPageBoundary) {
        // cross-page stores must wait until they reach the head of the ROB
        when(robMatch) {
          bufferState := s_split
          isCrossPage := true.B
        }
      } .otherwise {
        when (req_valid) {
          bufferState := s_split
          isCrossPage := false.B
        }
      }

    }

    is (s_split) {
      bufferState := s_req
    }

    is (s_req) {
      when (io.splitStoreReq.fire) {
        bufferState := s_resp
      }
    }

    is (s_resp) {
      when (io.splitStoreResp.valid) {
        val clearOh = UIntToOH(curPtr)
        when (hasException || isMMIO) {
          // commit directly when exception occurs
          // if any split store reaches mmio space, delegate to software storeAddrMisaligned exception
          bufferState := s_wb
          globalException := hasException
          globalMMIO := isMMIO
        } .elsewhen(io.splitStoreResp.bits.need_rep || (unSentStores & (~clearOh).asUInt).orR) {
          // need replay or still has unsent requests
          bufferState := s_req
        } .otherwise {
          // got result, goto calculate data and control sq
          bufferState := s_wb
        }
      }
    }

    is (s_wb) {
      // vector path: done once the merge buffer accepts the feedback
      when (req.isvec) {
        when (io.vecWriteBack.map(x => x.fire).reduce( _ || _)) {
          bufferState := s_idle
          req_valid := false.B
          curPtr := 0.U
          unSentStores := 0.U
          unWriteStores := 0.U
          globalException := false.B
          globalMMIO := false.B
          isCrossPage := false.B
          needFlushPipe := false.B
        }
      }
      // scalar path: done unless this is a well-behaved cross-page store,
      // which must still wait in s_block for the store queue
      when (io.writeBack.fire && (!isCrossPage || globalMMIO || globalException)) {
        bufferState := s_idle
        req_valid := false.B
        curPtr := 0.U
        unSentStores := 0.U
        unWriteStores := 0.U
        globalException := false.B
        globalMMIO := false.B
        isCrossPage := false.B
        needFlushPipe := false.B
      } .elsewhen(io.writeBack.fire && isCrossPage) {
        bufferState := s_block
      } .otherwise {
        bufferState := s_wb
      }
    }

    is (s_block) {
      // released by the store queue once the cross-page store may dequeue
      when (io.sqControl.toStoreMisalignBuffer.doDeq) {
        bufferState := s_idle
        req_valid := false.B
        curPtr := 0.U
        unSentStores := 0.U
        unWriteStores := 0.U
        globalException := false.B
        globalMMIO := false.B
        isCrossPage := false.B
      }
    }
  }

  // effective access-size encoding (vector uses alignedType, scalar uses fuOpType)
  val alignedType = Mux(req.isvec, req.alignedType(1,0), req.uop.fuOpType(1, 0))

  // address of the last byte of the access, low 5 bits (for 16-byte-boundary check)
  val highAddress = LookupTree(alignedType, List(
    SB -> 0.U,
    SH -> 1.U,
    SW -> 3.U,
    SD -> 7.U
  )) + req.vaddr(4, 0)

  // address of the last byte of the access, low 13 bits (for 4KB-page-boundary check)
  val highPageAddress = LookupTree(alignedType, List(
    SB -> 0.U,
    SH -> 1.U,
    SW -> 3.U,
    SD -> 7.U
  )) + req.vaddr(12, 0)
  // to see if (vaddr + opSize - 1) and vaddr are in the same 16 bytes region
  val cross16BytesBoundary = req_valid && (highAddress(4) =/= req.vaddr(4))
  cross4KBPageBoundary := req_valid && (highPageAddress(12) =/= req.vaddr(12))
  val aligned16BytesAddr   = (req.vaddr >> 4) << 4// req.vaddr & ~("b1111".U)
  val aligned16BytesSel    = req.vaddr(3, 0)

  // meta of 128 bit store
  val new128Store = WireInit(0.U.asTypeOf(new LsPipelineBundle))
  // meta of split loads
  val lowAddrStore  = WireInit(0.U.asTypeOf(new LsPipelineBundle))
  val highAddrStore = WireInit(0.U.asTypeOf(new LsPipelineBundle))
  // final lowResult = Cat(`lowResultWidth` of store data, 0.U(make it to fill total length of Vlen))
  val lowResultWidth = RegInit(0.U(3.W)) // how many bytes should we take from the store data
  // final highResult = Zero extend to Vlen(`highResultWidth` of (store data >> lowResultWidth))
  val highResultWidth = RegInit(0.U(3.W)) // how many bytes should we take from the store data

  // split the buffered misaligned store into a low-address and a high-address
  // aligned store, selected by size and address offset; each case sets the split
  // op type, vaddr, 16-byte-region byte mask, and how many data bytes each half carries
  when (bufferState === s_split) {
    when (!cross16BytesBoundary) {
      // misaligned accesses within one 16-byte region are handled in the pipeline, not here
      assert(false.B, s"There should be no non-aligned access that does not cross 16Byte boundaries.")
    } .otherwise {
      // split this unaligned store into `maxSplitNum` aligned stores
      unWriteStores := Fill(maxSplitNum, 1.U(1.W))
      unSentStores := Fill(maxSplitNum, 1.U(1.W))
      curPtr := 0.U
      lowAddrStore.uop := req.uop
      lowAddrStore.uop.exceptionVec(storeAddrMisaligned) := false.B
      highAddrStore.uop := req.uop
      highAddrStore.uop.exceptionVec(storeAddrMisaligned) := false.B

      switch (alignedType(1, 0)) {
        is (SB) {
          // byte stores are always aligned
          // NOTE(review): message says "lb" but this is the store path — presumably
          // carried over from the load misalign buffer
          assert(false.B, "lb should not trigger miss align")
        }

        is (SH) {
          // halfword crossing the boundary: split into two byte stores
          lowAddrStore.uop.fuOpType := SB
          lowAddrStore.vaddr := req.vaddr
          lowAddrStore.mask  := 0x1.U << lowAddrStore.vaddr(3, 0)
          lowResultWidth    := BYTE1

          highAddrStore.uop.fuOpType := SB
          highAddrStore.vaddr := req.vaddr + 1.U
          highAddrStore.mask  := 0x1.U << highAddrStore.vaddr(3, 0)
          highResultWidth    := BYTE1
        }

        is (SW) {
          // word store: split depends on vaddr offset within the word
          switch (req.vaddr(1, 0)) {
            is ("b00".U) {
              assert(false.B, "should not trigger miss align")
            }

            is ("b01".U) {
              lowAddrStore.uop.fuOpType := SW
              lowAddrStore.vaddr := req.vaddr - 1.U
              lowAddrStore.mask  := 0xf.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE3

              highAddrStore.uop.fuOpType := SB
              highAddrStore.vaddr := req.vaddr + 3.U
              highAddrStore.mask  := 0x1.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE1
            }

            is ("b10".U) {
              lowAddrStore.uop.fuOpType := SH
              lowAddrStore.vaddr := req.vaddr
              lowAddrStore.mask  := 0x3.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE2

              highAddrStore.uop.fuOpType := SH
              highAddrStore.vaddr := req.vaddr + 2.U
              highAddrStore.mask  := 0x3.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE2
            }

            is ("b11".U) {
              lowAddrStore.uop.fuOpType := SB
              lowAddrStore.vaddr := req.vaddr
              lowAddrStore.mask  := 0x1.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE1

              highAddrStore.uop.fuOpType := SW
              highAddrStore.vaddr := req.vaddr + 1.U
              highAddrStore.mask  := 0xf.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE3
            }
          }
        }

        is (SD) {
          // doubleword store: split depends on vaddr offset within the doubleword
          switch (req.vaddr(2, 0)) {
            is ("b000".U) {
              assert(false.B, "should not trigger miss align")
            }

            is ("b001".U) {
              lowAddrStore.uop.fuOpType := SD
              lowAddrStore.vaddr := req.vaddr - 1.U
              lowAddrStore.mask  := 0xff.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE7

              highAddrStore.uop.fuOpType := SB
              highAddrStore.vaddr := req.vaddr + 7.U
              highAddrStore.mask  := 0x1.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE1
            }

            is ("b010".U) {
              lowAddrStore.uop.fuOpType := SD
              lowAddrStore.vaddr := req.vaddr - 2.U
              lowAddrStore.mask  := 0xff.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE6

              highAddrStore.uop.fuOpType := SH
              highAddrStore.vaddr := req.vaddr + 6.U
              highAddrStore.mask  := 0x3.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE2
            }

            is ("b011".U) {
              lowAddrStore.uop.fuOpType := SD
              lowAddrStore.vaddr := req.vaddr - 3.U
              lowAddrStore.mask  := 0xff.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE5

              highAddrStore.uop.fuOpType := SW
              highAddrStore.vaddr := req.vaddr + 5.U
              highAddrStore.mask  := 0xf.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE3
            }

            is ("b100".U) {
              lowAddrStore.uop.fuOpType := SW
              lowAddrStore.vaddr := req.vaddr
              lowAddrStore.mask  := 0xf.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE4

              highAddrStore.uop.fuOpType := SW
              highAddrStore.vaddr := req.vaddr + 4.U
              highAddrStore.mask  := 0xf.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE4
            }

            is ("b101".U) {
              lowAddrStore.uop.fuOpType := SD
              lowAddrStore.vaddr := req.vaddr - 5.U
              lowAddrStore.mask  := 0xff.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE3

              highAddrStore.uop.fuOpType := SD
              highAddrStore.vaddr := req.vaddr + 3.U
              highAddrStore.mask  := 0xff.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE5
            }

            is ("b110".U) {
              lowAddrStore.uop.fuOpType := SD
              lowAddrStore.vaddr := req.vaddr - 6.U
              lowAddrStore.mask  := 0xff.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE2

              highAddrStore.uop.fuOpType := SD
              highAddrStore.vaddr := req.vaddr + 2.U
              highAddrStore.mask  := 0xff.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE6
            }

            is ("b111".U) {
              lowAddrStore.uop.fuOpType := SD
              lowAddrStore.vaddr := req.vaddr - 7.U
              lowAddrStore.mask  := 0xff.U << lowAddrStore.vaddr(3, 0)
              lowResultWidth    := BYTE1

              highAddrStore.uop.fuOpType := SD
              highAddrStore.vaddr := req.vaddr + 1.U
              highAddrStore.mask  := 0xff.U << highAddrStore.vaddr(3, 0)
              highResultWidth    := BYTE7
            }
          }
        }
      }

      splitStoreReqs(0) := lowAddrStore
      splitStoreReqs(1) := highAddrStore
    }
  }

  // issue the current split access to the store pipeline
  io.splitStoreReq.valid := req_valid && (bufferState === s_req)
  io.splitStoreReq.bits  := splitStoreReqs(curPtr)
  io.splitStoreReq.bits.isvec  := req.isvec
  // Restore the information of H extension store
  // bit encoding: | hsv 1 | store 00 | size(2bit) |
  val reqIsHsv  = LSUOpType.isHsv(req.uop.fuOpType)
  io.splitStoreReq.bits.uop.fuOpType := Mux(req.isvec, req.uop.fuOpType, Cat(reqIsHsv, 0.U(2.W), splitStoreReqs(curPtr).uop.fuOpType(1, 0)))
  io.splitStoreReq.bits.alignedType  := Mux(req.isvec, splitStoreReqs(curPtr).uop.fuOpType(1, 0), req.alignedType)
  // curPtr(0) == 1 means the high (second, final) split access is being issued
  io.splitStoreReq.bits.isFinalSplit := curPtr(0)

  // collect split responses; accumulate exceptions, advance curPtr on success
  when (io.splitStoreResp.valid) {
    val resp = io.splitStoreResp.bits
    splitStoreResp(curPtr) := io.splitStoreResp.bits
    when (isMMIO) {
      unWriteStores := 0.U
      unSentStores := 0.U
      exceptionVec := ExceptionNO.selectByFu(0.U.asTypeOf(exceptionVec.cloneType), StaCfg)
      // delegate to software
      exceptionVec(storeAddrMisaligned) := true.B
    } .elsewhen (hasException) {
      unWriteStores := 0.U
      unSentStores := 0.U
      StaCfg.exceptionOut.map(no => exceptionVec(no) := exceptionVec(no) || resp.uop.exceptionVec(no))
    } .elsewhen (!io.splitStoreResp.bits.need_rep) {
      unSentStores := unSentStores & (~UIntToOH(curPtr)).asUInt
      curPtr := curPtr + 1.U
      exceptionVec := 0.U.asTypeOf(ExceptionVec())
    }
  }

  // per-split-access write data and byte mask
  // NOTE(review): declared but not assigned in this file revision — presumably
  // data merging is done in the store queue; confirm against StoreQueue
  val splitStoreData = RegInit(VecInit(List.fill(maxSplitNum)(0.U.asTypeOf(new XSBundle {
    val wdata = UInt(VLEN.W)
    val wmask = UInt((VLEN / 8).W)
  }))))

  // thermometer masks: lowest `lowResultWidth`/`highResultWidth` bits set
  val wmaskLow  = Wire(Vec(VLEN / 8, Bool()))
  val wmaskHigh = Wire(Vec(VLEN / 8, Bool()))
  (0 until (VLEN / 8)).map {
    case i  => {
      when (i.U < highResultWidth) {
        wmaskHigh(i) := true.B
      } .otherwise {
        wmaskHigh(i) := false.B
      }
      when (i.U < lowResultWidth) {
        wmaskLow(i) := true.B
      } .otherwise {
        wmaskLow(i) := false.B
      }
    }
  }

  // scalar writeback to ROB; exceptions are reported only when the whole
  // instruction ends in MMIO/exception
  io.writeBack.valid := req_valid && (bufferState === s_wb) && !io.storeOutValid && !req.isvec
  io.writeBack.bits.uop := req.uop
  io.writeBack.bits.uop.exceptionVec := DontCare
  StaCfg.exceptionOut.map(no => io.writeBack.bits.uop.exceptionVec(no) := (globalMMIO || globalException) && exceptionVec(no))
  io.writeBack.bits.uop.flushPipe := needFlushPipe
  io.writeBack.bits.uop.replayInst := false.B
  io.writeBack.bits.data := DontCare
  io.writeBack.bits.isFromLoadUnit := DontCare
  io.writeBack.bits.debug.isMMIO := globalMMIO
  // FIXME lyq: temporarily set to false
  io.writeBack.bits.debug.isNC := false.B
  io.writeBack.bits.debug.isPerfCnt := false.B
  io.writeBack.bits.debug.paddr := req.paddr
  io.writeBack.bits.debug.vaddr := req.vaddr

  // vector writeback: feedback goes only to the merge-buffer port the request
  // originally came from (req.portIndex)
  io.vecWriteBack.zipWithIndex.map{
    case (wb, index) => {
      wb.valid := req_valid && (bufferState === s_wb) && req.isvec && !io.storeVecOutValid && UIntToOH(req.portIndex)(index)

      wb.bits.mBIndex           := req.mbIndex
      wb.bits.hit               := true.B
      wb.bits.isvec             := true.B
      wb.bits.sourceType        := RSFeedbackType.tlbMiss
      wb.bits.flushState        := DontCare
      wb.bits.trigger           := TriggerAction.None
      wb.bits.mmio              := globalMMIO
      wb.bits.exceptionVec      := ExceptionNO.selectByFu(exceptionVec, VstuCfg)
      wb.bits.usSecondInv       := req.usSecondInv
      wb.bits.vecFeedback       := true.B
      wb.bits.elemIdx           := req.elemIdx
      wb.bits.alignedType       := req.alignedType
      wb.bits.mask              := req.mask
      wb.bits.vaddr             := req.vaddr
      wb.bits.vaNeedExt         := req.vaNeedExt
      wb.bits.gpaddr            := req.gpaddr
      wb.bits.isForVSnonLeafPTE := req.isForVSnonLeafPTE
      wb.bits.vstart            := req.uop.vpu.vstart
      wb.bits.vecTriggerMask    := 0.U
      wb.bits.nc                := false.B
    }
  }

  // redirect flush: reset everything; req_valid is kept only when an older
  // request is replacing the entry this very cycle (cross-4KB-page enqueue)
  val flush = req_valid && req.uop.robIdx.needFlush(io.redirect)

  when (flush) {
    bufferState := s_idle
    req_valid := Mux(cross4KBPageEnq && cross4KBPageBoundary && !reqRedirect, req_valid, false.B)
    curPtr := 0.U
    unSentStores := 0.U
    unWriteStores := 0.U
    globalException := false.B
    globalMMIO := false.B
    isCrossPage := false.B
    needFlushPipe := false.B
  }

  // NOTE: special case (unaligned store cross page, page fault happens in next page)
  // if exception happens in the higher page address part, overwrite the storeExceptionBuffer vaddr
  val shouldOverwrite = req_valid && cross16BytesBoundary && globalException && (curPtr === 1.U)
  val overwriteExpBuf = GatedValidRegNext(shouldOverwrite)
  val overwriteVaddr = RegEnable(splitStoreResp(curPtr).vaddr, shouldOverwrite)
  val overwriteIsHyper = RegEnable(splitStoreResp(curPtr).isHyper, shouldOverwrite)
  val overwriteGpaddr = RegEnable(splitStoreResp(curPtr).gpaddr, shouldOverwrite)
  val overwriteIsForVSnonLeafPTE = RegEnable(splitStoreResp(curPtr).isForVSnonLeafPTE, shouldOverwrite)

  //TODO In theory, there is no need to overwrite, but for now, the signal is retained in the code in this way.
  // and the signal will be removed after sufficient verification.
  io.overwriteExpBuf.valid := false.B
  io.overwriteExpBuf.vaddr := overwriteVaddr
  io.overwriteExpBuf.isHyper := overwriteIsHyper
  io.overwriteExpBuf.gpaddr := overwriteGpaddr
  io.overwriteExpBuf.isForVSnonLeafPTE := overwriteIsForVSnonLeafPTE

  XSPerfAccumulate("alloc",                  RegNext(!req_valid) && req_valid)
  XSPerfAccumulate("flush",                  flush)
  XSPerfAccumulate("flush_idle",             flush && (bufferState === s_idle))
  XSPerfAccumulate("flush_non_idle",         flush && (bufferState =/= s_idle))
}