xref: /XiangShan/src/main/scala/xiangshan/mem/pipeline/AtomicsUnit.scala (revision deb6421e9ab9b7980dc6c429456fc7bd2161357b)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache.{AtomicWordIO, MemoryOpConstants, HasDCacheParameters}
import xiangshan.cache.mmu.{TlbCmd, TlbRequestIO}
import difftest._
import xiangshan.ExceptionNO._
import xiangshan.backend.fu.PMPRespBundle

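// AtomicsUnit executes AMO/LR/SC uops with a small FSM: it translates the
// virtual address through the dtlb, flushes the sbuffer, performs the PMP/PMA
// check, issues a single request to the dcache, and writes the (sign-extended)
// result back.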
class AtomicsUnit(implicit p: Parameters) extends XSModule with MemoryOpConstants with HasDCacheParameters {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val in            = Flipped(Decoupled(new ExuInput))
    val storeDataIn   = Flipped(Valid(new ExuOutput)) // src2 from rs
    val out           = Decoupled(new ExuOutput)
    val dcache        = new AtomicWordIO
    val dtlb          = new TlbRequestIO(2)
    val pmpResp       = Flipped(new PMPRespBundle())
    val rsIdx         = Input(UInt(log2Up(IssQueSize).W))
    val flush_sbuffer = new SbufferFlushBundle
    val feedbackSlow  = ValidIO(new RSFeedback)
    val redirect      = Flipped(ValidIO(new Redirect))
    val exceptionAddr = ValidIO(UInt(VAddrBits.W))
    val csrCtrl       = Flipped(new CustomCSRCtrlIO)
  })

  //-------------------------------------------------------
  // Atomics Memory Access FSM
  //-------------------------------------------------------
  val s_invalid :: s_tlb_and_flush_sbuffer_req :: s_pm :: s_wait_flush_sbuffer_resp :: s_cache_req :: s_cache_resp :: s_cache_resp_latch :: s_finish :: Nil = Enum(8)
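  // s_invalid:                   wait for a new atomic uop from the issue queue
  // s_tlb_and_flush_sbuffer_req: query the dtlb and ask the sbuffer to flush
  // s_pm:                        PMP/PMA permission check on the physical address
  // s_wait_flush_sbuffer_resp:   wait until the sbuffer is drained
  // s_cache_req:                 issue the atomic request to the dcache
  // s_cache_resp:                wait for the dcache response (replay on miss)
  // s_cache_resp_latch:          latch the response, select and sign-extend the data
  // s_finish:                    hold the result until writeback fires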
  val state = RegInit(s_invalid)
  val out_valid = RegInit(false.B)
  val data_valid = RegInit(false.B)
  val in = Reg(new ExuInput())
  val exceptionVec = RegInit(0.U.asTypeOf(ExceptionVec()))
  val atom_override_xtval = RegInit(false.B)
  val isLr = in.uop.ctrl.fuOpType === LSUOpType.lr_w || in.uop.ctrl.fuOpType === LSUOpType.lr_d
  // paddr after translation
  val paddr = Reg(UInt())
  val vaddr = in.src(0)
  val is_mmio = Reg(Bool())
  // pmp check
  val static_pm = Reg(Valid(Bool())) // valid for static, bits for mmio
  // dcache response data
  val resp_data = Reg(UInt())
  val resp_data_wire = WireInit(0.U)
  val is_lrsc_valid = Reg(Bool())
  // sbuffer is empty or not
  val sbuffer_empty = io.flush_sbuffer.empty

  // Difftest signals
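  // (latched when the dcache request fires, reported in s_cache_resp_latch)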
  val paddr_reg = Reg(UInt(64.W))
  val data_reg = Reg(UInt(64.W))
  val mask_reg = Reg(UInt(8.W))
  val fuop_reg = Reg(UInt(8.W))

  io.exceptionAddr.valid := atom_override_xtval
  io.exceptionAddr.bits  := in.src(0)

  // assign default values to output signals
  io.in.ready          := false.B

  io.dcache.req.valid  := false.B
  io.dcache.req.bits   := DontCare

  io.dtlb.req.valid    := false.B
  io.dtlb.req.bits     := DontCare
  io.dtlb.req_kill     := false.B
  io.dtlb.resp.ready   := true.B

  io.flush_sbuffer.valid := false.B

  XSDebug("state: %d\n", state)

  when (state === s_invalid) {
    io.in.ready := true.B
    when (io.in.fire) {
      in := io.in.bits
      in.src(1) := in.src(1) // do not overwrite src2: store data arrives later via storeDataIn
      state := s_tlb_and_flush_sbuffer_req
    }
  }

  when (io.storeDataIn.fire) {
    in.src(1) := io.storeDataIn.bits.data
    data_valid := true.B
  }

  assert(!(io.storeDataIn.fire && data_valid), "atomic unit should not receive store data twice")

  // Send TLB feedback to the store issue queue.
  // We send feedback right after we receive the request.
  // Atomics are always reported as tlb hits, since this unit keeps
  // polling the tlb by itself until translation succeeds.
  io.feedbackSlow.valid       := RegNext(RegNext(io.in.valid))
  io.feedbackSlow.bits.hit    := true.B
  io.feedbackSlow.bits.rsIdx  := RegEnable(io.rsIdx, io.in.valid)
  io.feedbackSlow.bits.flushState := DontCare
  io.feedbackSlow.bits.sourceType := DontCare
  io.feedbackSlow.bits.dataInvalidSqIdx := DontCare

  // tlb translation: set up request signals and handle exceptions;
  // at the same time, flush the sbuffer
  when (state === s_tlb_and_flush_sbuffer_req) {
    // send req to dtlb
    // keep firing until tlb hit
    io.dtlb.req.valid       := true.B
    io.dtlb.req.bits.vaddr  := in.src(0)
    io.dtlb.resp.ready      := true.B
    io.dtlb.req.bits.cmd    := Mux(isLr, TlbCmd.atom_read, TlbCmd.atom_write)
    io.dtlb.req.bits.debug.pc := in.uop.cf.pc
    io.dtlb.req.bits.debug.isFirstIssue := false.B

    // ask the sbuffer to flush if it is not empty
    io.flush_sbuffer.valid := !sbuffer_empty

    when (io.dtlb.resp.fire) {
      paddr := io.dtlb.resp.bits.paddr(0)
      // exception handling
      val addrAligned = LookupTree(in.uop.ctrl.fuOpType(1,0), List(
        "b00".U   -> true.B,                   //b
        "b01".U   -> (in.src(0)(0) === 0.U),   //h
        "b10".U   -> (in.src(0)(1,0) === 0.U), //w
        "b11".U   -> (in.src(0)(2,0) === 0.U)  //d
      ))
      exceptionVec(loadAddrMisaligned)  := !addrAligned && isLr
      exceptionVec(storeAddrMisaligned) := !addrAligned && !isLr
      exceptionVec(storePageFault)      := io.dtlb.resp.bits.excp(0).pf.st
      exceptionVec(loadPageFault)       := io.dtlb.resp.bits.excp(0).pf.ld
      exceptionVec(storeAccessFault)    := io.dtlb.resp.bits.excp(0).af.st
      exceptionVec(loadAccessFault)     := io.dtlb.resp.bits.excp(0).af.ld
      static_pm := io.dtlb.resp.bits.static_pm

      when (!io.dtlb.resp.bits.miss) {
        when (!addrAligned) {
          // NOTE: the alignment check only depends on the vaddr, so it does not
          // actually need to wait for the tlb. Misalignment is checked here;
          // tlb exceptions are checked next cycle for timing. If an exception
          // is raised, the access is not executed.
          state := s_finish
          out_valid := true.B
          atom_override_xtval := true.B
        } .otherwise {
          state := s_pm
        }
      }
    }
  }

  when (state === s_pm) {
    val pmp = WireInit(io.pmpResp)
    when (static_pm.valid) {
      pmp.ld := false.B
      pmp.st := false.B
      pmp.instr := false.B
      pmp.mmio := static_pm.bits
    }
    is_mmio := pmp.mmio
    // NOTE: only load/store exceptions are handled here; other exceptions must not be routed to this unit
    val exception_va = exceptionVec(storePageFault) || exceptionVec(loadPageFault) ||
      exceptionVec(storeAccessFault) || exceptionVec(loadAccessFault)
    val exception_pa = pmp.st || pmp.ld
    when (exception_va || exception_pa) {
      state := s_finish
      out_valid := true.B
      atom_override_xtval := true.B
    }.otherwise {
      // if sbuffer has been flushed, go to query dcache, otherwise wait for sbuffer.
      state := Mux(sbuffer_empty, s_cache_req, s_wait_flush_sbuffer_resp)
    }
    // update access fault bits
    exceptionVec(loadAccessFault) := exceptionVec(loadAccessFault) || pmp.ld && isLr
    exceptionVec(storeAccessFault) := exceptionVec(storeAccessFault) || pmp.st || pmp.ld && !isLr
  }

  when (state === s_wait_flush_sbuffer_resp) {
    when (sbuffer_empty) {
      state := s_cache_req
    }
  }

  when (state === s_cache_req) {
    val pipe_req = io.dcache.req.bits
    pipe_req := DontCare

    pipe_req.cmd := LookupTree(in.uop.ctrl.fuOpType, List(
      LSUOpType.lr_w      -> M_XLR,
      LSUOpType.sc_w      -> M_XSC,
      LSUOpType.amoswap_w -> M_XA_SWAP,
      LSUOpType.amoadd_w  -> M_XA_ADD,
      LSUOpType.amoxor_w  -> M_XA_XOR,
      LSUOpType.amoand_w  -> M_XA_AND,
      LSUOpType.amoor_w   -> M_XA_OR,
      LSUOpType.amomin_w  -> M_XA_MIN,
      LSUOpType.amomax_w  -> M_XA_MAX,
      LSUOpType.amominu_w -> M_XA_MINU,
      LSUOpType.amomaxu_w -> M_XA_MAXU,

      LSUOpType.lr_d      -> M_XLR,
      LSUOpType.sc_d      -> M_XSC,
      LSUOpType.amoswap_d -> M_XA_SWAP,
      LSUOpType.amoadd_d  -> M_XA_ADD,
      LSUOpType.amoxor_d  -> M_XA_XOR,
      LSUOpType.amoand_d  -> M_XA_AND,
      LSUOpType.amoor_d   -> M_XA_OR,
      LSUOpType.amomin_d  -> M_XA_MIN,
      LSUOpType.amomax_d  -> M_XA_MAX,
      LSUOpType.amominu_d -> M_XA_MINU,
      LSUOpType.amomaxu_d -> M_XA_MAXU
    ))
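    // note: the _w and _d variants share the same dcache command; the access
    // width is encoded in fuOpType(1,0) and applied through genWdata/genWmask below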
    pipe_req.miss := false.B
    pipe_req.probe := false.B
    pipe_req.probe_need_data := false.B
    pipe_req.source := AMO_SOURCE.U
    pipe_req.addr   := get_block_addr(paddr)
    pipe_req.vaddr  := get_block_addr(in.src(0)) // vaddr
    pipe_req.word_idx  := get_word(paddr)
    pipe_req.amo_data  := genWdata(in.src(1), in.uop.ctrl.fuOpType(1,0)) // replicate src2 into the proper byte lanes
    pipe_req.amo_mask  := genWmask(paddr, in.uop.ctrl.fuOpType(1,0))     // byte mask from paddr offset and access size

    io.dcache.req.valid := Mux(
      io.dcache.req.bits.cmd === M_XLR,
      !io.dcache.block_lr, // block lr requests to survive an lr storm
      data_valid // wait until src(1) is ready
    )

    when (io.dcache.req.fire) {
      state := s_cache_resp
      paddr_reg := paddr
      data_reg := io.dcache.req.bits.amo_data
      mask_reg := io.dcache.req.bits.amo_mask
      fuop_reg := in.uop.ctrl.fuOpType
    }
  }

  val dcache_resp_data  = Reg(UInt())
  val dcache_resp_id    = Reg(UInt())
  val dcache_resp_error = Reg(Bool())

  when (state === s_cache_resp) {
    // when not miss:
    //   everything is OK, simply send the response back
    // when miss and not replay:
    //   wait for the missQueue to handle the miss and replay our request
    // when miss and replay:
    //   the request missed and failed to enter the missQueue; manually replay it later
    // TODO: add assertions:
    // 1. add a replay delay counter?
    // 2. once a req gets into the MissQueue, it should not miss any more
    when (io.dcache.resp.fire) {
      when (io.dcache.resp.bits.miss) {
        when (io.dcache.resp.bits.replay) {
          state := s_cache_req
        }
      } .otherwise {
        dcache_resp_data := io.dcache.resp.bits.data
        dcache_resp_id := io.dcache.resp.bits.id
        dcache_resp_error := io.dcache.resp.bits.error
        state := s_cache_resp_latch
      }
    }
  }

  when (state === s_cache_resp_latch) {
    is_lrsc_valid := dcache_resp_id
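    // select the addressed bytes from the 64-bit dcache response
    // according to the low bits of paddr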
    val rdataSel = LookupTree(paddr(2, 0), List(
      "b000".U -> dcache_resp_data(63, 0),
      "b001".U -> dcache_resp_data(63, 8),
      "b010".U -> dcache_resp_data(63, 16),
      "b011".U -> dcache_resp_data(63, 24),
      "b100".U -> dcache_resp_data(63, 32),
      "b101".U -> dcache_resp_data(63, 40),
      "b110".U -> dcache_resp_data(63, 48),
      "b111".U -> dcache_resp_data(63, 56)
    ))

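    // word ops sign-extend the selected 32 bits to XLEN; sc_w/sc_d return the
    // raw success/failure code from the dcache without extension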
    resp_data_wire := LookupTree(in.uop.ctrl.fuOpType, List(
      LSUOpType.lr_w      -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.sc_w      -> dcache_resp_data,
      LSUOpType.amoswap_w -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoadd_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoxor_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoand_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoor_w   -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amomin_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amomax_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amominu_w -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amomaxu_w -> SignExt(rdataSel(31, 0), XLEN),

      LSUOpType.lr_d      -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.sc_d      -> dcache_resp_data,
      LSUOpType.amoswap_d -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoadd_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoxor_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoand_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoor_d   -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amomin_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amomax_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amominu_d -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amomaxu_d -> SignExt(rdataSel(63, 0), XLEN)
    ))

    when (dcache_resp_error && io.csrCtrl.cache_error_enable) {
      exceptionVec(loadAccessFault)  := isLr
      exceptionVec(storeAccessFault) := !isLr
      assert(!exceptionVec(loadAccessFault))
      assert(!exceptionVec(storeAccessFault))
    }

    resp_data := resp_data_wire
    state := s_finish
    out_valid := true.B
  }

  io.out.valid := out_valid
  XSError((state === s_finish) =/= out_valid, "out_valid reg error\n")
  io.out.bits := DontCare
  io.out.bits.uop := in.uop
  io.out.bits.uop.cf.exceptionVec := exceptionVec
  io.out.bits.data := resp_data
  io.out.bits.redirectValid := false.B
  io.out.bits.debug.isMMIO := is_mmio
  io.out.bits.debug.paddr := paddr
  when (io.out.fire) {
    XSDebug("atomics writeback: pc %x data %x\n", io.out.bits.uop.cf.pc, io.out.bits.data)
    state := s_invalid
    out_valid := false.B
  }

  when (state === s_finish) {
    data_valid := false.B
  }

  when (io.redirect.valid) {
    atom_override_xtval := false.B
  }

  // atomic trigger
  val csrCtrl = io.csrCtrl
  val tdata = Reg(Vec(6, new MatchTriggerIO))
  val tEnable = RegInit(VecInit(Seq.fill(6)(false.B)))
  val en = csrCtrl.trigger_enable
  tEnable := VecInit(en(2), en(3), en(7), en(4), en(5), en(9))
  when (csrCtrl.mem_trigger.t.valid) {
    tdata(csrCtrl.mem_trigger.t.bits.addr) := csrCtrl.mem_trigger.t.bits.tdata
  }
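  // the six local slots hold three store triggers (slots 0/1/4) and three
  // load triggers (slots 2/3/5), as laid out by the mappings below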
  val lTriggerMapping = Map(0 -> 2, 1 -> 3, 2 -> 5)
  val sTriggerMapping = Map(0 -> 0, 1 -> 1, 2 -> 4)

  val backendTriggerHitReg = Reg(Vec(6, Bool()))
  backendTriggerHitReg := VecInit(Seq.fill(6)(false.B))

  when (state === s_cache_req) {
    // store trigger
    val store_hit = Wire(Vec(3, Bool()))
    for (j <- 0 until 3) {
      store_hit(j) := !tdata(sTriggerMapping(j)).select && TriggerCmp(
        vaddr,
        tdata(sTriggerMapping(j)).tdata2,
        tdata(sTriggerMapping(j)).matchType,
        tEnable(sTriggerMapping(j))
      )
      backendTriggerHitReg(sTriggerMapping(j)) := store_hit(j)
    }

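    // chained triggers: a chained pair only reports a hit when both
    // comparators in the pair hit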
    when (tdata(0).chain) {
      backendTriggerHitReg(0) := store_hit(0) && store_hit(1)
      backendTriggerHitReg(1) := store_hit(0) && store_hit(1)
    }

    when (!in.uop.cf.trigger.backendEn(0)) {
      backendTriggerHitReg(4) := false.B
    }

    // load trigger
    val load_hit = Wire(Vec(3, Bool()))
    for (j <- 0 until 3) {
      val addrHit = TriggerCmp(
        vaddr,
        tdata(lTriggerMapping(j)).tdata2,
        tdata(lTriggerMapping(j)).matchType,
        tEnable(lTriggerMapping(j))
      )
      load_hit(j) := addrHit && !tdata(lTriggerMapping(j)).select
      backendTriggerHitReg(lTriggerMapping(j)) := load_hit(j)
    }
    when (tdata(2).chain) {
      backendTriggerHitReg(2) := load_hit(0) && load_hit(1)
      backendTriggerHitReg(3) := load_hit(0) && load_hit(1)
    }
    when (!in.uop.cf.trigger.backendEn(1)) {
      backendTriggerHitReg(5) := false.B
    }
  }

  // address triggers are compared at s_cache_req, but the result is not
  // used until s_finish, so it can safely be delayed through a register
  io.out.bits.uop.cf.trigger.backendHit := VecInit(Seq.fill(6)(false.B))
  when (isLr) {
    // enable load triggers
    io.out.bits.uop.cf.trigger.backendHit(2) := backendTriggerHitReg(2)
    io.out.bits.uop.cf.trigger.backendHit(3) := backendTriggerHitReg(3)
    io.out.bits.uop.cf.trigger.backendHit(5) := backendTriggerHitReg(5)
  } .otherwise {
    // enable store triggers
    io.out.bits.uop.cf.trigger.backendHit(0) := backendTriggerHitReg(0)
    io.out.bits.uop.cf.trigger.backendHit(1) := backendTriggerHitReg(1)
    io.out.bits.uop.cf.trigger.backendHit(4) := backendTriggerHitReg(4)
  }

  if (env.EnableDifftest) {
    val difftest = Module(new DifftestAtomicEvent)
    difftest.io.clock      := clock
    difftest.io.coreid     := io.hartId
    difftest.io.atomicResp := state === s_cache_resp_latch
    difftest.io.atomicAddr := paddr_reg
    difftest.io.atomicData := data_reg
    difftest.io.atomicMask := mask_reg
    difftest.io.atomicFuop := fuop_reg
    difftest.io.atomicOut  := resp_data_wire
  }

  if (env.EnableDifftest || env.AlwaysBasicDiff) {
    val uop = io.out.bits.uop
    val difftest = Module(new DifftestLrScEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.valid := io.out.fire &&
      (uop.ctrl.fuOpType === LSUOpType.sc_d || uop.ctrl.fuOpType === LSUOpType.sc_w)
    difftest.io.success := is_lrsc_valid
  }
}