/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.cache.{AtomicWordIO, HasDCacheParameters, MemoryOpConstants}
import xiangshan.cache.mmu.{TlbCmd, TlbRequestIO}
import difftest._
import xiangshan.ExceptionNO._
import xiangshan.backend.fu.PMPRespBundle
import xiangshan.backend.Bundles.{MemExuInput, MemExuOutput}
import xiangshan.backend.fu.util.SdtrigExt

class AtomicsUnit(implicit p: Parameters) extends XSModule
  with MemoryOpConstants
  with HasDCacheParameters
  with SdtrigExt {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val in = Flipped(Decoupled(new MemExuInput))
    val storeDataIn = Flipped(Valid(new MemExuOutput)) // src2 from rs
    val out = Decoupled(new MemExuOutput)
    val dcache = new AtomicWordIO
    val dtlb = new TlbRequestIO(2)
    val pmpResp = Flipped(new PMPRespBundle())
    val flush_sbuffer = new SbufferFlushBundle
    val feedbackSlow = ValidIO(new RSFeedback)
    val redirect = Flipped(ValidIO(new Redirect))
    val exceptionAddr = ValidIO(new Bundle {
      val vaddr = UInt(VAddrBits.W)
      val gpaddr = UInt(GPAddrBits.W)
    })
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
  })

  //-------------------------------------------------------
  // Atomics Memory Access FSM
  //-------------------------------------------------------
  val s_invalid :: s_tlb_and_flush_sbuffer_req :: s_pm :: s_wait_flush_sbuffer_resp :: s_cache_req :: s_cache_resp :: s_cache_resp_latch :: s_finish :: Nil = Enum(8)
  val state = RegInit(s_invalid)
  val out_valid = RegInit(false.B)
  val data_valid = RegInit(false.B)
  val in = Reg(new MemExuInput())
  val exceptionVec = RegInit(0.U.asTypeOf(ExceptionVec()))
  val atom_override_xtval = RegInit(false.B)
  val have_sent_first_tlb_req = RegInit(false.B)
  val isLr = in.uop.fuOpType === LSUOpType.lr_w || in.uop.fuOpType === LSUOpType.lr_d
  // paddr after translation
  val paddr = Reg(UInt())
  val gpaddr = Reg(UInt())
  val vaddr = in.src(0)
  val is_mmio = Reg(Bool())

  // dcache response data
  val resp_data = Reg(UInt())
  val resp_data_wire = WireInit(0.U)
  val is_lrsc_valid = Reg(Bool())
  // whether the sbuffer is empty
  val sbuffer_empty = io.flush_sbuffer.empty

  // Difftest signals
  val paddr_reg = Reg(UInt(64.W))
  val data_reg = Reg(UInt(64.W))
  val mask_reg = Reg(UInt(8.W))
  val fuop_reg = Reg(UInt(8.W))

  io.exceptionAddr.valid := atom_override_xtval
  io.exceptionAddr.bits.vaddr := in.src(0)
  io.exceptionAddr.bits.gpaddr := gpaddr

  // assign default values to output signals
  io.in.ready := false.B

  io.dcache.req.valid := false.B
  io.dcache.req.bits := DontCare

  io.dtlb.req.valid := false.B
  io.dtlb.req.bits := DontCare
  io.dtlb.req_kill := false.B
  io.dtlb.resp.ready := true.B

  io.flush_sbuffer.valid := false.B

  XSDebug("state: %d\n", state)

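  // FSM summary (the transitions are implemented by the when-blocks below):
  //   s_invalid                   -- io.in.fire ------------> s_tlb_and_flush_sbuffer_req
  //   s_tlb_and_flush_sbuffer_req -- tlb hit, misaligned ---> s_finish (exception)
  //                               -- tlb hit, aligned ------> s_pm
  //   s_pm                        -- va/pa exception -------> s_finish
  //                               -- ok, sbuffer empty -----> s_cache_req
  //                               -- ok, sbuffer not empty -> s_wait_flush_sbuffer_resp
  //   s_wait_flush_sbuffer_resp   -- sbuffer drained -------> s_cache_req
  //   s_cache_req                 -- io.dcache.req.fire ----> s_cache_resp
  //   s_cache_resp                -- miss && replay --------> s_cache_req (retry)
  //                               -- miss && !replay -------> s_cache_resp (wait for missQueue)
  //                               -- hit ------------------->  s_cache_resp_latch
  //   s_cache_resp_latch          -------------------------->  s_finish
  //   s_finish                    -- io.out.fire -----------> s_invalid
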
  when (state === s_invalid) {
    io.in.ready := true.B
    when (io.in.fire) {
      in := io.in.bits
      in.src(1) := in.src(1) // leave src2 unchanged; it is provided later by storeDataIn
      state := s_tlb_and_flush_sbuffer_req
      have_sent_first_tlb_req := false.B
    }
  }

  when (io.storeDataIn.fire) {
    in.src(1) := io.storeDataIn.bits.data
    data_valid := true.B
  }

  assert(!(io.storeDataIn.fire && data_valid), "atomic unit re-receives data")

  // Send TLB feedback to the store issue queue.
  // We send feedback right after we receive the request.
  // Also, we always treat an AMO as a TLB hit, since we keep polling the TLB
  // by ourselves until it hits.
  io.feedbackSlow.valid := RegNext(RegNext(io.in.valid))
  io.feedbackSlow.bits.hit := true.B
  io.feedbackSlow.bits.robIdx := RegEnable(io.in.bits.uop.robIdx, io.in.valid)
  io.feedbackSlow.bits.flushState := DontCare
  io.feedbackSlow.bits.sourceType := DontCare
  io.feedbackSlow.bits.dataInvalidSqIdx := DontCare

  // TLB translation: drive the request, handle exceptions, and flush the
  // sbuffer at the same time.
  when (state === s_tlb_and_flush_sbuffer_req) {
    // send req to dtlb
    // keep firing until tlb hit
    io.dtlb.req.valid := true.B
    io.dtlb.req.bits.vaddr := in.src(0)
    io.dtlb.resp.ready := true.B
    io.dtlb.req.bits.cmd := Mux(isLr, TlbCmd.atom_read, TlbCmd.atom_write)
    io.dtlb.req.bits.debug.pc := in.uop.pc
    io.dtlb.req.bits.debug.robIdx := in.uop.robIdx
    io.dtlb.req.bits.debug.isFirstIssue := false.B
    io.out.bits.uop.debugInfo.tlbFirstReqTime := GTimer() // FIXME lyq: it will always be assigned

    // ask the sbuffer to flush itself if it is not empty
    io.flush_sbuffer.valid := !sbuffer_empty

    // Do not accept a tlb resp in the first cycle.
    // This limitation exists because of the hw prefetcher:
    // when !have_sent_first_tlb_req, the tlb resp may belong to a hw prefetch.
    have_sent_first_tlb_req := true.B

    when (io.dtlb.resp.fire && have_sent_first_tlb_req) {
      paddr := io.dtlb.resp.bits.paddr(0)
      gpaddr := io.dtlb.resp.bits.gpaddr(0)
      // exception handling
      val addrAligned = LookupTree(in.uop.fuOpType(1, 0), List(
        "b00".U -> true.B,                    // b
        "b01".U -> (in.src(0)(0) === 0.U),    // h
        "b10".U -> (in.src(0)(1, 0) === 0.U), // w
        "b11".U -> (in.src(0)(2, 0) === 0.U)  // d
      ))
      exceptionVec(loadAddrMisaligned) := !addrAligned && isLr
      exceptionVec(storeAddrMisaligned) := !addrAligned && !isLr
      exceptionVec(storePageFault) := io.dtlb.resp.bits.excp(0).pf.st
      exceptionVec(loadPageFault) := io.dtlb.resp.bits.excp(0).pf.ld
      exceptionVec(storeAccessFault) := io.dtlb.resp.bits.excp(0).af.st
      exceptionVec(loadAccessFault) := io.dtlb.resp.bits.excp(0).af.ld
      exceptionVec(storeGuestPageFault) := io.dtlb.resp.bits.excp(0).gpf.st
      exceptionVec(loadGuestPageFault) := io.dtlb.resp.bits.excp(0).gpf.ld

      when (!io.dtlb.resp.bits.miss) {
        io.out.bits.uop.debugInfo.tlbRespTime := GTimer()
        when (!addrAligned) {
          // NOTE: when the address is misaligned, we do not actually need to wait for the tlb.
          // The misaligned exception is checked here; tlb exceptions are checked in the next
          // cycle for timing. If there is any exception, the access is not executed.
          state := s_finish
          out_valid := true.B
          atom_override_xtval := true.B
        } .otherwise {
          state := s_pm
        }
      }
    }
  }

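  // s_pm performs the physical-address checks: the PMP/PMA response for the
  // translated address is consumed here, and pmp.mmio marks the access as MMIO.
  // Any exception, from translation (recorded above) or from PMP, skips the
  // dcache access entirely: the FSM jumps to s_finish and atom_override_xtval
  // is set so that *tval reports the faulting virtual address.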
  when (state === s_pm) {
    val pmp = WireInit(io.pmpResp)
    is_mmio := pmp.mmio

    // NOTE: only load/store exceptions are handled here; other exceptions must not be sent to this unit
    val exception_va = exceptionVec(storePageFault) || exceptionVec(loadPageFault) ||
      exceptionVec(storeGuestPageFault) || exceptionVec(loadGuestPageFault) ||
      exceptionVec(storeAccessFault) || exceptionVec(loadAccessFault)
    val exception_pa = pmp.st || pmp.ld
    when (exception_va || exception_pa) {
      state := s_finish
      out_valid := true.B
      atom_override_xtval := true.B
    } .otherwise {
      // if the sbuffer has already been flushed, query the dcache; otherwise wait for the sbuffer
      state := Mux(sbuffer_empty, s_cache_req, s_wait_flush_sbuffer_resp)
    }
    // update the access-fault bits with the PMP result
    exceptionVec(loadAccessFault) := exceptionVec(loadAccessFault) || pmp.ld && isLr
    exceptionVec(storeAccessFault) := exceptionVec(storeAccessFault) || pmp.st || pmp.ld && !isLr
  }

  when (state === s_wait_flush_sbuffer_resp) {
    when (sbuffer_empty) {
      state := s_cache_req
    }
  }

  when (state === s_cache_req) {
    val pipe_req = io.dcache.req.bits
    pipe_req := DontCare

    pipe_req.cmd := LookupTree(in.uop.fuOpType, List(
      LSUOpType.lr_w      -> M_XLR,
      LSUOpType.sc_w      -> M_XSC,
      LSUOpType.amoswap_w -> M_XA_SWAP,
      LSUOpType.amoadd_w  -> M_XA_ADD,
      LSUOpType.amoxor_w  -> M_XA_XOR,
      LSUOpType.amoand_w  -> M_XA_AND,
      LSUOpType.amoor_w   -> M_XA_OR,
      LSUOpType.amomin_w  -> M_XA_MIN,
      LSUOpType.amomax_w  -> M_XA_MAX,
      LSUOpType.amominu_w -> M_XA_MINU,
      LSUOpType.amomaxu_w -> M_XA_MAXU,

      LSUOpType.lr_d      -> M_XLR,
      LSUOpType.sc_d      -> M_XSC,
      LSUOpType.amoswap_d -> M_XA_SWAP,
      LSUOpType.amoadd_d  -> M_XA_ADD,
      LSUOpType.amoxor_d  -> M_XA_XOR,
      LSUOpType.amoand_d  -> M_XA_AND,
      LSUOpType.amoor_d   -> M_XA_OR,
      LSUOpType.amomin_d  -> M_XA_MIN,
      LSUOpType.amomax_d  -> M_XA_MAX,
      LSUOpType.amominu_d -> M_XA_MINU,
      LSUOpType.amomaxu_d -> M_XA_MAXU
    ))
    pipe_req.miss := false.B
    pipe_req.probe := false.B
    pipe_req.probe_need_data := false.B
    pipe_req.source := AMO_SOURCE.U
    pipe_req.addr := get_block_addr(paddr)
    pipe_req.vaddr := get_block_addr(in.src(0)) // vaddr
    pipe_req.word_idx := get_word(paddr)
    pipe_req.amo_data := genWdata(in.src(1), in.uop.fuOpType(1, 0))
    pipe_req.amo_mask := genWmask(paddr, in.uop.fuOpType(1, 0))

    io.dcache.req.valid := Mux(
      io.dcache.req.bits.cmd === M_XLR,
      !io.dcache.block_lr, // block lr to survive an lr storm
      data_valid // wait until src(1) is ready
    )

    when (io.dcache.req.fire) {
      state := s_cache_resp
      paddr_reg := paddr
      data_reg := io.dcache.req.bits.amo_data
      mask_reg := io.dcache.req.bits.amo_mask
      fuop_reg := in.uop.fuOpType
    }
  }

  val dcache_resp_data = Reg(UInt())
  val dcache_resp_id = Reg(UInt())
  val dcache_resp_error = Reg(Bool())

  when (state === s_cache_resp) {
    // when it does not miss:
    //   everything is OK; simply latch the response and move on
    // when it misses but is not replayed:
    //   stay here and wait for the missQueue to handle the miss and replay our request
    // when it misses and is replayed:
    //   the req missed and failed to enter the missQueue; manually replay it later
    // TODO: add assertions:
    // 1. add a replay delay counter?
    // 2. once a req gets into the missQueue, it should not miss again
    when (io.dcache.resp.fire) {
      when (io.dcache.resp.bits.miss) {
        when (io.dcache.resp.bits.replay) {
          state := s_cache_req
        }
      } .otherwise {
        dcache_resp_data := io.dcache.resp.bits.data
        dcache_resp_id := io.dcache.resp.bits.id
        dcache_resp_error := io.dcache.resp.bits.error
        state := s_cache_resp_latch
      }
    }
  }

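  // s_cache_resp_latch derives the writeback value from the latched response.
  // The old memory word sits at the byte offset paddr(2, 0): rdataSel shifts it
  // down, and word-sized ops sign-extend bits (31, 0) to XLEN. For example, a
  // 32-bit AMO whose paddr ends in 0b100 reads its old value from
  // dcache_resp_data(63, 32). SC writes back the raw response, which is assumed
  // to follow the RISC-V convention (0 on success, nonzero on failure).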
  when (state === s_cache_resp_latch) {
    is_lrsc_valid := dcache_resp_id // the response id doubles as the lr/sc success flag
    val rdataSel = LookupTree(paddr(2, 0), List(
      "b000".U -> dcache_resp_data(63, 0),
      "b001".U -> dcache_resp_data(63, 8),
      "b010".U -> dcache_resp_data(63, 16),
      "b011".U -> dcache_resp_data(63, 24),
      "b100".U -> dcache_resp_data(63, 32),
      "b101".U -> dcache_resp_data(63, 40),
      "b110".U -> dcache_resp_data(63, 48),
      "b111".U -> dcache_resp_data(63, 56)
    ))

    resp_data_wire := LookupTree(in.uop.fuOpType, List(
      LSUOpType.lr_w      -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.sc_w      -> dcache_resp_data,
      LSUOpType.amoswap_w -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoadd_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoxor_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoand_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoor_w   -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amomin_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amomax_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amominu_w -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amomaxu_w -> SignExt(rdataSel(31, 0), XLEN),

      LSUOpType.lr_d      -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.sc_d      -> dcache_resp_data,
      LSUOpType.amoswap_d -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoadd_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoxor_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoand_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoor_d   -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amomin_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amomax_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amominu_d -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amomaxu_d -> SignExt(rdataSel(63, 0), XLEN)
    ))

    when (dcache_resp_error && io.csrCtrl.cache_error_enable) {
      exceptionVec(loadAccessFault) := isLr
      exceptionVec(storeAccessFault) := !isLr
      assert(!exceptionVec(loadAccessFault))
      assert(!exceptionVec(storeAccessFault))
    }

    resp_data := resp_data_wire
    state := s_finish
    out_valid := true.B
  }

  io.out.valid := out_valid
  XSError((state === s_finish) =/= out_valid, "out_valid reg error\n")
  io.out.bits := DontCare
  io.out.bits.uop := in.uop
  io.out.bits.uop.exceptionVec := exceptionVec
  io.out.bits.data := resp_data
  io.out.bits.debug.isMMIO := is_mmio
  io.out.bits.debug.paddr := paddr
  when (io.out.fire) {
    XSDebug("atomics writeback: pc %x data %x\n", io.out.bits.uop.pc, io.dcache.resp.bits.data)
    state := s_invalid
    out_valid := false.B
  }

  when (state === s_finish) {
    data_valid := false.B
  }

  when (io.redirect.valid) {
    atom_override_xtval := false.B
  }

  /*
  // atomic trigger (currently disabled)
  val csrCtrl = io.csrCtrl
  val tdata = Reg(Vec(TriggerNum, new MatchTriggerIO))
  val tEnableVec = RegInit(VecInit(Seq.fill(TriggerNum)(false.B)))
  tEnableVec := csrCtrl.mem_trigger.tEnableVec
  when (csrCtrl.mem_trigger.tUpdate.valid) {
    tdata(csrCtrl.mem_trigger.tUpdate.bits.addr) := csrCtrl.mem_trigger.tUpdate.bits.tdata
  }

  val backendTriggerTimingVec = VecInit(tdata.map(_.timing))
  val backendTriggerChainVec = VecInit(tdata.map(_.chain))
  val backendTriggerHitVec = WireInit(VecInit(Seq.fill(TriggerNum)(false.B)))
  val backendTriggerCanFireVec = RegInit(VecInit(Seq.fill(TriggerNum)(false.B)))

  when (state === s_cache_req) {
    // store trigger
    val store_hit = Wire(Vec(TriggerNum, Bool()))
    for (j <- 0 until TriggerNum) {
      store_hit(j) := !tdata(j).select && TriggerCmp(
        vaddr,
        tdata(j).tdata2,
        tdata(j).matchType,
        tEnableVec(j) && tdata(j).store
      )
    }
    // load trigger
    val load_hit = Wire(Vec(TriggerNum, Bool()))
    for (j <- 0 until TriggerNum) {
      load_hit(j) := !tdata(j).select && TriggerCmp(
        vaddr,
        tdata(j).tdata2,
        tdata(j).matchType,
        tEnableVec(j) && tdata(j).load
      )
    }
    backendTriggerHitVec := store_hit.zip(load_hit).map { case (sh, lh) => sh || lh }
    // triggerCanFireVec will be updated at T+1
    TriggerCheckCanFire(TriggerNum, backendTriggerCanFireVec, backendTriggerHitVec, backendTriggerTimingVec, backendTriggerChainVec)
  }

  // Address triggers are compared at s_cache_req and the result is only used
  // at s_finish, so it is safe to delay it.
  io.out.bits.uop.trigger.backendHit := backendTriggerHitVec
  io.out.bits.uop.trigger.backendCanFire := backendTriggerCanFireVec
  */

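  // Difftest: DiffAtomicEvent replays the atomic access against the reference
  // model using the operands latched at s_cache_req (paddr_reg, data_reg,
  // mask_reg, fuop_reg) together with the result computed in s_cache_resp_latch;
  // DiffLrScEvent separately checks SC success at writeback.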
  if (env.EnableDifftest) {
    val difftest = DifftestModule(new DiffAtomicEvent)
    difftest.coreid := io.hartId
    difftest.valid := state === s_cache_resp_latch
    difftest.addr := paddr_reg
    difftest.data := data_reg
    difftest.mask := mask_reg
    difftest.fuop := fuop_reg
    difftest.out := resp_data_wire
  }

  if (env.EnableDifftest || env.AlwaysBasicDiff) {
    val uop = io.out.bits.uop
    val difftest = DifftestModule(new DiffLrScEvent)
    difftest.coreid := io.hartId
    difftest.valid := io.out.fire &&
      (uop.fuOpType === LSUOpType.sc_d || uop.fuOpType === LSUOpType.sc_w)
    difftest.success := is_lrsc_valid
  }
}
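
// An informal end-to-end walkthrough (not normative): an amoadd.w whose vaddr
// ends in 0x4 goes s_tlb_and_flush_sbuffer_req (translate, flush the sbuffer)
// -> s_pm (PMP/PMA check, not MMIO) -> s_cache_req (cmd = M_XA_ADD, amo_mask
// selects bytes 7:4 of the 64-bit word) -> s_cache_resp / s_cache_resp_latch
// (the old word is taken from dcache_resp_data(63, 32) and sign-extended to
// XLEN) -> s_finish (the sign-extended old value is written back).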