1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15* 16* 17* Acknowledgement 18* 19* This implementation is inspired by several key papers: 20* [1] Alex Ramirez, Oliverio J. Santana, Josep L. Larriba-Pey, and Mateo Valero. "[Fetching instruction streams.] 21* (https://doi.org/10.1109/MICRO.2002.1176264)" 35th Annual IEEE/ACM International Symposium on Microarchitecture 22* (MICRO). 2002. 23* [2] Yasuo Ishii, Jaekyu Lee, Krishnendra Nathella, and Dam Sunwoo. "[Rebasing instruction prefetching: An industry 24* perspective.](https://doi.org/10.1109/LCA.2020.3035068)" IEEE Computer Architecture Letters 19.2: 147-150. 2020. 25* [3] Yasuo Ishii, Jaekyu Lee, Krishnendra Nathella, and Dam Sunwoo. "[Re-establishing fetch-directed instruction 26* prefetching: An industry perspective.](https://doi.org/10.1109/ISPASS51385.2021.00034)" 2021 IEEE International 27* Symposium on Performance Analysis of Systems and Software (ISPASS). 2021. 
***************************************************************************************/

package xiangshan.frontend
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.LazyModule
import freechips.rocketchip.diplomacy.LazyModuleImp
import org.chipsalliance.cde.config.Parameters
import utility._
import utility.mbist.MbistInterface
import utility.mbist.MbistPipeline
import utility.sram.SramBroadcastBundle
import utility.sram.SramHelper
import xiangshan._
import xiangshan.backend.fu.NewCSR.PFEvent
import xiangshan.backend.fu.PMP
import xiangshan.backend.fu.PMPChecker
import xiangshan.backend.fu.PMPReqBundle
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._

/** Top-level frontend wrapper (diplomacy LazyModule). Not inlined; it only
  * instantiates [[FrontendInlined]] and re-exports its IO via [[FrontendImp]].
  */
class Frontend()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false
  val inner = LazyModule(new FrontendInlined)
  lazy val module = new FrontendImp(this)
}

/** Module implementation of [[Frontend]]: mirrors the inner module's `io` and
  * `io_perf` one level up, and optionally drives the inner module through a
  * generated reset tree when `DebugOptionsKey.ResetGen` is set.
  */
class FrontendImp(wrapper: Frontend)(implicit p: Parameters) extends LazyModuleImp(wrapper) {
  val io = IO(wrapper.inner.module.io.cloneType)
  val io_perf = IO(wrapper.inner.module.io_perf.cloneType)
  io <> wrapper.inner.module.io
  io_perf <> wrapper.inner.module.io_perf
  if (p(DebugOptionsKey).ResetGen) {
    ResetGen(ResetGenNode(Seq(ModuleNode(wrapper.inner.module))), reset, sim = false, io.dft_reset)
  }
}

/** Inlined frontend: owns the diplomatic sub-nodes (instruction uncache and
  * L1 I-cache); all other frontend modules are plain Chisel modules created in
  * [[FrontendInlinedImp]].
  */
class FrontendInlined()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = true

  val instrUncache = LazyModule(new InstrUncache())
  val icache = LazyModule(new ICache())

  lazy val module = new FrontendInlinedImp(this)
}

/** Frontend datapath top: instantiates and wires BPU, IFU, IBuffer, FTQ,
  * I-cache, instruction uncache, ITLB (with PTW repeaters) and PMP checkers,
  * plus debug assertion checkers, performance events, and MBIST/DFT plumbing.
  */
class FrontendInlinedImp(outer: FrontendInlined) extends LazyModuleImp(outer)
    with HasXSParameter
    with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val reset_vector = Input(UInt(PAddrBits.W))
    val fencei = Input(Bool())
    val ptw = new TlbPtwIO()
    val backend = new FrontendToCtrlIO
    val softPrefetch = Vec(backendParams.LduCnt, Flipped(Valid(new SoftIfetchPrefetchBundle)))
    val sfence = Input(new SfenceBundle)
    val tlbCsr = Input(new TlbCsrBundle)
    val csrCtrl = Input(new CustomCSRCtrlIO)
    val error = ValidIO(new L1CacheErrorInfo)
    val frontendInfo = new Bundle {
      val ibufFull = Output(Bool())
      val bpuInfo = new Bundle {
        val bpRight = Output(UInt(XLEN.W))
        val bpWrong = Output(UInt(XLEN.W))
      }
    }
    val resetInFrontend = Output(Bool())
    val debugTopDown = new Bundle {
      val robHeadVaddr = Flipped(Valid(UInt(VAddrBits.W)))
    }
    // DFT/MBIST inputs are only present when the corresponding feature is enabled.
    val dft = Option.when(hasDFT)(Input(new SramBroadcastBundle))
    val dft_reset = Option.when(hasMbist)(Input(new DFTResetSignals()))
  })

  // decoupled-frontend modules
  val instrUncache = outer.instrUncache.module
  val icache = outer.icache.module
  val bpu = Module(new Predictor)
  val ifu = Module(new NewIFU)
  val ibuffer = Module(new IBuffer)
  val ftq = Module(new Ftq)

  // Backend redirect, registered one cycle before it flushes frontend structures.
  val needFlush = RegNext(io.backend.toFtq.redirect.valid)
  val FlushControlRedirect = RegNext(io.backend.toFtq.redirect.bits.debugIsCtrl)
  val FlushMemVioRedirect = RegNext(io.backend.toFtq.redirect.bits.debugIsMemVio)
  // Per-predictor flush-cause wires, driven from FTQ further below and consumed
  // by the IBuffer for top-down bubble accounting.
  val FlushControlBTBMiss = Wire(Bool())
  val FlushTAGEMiss = Wire(Bool())
  val FlushSCMiss = Wire(Bool())
  val FlushITTAGEMiss = Wire(Bool())
  val FlushRASMiss = Wire(Bool())

  // CSR/sfence inputs are delayed two cycles before use (timing decoupling from backend).
  val tlbCsr = DelayN(io.tlbCsr, 2)
  val csrCtrl = DelayN(io.csrCtrl, 2)
  val sfence = RegNext(RegNext(io.sfence))

  // trigger
  ifu.io.frontendTrigger := csrCtrl.frontend_trigger

  // RVCDecoder fsIsOff
  ifu.io.csr_fsIsOff := csrCtrl.fsIsOff

  // bpu ctrl
  bpu.io.ctrl := csrCtrl.bp_ctrl
  bpu.io.reset_vector := io.reset_vector

  // pmp: ipmpPortNum checkers — ports [0, 2*PortNumber) serve the I-cache,
  // the last port serves the IFU (mmio path).
  val PortNumber = ICacheParameters().PortNumber
  val pmp = Module(new PMP())
  val pmp_check = VecInit(Seq.fill(coreParams.ipmpPortNum)(Module(new PMPChecker(3, sameCycle = true)).io))
  pmp.io.distribute_csr := csrCtrl.distribute_csr
  val pmp_req_vec = Wire(Vec(coreParams.ipmpPortNum, Valid(new PMPReqBundle())))
  (0 until 2 * PortNumber).foreach(i => pmp_req_vec(i) <> icache.io.pmp(i).req)
  pmp_req_vec.last <> ifu.io.pmp.req

  for (i <- pmp_check.indices) {
    if (HasBitmapCheck) {
      // Bitmap check adds the CMODE bit from mbmc to the checker inputs.
      pmp_check(i).apply(tlbCsr.mbmc.CMODE.asBool, tlbCsr.priv.imode, pmp.io.pmp, pmp.io.pma, pmp_req_vec(i))
    } else {
      pmp_check(i).apply(tlbCsr.priv.imode, pmp.io.pmp, pmp.io.pma, pmp_req_vec(i))
    }
  }
  (0 until 2 * PortNumber).foreach(i => icache.io.pmp(i).resp <> pmp_check(i).resp)
  ifu.io.pmp.resp <> pmp_check.last.resp

  // ITLB: first PortNumber requestors are the I-cache (non-blocking),
  // the last one is the IFU (blocking — mmio may need re-tlb).
  val itlb =
    Module(new TLB(coreParams.itlbPortNum, nRespDups = 1, Seq.fill(PortNumber)(false) ++ Seq(true), itlbParams))
  itlb.io.requestor.take(PortNumber) zip icache.io.itlb foreach { case (a, b) => a <> b }
  itlb.io.requestor.last <> ifu.io.iTLBInter // mmio may need re-tlb, blocked
  itlb.io.hartId := io.hartId
  itlb.io.base_connect(sfence, tlbCsr)
  itlb.io.flushPipe.foreach(_ := icache.io.itlbFlushPipe)
  itlb.io.redirect := DontCare // itlb has flushpipe, don't need redirect signal

  // Two-stage path from ITLB to the shared PTW port: filter then repeater.
  val itlb_ptw = Wire(new VectorTlbPtwIO(coreParams.itlbPortNum))
  itlb_ptw.connect(itlb.io.ptw)
  val itlbRepeater1 = PTWFilter(itlbParams.fenceDelay, itlb_ptw, sfence, tlbCsr, l2tlbParams.ifilterSize)
  val itlbRepeater2 =
    PTWRepeaterNB(passReady = false, itlbParams.fenceDelay, itlbRepeater1.io.ptw, io.ptw, sfence, tlbCsr)

  icache.io.ftqPrefetch <> ftq.io.toPrefetch
  icache.io.softPrefetch <> io.softPrefetch

  // IFU-Ftq
  ifu.io.ftqInter.fromFtq <> ftq.io.toIfu
  // FTQ request fires only when both IFU and I-cache can accept it.
  ftq.io.toIfu.req.ready := ifu.io.ftqInter.fromFtq.req.ready && icache.io.fetch.req.ready

  ftq.io.fromIfu <> ifu.io.ftqInter.toFtq
  bpu.io.ftq_to_bpu <> ftq.io.toBpu
  ftq.io.fromBpu <> bpu.io.bpu_to_ftq

  ftq.io.mmioCommitRead <> ifu.io.mmioCommitRead

  // IFU-ICache
  icache.io.fetch.req <> ftq.io.toICache.req
  // Same both-ready condition as the IFU request path above.
  ftq.io.toICache.req.ready := ifu.io.ftqInter.fromFtq.req.ready && icache.io.fetch.req.ready

  ifu.io.icacheInter.resp <> icache.io.fetch.resp
  ifu.io.icacheInter.icacheReady := icache.io.toIFU
  ifu.io.icacheInter.topdownIcacheMiss := icache.io.fetch.topdownIcacheMiss
  ifu.io.icacheInter.topdownItlbMiss := icache.io.fetch.topdownItlbMiss
  icache.io.stop := ifu.io.icacheStop
  icache.io.flush := ftq.io.icacheFlush

  ifu.io.icachePerfInfo := icache.io.perfInfo

  icache.io.csr_pf_enable := RegNext(csrCtrl.pf_ctrl.l1I_pf_enable)

  icache.io.fencei := RegNext(io.fencei)

  // IFU-Ibuffer
  ifu.io.toIbuffer <> ibuffer.io.in

  ftq.io.fromBackend <> io.backend.toFtq
  io.backend.fromFtq := ftq.io.toBackend
  io.backend.fromIfu := ifu.io.toBackend
  io.frontendInfo.bpuInfo <> ftq.io.bpuInfo

  // Shadow copy of the FTQ PC memory, written on the same port as the FTQ's
  // own write; used only by the assertion checkers below.
  val checkPcMem = Reg(Vec(FtqSize, new Ftq_RF_Components))
  when(ftq.io.toBackend.pc_mem_wen) {
    checkPcMem(ftq.io.toBackend.pc_mem_waddr) := ftq.io.toBackend.pc_mem_wdata
  }

  val checkTargetPtr = Wire(Vec(DecodeWidth, new FtqPtr))
  val checkTarget = Wire(Vec(DecodeWidth, UInt(VAddrBits.W)))

  for (i <- 0 until DecodeWidth) {
    checkTargetPtr(i) := ibuffer.io.out(i).bits.ftqPtr
    // Target of slot i's fetch block: the newest entry's target if it is the
    // newest FTQ entry, otherwise the start address of the next FTQ entry.
    checkTarget(i) := Mux(
      ftq.io.toBackend.newest_entry_ptr.value === checkTargetPtr(i).value,
      ftq.io.toBackend.newest_entry_target,
      checkPcMem((checkTargetPtr(i) + 1.U).value).startAddr
    )
  }

  // Assertion-only checker: a not-taken branch and the following instruction
  // should carry the same ftqPtr. Its invocation below is commented out,
  // because this br could be the last instruction in the fetch block.
  def checkNotTakenConsecutive = {
    val prevNotTakenValid = RegInit(0.B)
    val prevNotTakenFtqPtr = Reg(new FtqPtr)
    for (i <- 0 until DecodeWidth - 1) {
      // for instrs that is not the last, if a not-taken br, the next instr should have the same ftqPtr
      // for instrs that is the last, record and check next request
      when(ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr) {
        when(ibuffer.io.out(i + 1).fire) {
          // not last br, check now
        }.otherwise {
          // last br, record its info
          prevNotTakenValid := true.B
          prevNotTakenFtqPtr := checkTargetPtr(i)
        }
      }
      XSError(
        ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr &&
          ibuffer.io.out(i + 1).fire &&
          checkTargetPtr(i).value =/= checkTargetPtr(i + 1).value,
        "not-taken br should have same ftqPtr\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && ibuffer.io.out(DecodeWidth - 1).bits.pd.isBr) {
      // last instr is a br, record its info
      prevNotTakenValid := true.B
      prevNotTakenFtqPtr := checkTargetPtr(DecodeWidth - 1)
    }
    when(prevNotTakenValid && ibuffer.io.out(0).fire) {
      prevNotTakenValid := false.B
    }
    XSError(
      prevNotTakenValid && ibuffer.io.out(0).fire &&
        prevNotTakenFtqPtr.value =/= checkTargetPtr(0).value,
      "not-taken br should have same ftqPtr\n"
    )

    when(needFlush) {
      prevNotTakenValid := false.B
    }
  }

  // Assertion-only checker: a predicted-taken branch and the following
  // instruction should carry consecutive ftqPtrs (the taken br ends its
  // fetch block). State is carried across requests when the taken br is
  // the last dispatched slot.
  def checkTakenNotConsecutive = {
    val prevTakenValid = RegInit(0.B)
    val prevTakenFtqPtr = Reg(new FtqPtr)
    for (i <- 0 until DecodeWidth - 1) {
      // for instrs that is not the last, if a taken br, the next instr should not have the same ftqPtr
      // for instrs that is the last, record and check next request
      when(ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && ibuffer.io.out(i).bits.pred_taken) {
        when(ibuffer.io.out(i + 1).fire) {
          // not last br, check now
        }.otherwise {
          // last br, record its info
          prevTakenValid := true.B
          prevTakenFtqPtr := checkTargetPtr(i)
        }
      }
      XSError(
        ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && ibuffer.io.out(i).bits.pred_taken &&
          ibuffer.io.out(i + 1).fire &&
          (checkTargetPtr(i) + 1.U).value =/= checkTargetPtr(i + 1).value,
        "taken br should have consecutive ftqPtr\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && ibuffer.io.out(DecodeWidth - 1).bits.pd.isBr && ibuffer.io.out(
      DecodeWidth - 1
    ).bits.pred_taken) {
      // last instr is a br, record its info
      prevTakenValid := true.B
      prevTakenFtqPtr := checkTargetPtr(DecodeWidth - 1)
    }
    when(prevTakenValid && ibuffer.io.out(0).fire) {
      prevTakenValid := false.B
    }
    XSError(
      prevTakenValid && ibuffer.io.out(0).fire &&
        (prevTakenFtqPtr + 1.U).value =/= checkTargetPtr(0).value,
      "taken br should have consecutive ftqPtr\n"
    )
    when(needFlush) {
      prevTakenValid := false.B
    }
  }

  // Assertion-only checker: a not-taken branch's successor pc must equal
  // pc + 2 (RVC) or pc + 4.
  def checkNotTakenPC = {
    val prevNotTakenPC = Reg(UInt(VAddrBits.W))
    val prevIsRVC = Reg(Bool())
    val prevNotTakenValid = RegInit(0.B)

    for (i <- 0 until DecodeWidth - 1) {
      when(ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && !ibuffer.io.out(i).bits.pred_taken) {
        when(ibuffer.io.out(i + 1).fire) {}.otherwise {
          // last slot this cycle: remember pc/RVC-ness and check on next dispatch
          prevNotTakenValid := true.B
          prevIsRVC := ibuffer.io.out(i).bits.pd.isRVC
          prevNotTakenPC := ibuffer.io.out(i).bits.pc
        }
      }
      XSError(
        ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && !ibuffer.io.out(i).bits.pred_taken &&
          ibuffer.io.out(i + 1).fire &&
          ibuffer.io.out(i).bits.pc + Mux(ibuffer.io.out(i).bits.pd.isRVC, 2.U, 4.U) =/= ibuffer.io.out(
            i + 1
          ).bits.pc,
        "not-taken br should have consecutive pc\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && ibuffer.io.out(DecodeWidth - 1).bits.pd.isBr && !ibuffer.io.out(
      DecodeWidth - 1
    ).bits.pred_taken) {
      prevNotTakenValid := true.B
      prevIsRVC := ibuffer.io.out(DecodeWidth - 1).bits.pd.isRVC
      prevNotTakenPC := ibuffer.io.out(DecodeWidth - 1).bits.pc
    }
    when(prevNotTakenValid && ibuffer.io.out(0).fire) {
      prevNotTakenValid := false.B
    }
    XSError(
      prevNotTakenValid && ibuffer.io.out(0).fire &&
        prevNotTakenPC + Mux(prevIsRVC, 2.U, 4.U) =/= ibuffer.io.out(0).bits.pc,
      "not-taken br should have same pc\n"
    )
    when(needFlush) {
      prevNotTakenValid := false.B
    }
  }

  // Assertion-only checker: the instruction following a predicted-taken CFI
  // must start at that fetch block's target pc (from checkTarget/checkPcMem).
  def checkTakenPC = {
    val prevTakenFtqPtr = Reg(new FtqPtr)
    val prevTakenValid = RegInit(0.B)
    val prevTakenTarget = Wire(UInt(VAddrBits.W))
    prevTakenTarget := checkPcMem((prevTakenFtqPtr + 1.U).value).startAddr

    for (i <- 0 until DecodeWidth - 1) {
      when(ibuffer.io.out(i).fire && !ibuffer.io.out(i).bits.pd.notCFI && ibuffer.io.out(i).bits.pred_taken) {
        when(ibuffer.io.out(i + 1).fire) {}.otherwise {
          // last slot this cycle: remember the ftqPtr and check on next dispatch
          prevTakenValid := true.B
          prevTakenFtqPtr := checkTargetPtr(i)
        }
      }
      XSError(
        ibuffer.io.out(i).fire && !ibuffer.io.out(i).bits.pd.notCFI && ibuffer.io.out(i).bits.pred_taken &&
          ibuffer.io.out(i + 1).fire &&
          checkTarget(i) =/= ibuffer.io.out(i + 1).bits.pc,
        "taken instr should follow target pc\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && !ibuffer.io.out(DecodeWidth - 1).bits.pd.notCFI && ibuffer.io.out(
      DecodeWidth - 1
    ).bits.pred_taken) {
      prevTakenValid := true.B
      prevTakenFtqPtr := checkTargetPtr(DecodeWidth - 1)
    }
    when(prevTakenValid && ibuffer.io.out(0).fire) {
      prevTakenValid := false.B
    }
    XSError(
      prevTakenValid && ibuffer.io.out(0).fire &&
        prevTakenTarget =/= ibuffer.io.out(0).bits.pc,
      "taken instr should follow target pc\n"
    )
    when(needFlush) {
      prevTakenValid := false.B
    }
  }

  // checkNotTakenConsecutive
  checkTakenNotConsecutive
  checkTakenPC
  checkNotTakenPC

  ifu.io.rob_commits <> io.backend.toFtq.rob_commits

  // IBuffer flush and top-down bubble-cause reporting.
  ibuffer.io.flush := needFlush
  ibuffer.io.ControlRedirect := FlushControlRedirect
  ibuffer.io.MemVioRedirect := FlushMemVioRedirect
  ibuffer.io.ControlBTBMissBubble := FlushControlBTBMiss
  ibuffer.io.TAGEMissBubble := FlushTAGEMiss
  ibuffer.io.SCMissBubble := FlushSCMiss
  ibuffer.io.ITTAGEMissBubble := FlushITTAGEMiss
  ibuffer.io.RASMissBubble := FlushRASMiss
  ibuffer.io.decodeCanAccept := io.backend.canAccept

  FlushControlBTBMiss := ftq.io.ControlBTBMissBubble
  FlushTAGEMiss := ftq.io.TAGEMissBubble
  FlushSCMiss := ftq.io.SCMissBubble
  FlushITTAGEMiss := ftq.io.ITTAGEMissBubble
  FlushRASMiss := ftq.io.RASMissBubble

  io.backend.cfVec <> ibuffer.io.out
  io.backend.stallReason <> ibuffer.io.stallReason

  instrUncache.io.req <> ifu.io.uncacheInter.toUncache
  ifu.io.uncacheInter.fromUncache <> instrUncache.io.resp
  instrUncache.io.flush := false.B
  // I-cache error report is delayed two cycles before leaving the frontend.
  io.error <> RegNext(RegNext(icache.io.error))

  icache.io.hartId := io.hartId

  itlbRepeater1.io.debugTopDown.robHeadVaddr := io.debugTopDown.robHeadVaddr

  io.frontendInfo.ibufFull := RegNext(ibuffer.io.full)
  io.resetInFrontend := reset.asBool

  // PFEvent: distribute performance-event selection CSRs; frontend uses the first 8.
  val pfevent = Module(new PFEvent)
  pfevent.io.distribute_csr := io.csrCtrl.distribute_csr
  val csrevents = pfevent.io.hpmevent.take(8)

  val perfFromUnits = Seq(ifu, ibuffer, icache, ftq, bpu).flatMap(_.getPerfEvents)
  val perfFromIO = Seq()
  val perfBlock = Seq()
  // let index = 0 be no event
  val allPerfEvents = Seq(("noEvent", 0.U)) ++ perfFromUnits ++ perfFromIO ++ perfBlock

  if (printEventCoding) {
    for (((name, inc), i) <- allPerfEvents.zipWithIndex) {
      println("Frontend perfEvents Set", name, inc, i)
    }
  }

  val allPerfInc = allPerfEvents.map(_._2.asTypeOf(new PerfEvent))
  override val perfEvents = HPerfMonitor(csrevents, allPerfInc).getPerfEvents
  generatePerfEvent()

  // MBIST pipeline/interface generation (elaborated only when hasMbist).
  private val mbistPl = MbistPipeline.PlaceMbistPipeline(Int.MaxValue, "MbistPipeFrontend", hasMbist)
  private val mbistIntf = if (hasMbist) {
    val params = mbistPl.get.nodeParams
    val intf = Some(Module(new MbistInterface(
      params = Seq(params),
      ids = Seq(mbistPl.get.childrenIds),
      name = s"MbistIntfFrontend",
      pipelineNum = 1
    )))
    intf.get.toPipeline.head <> mbistPl.get.mbist
    mbistPl.get.registerCSV(intf.get.info, "MbistFrontend")
    intf.get.mbist := DontCare
    dontTouch(intf.get.mbist)
    // TODO: add mbist controller connections here
    intf
  } else {
    None
  }
  // DFT broadcast bundle collected from the SRAMs generated in this scope
  // (NOTE(review): presumably covers all frontend SRAM instances — confirm
  // against SramHelper's collection scope).
  private val sigFromSrams = if (hasDFT) Some(SramHelper.genBroadCastBundleTop()) else None
  private val cg = ClockGate.genTeSrc
  dontTouch(cg)

  if (hasMbist) {
    cg.cgen := io.dft.get.cgen
  } else {
    cg.cgen := false.B
  }

  // Default-drive the SRAM DFT signals, then overwrite from io.dft when present.
  sigFromSrams.foreach { case sig => sig := DontCare }
  sigFromSrams.zip(io.dft).foreach {
    case (sig, dft) =>
      if (hasMbist) {
        sig.ram_hold := dft.ram_hold
        sig.ram_bypass := dft.ram_bypass
        sig.ram_bp_clken := dft.ram_bp_clken
        sig.ram_aux_clk := dft.ram_aux_clk
        sig.ram_aux_ckbp := dft.ram_aux_ckbp
        sig.ram_mcp_hold := dft.ram_mcp_hold
        sig.cgen := dft.cgen
      }
      if (hasSramCtl) {
        sig.ram_ctl := RegNext(dft.ram_ctl)
      }
  }
}