/****************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ****************************************************************************************
 */


package xiangshan.backend.fu.vector

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import yunsuan.vector.VectorIntAdder
import yunsuan.{VipuType, VectorElementFormat}
import xiangshan.{SrcType, SelImm}
import xiangshan.backend.fu.FunctionUnit
import xiangshan.XSCoreParamsKey

class VIPU(implicit p: Parameters) extends FunctionUnit(p(XSCoreParamsKey).VLEN) {
  XSError(io.in.valid && io.in.bits.uop.ctrl.fuOpType === VipuType.dummy, "VIPU OpType not supported")

  val uop = io.in.bits.uop
  val ctrl = uop.ctrl
  val vtype = ctrl.vconfig.vtype

  // TODO: move VecImmExtractor from the exe stage to the read-RF stage (or the forwarding stage).
  // Replicate the SEW-formatted immediate across every XLEN-bit lane of the VLEN-bit operand.
  val imm = VecInit(Seq.fill(VLEN/XLEN)(VecImmExtractor(ctrl.selImm, vtype.vsew, ctrl.imm))).asUInt

  // Use the replicated immediate as src1 when the first operand is an immediate,
  // and swap the operands for ops that consume them in reversed order.
  val _src1 = Mux(SrcType.isImm(ctrl.srcType(0)), imm, io.in.bits.src(0))
  val _src2 = io.in.bits.src(1)
  val src1 = Mux(VipuType.needReverse(ctrl.fuOpType), _src2, _src1)
  val src2 = Mux(VipuType.needReverse(ctrl.fuOpType), _src1, _src2)
  val src4 = io.in.bits.src(3)
  val mask = src4(7,0) // TODO
  val carryIn = Mux(ctrl.fuOpType === VipuType.madc0, 0.U(8.W), mask)

  // Split the VLEN-bit operands into NumAdder XLEN-bit lanes, one VectorIntAdder per lane.
  val AdderWidth = XLEN
  val NumAdder = VLEN / XLEN
  val adder = Seq.fill(NumAdder)(Module(new VectorIntAdder()))
  for(i <- 0 until NumAdder) {
    adder(i).io.in_0 := src1(AdderWidth*(i+1)-1, AdderWidth*i)
    adder(i).io.in_1 := src2(AdderWidth*(i+1)-1, AdderWidth*i)
    adder(i).io.int_format := vtype.vsew // TODO
    adder(i).io.op_code := ctrl.fuOpType
    adder(i).io.carry_or_borrow_in := carryIn // TODO
    adder(i).io.uop_index := DontCare // TODO
  }
  val adder_result = VecInit(adder.map(_.io.out)).asUInt
  // Pack each lane's carry/borrow/compare bits into the low bits of the result,
  // selected by SEW, and fill the unused upper bits with ones.
  val adder_carry = LookupTree(vtype.vsew(1,0), List(
    "b00".U -> Cat(~0.U((VLEN-16).W), VecInit(adder.map(_.io.carry_or_borrow_or_compare_out(7,0))).asUInt),
    "b01".U -> Cat(~0.U((VLEN-8).W),  VecInit(adder.map(_.io.carry_or_borrow_or_compare_out(3,0))).asUInt),
    "b10".U -> Cat(~0.U((VLEN-4).W),  VecInit(adder.map(_.io.carry_or_borrow_or_compare_out(1,0))).asUInt),
    "b11".U -> Cat(~0.U((VLEN-2).W),  VecInit(adder.map(_.io.carry_or_borrow_or_compare_out(0))).asUInt),
  ))

  io.out.bits.data := Mux(VipuType.outIsCarry(ctrl.fuOpType), adder_carry, adder_result)
  io.out.bits.uop := io.in.bits.uop
  io.out.valid := io.in.valid
  io.in.ready := io.out.ready
}

object VecImmExtractor {
  // Sign-extend the 5-bit OPIVI immediate to 8 bits.
  def Imm_OPIVIS(imm: UInt): UInt = {
    SignExt(imm(4,0), 8)
  }
  // Zero-extend the 5-bit OPIVI immediate to 8 bits.
  def Imm_OPIVIU(imm: UInt): UInt = {
    ZeroExt(imm(4,0), 8)
  }

  // Sign-extend the 8-bit immediate to 64 bits, then replicate the SEW-wide slice
  // across one 64-bit lane.
  def imm_sew(sew: UInt, imm: UInt): UInt = {
    val _imm = SignExt(imm(7,0), 64)
    LookupTree(sew(1,0), List(
      "b00".U -> VecInit(Seq.fill(8)(_imm(7,0))).asUInt,
      "b01".U -> VecInit(Seq.fill(4)(_imm(15,0))).asUInt,
      "b10".U -> VecInit(Seq.fill(2)(_imm(31,0))).asUInt,
      "b11".U -> _imm(63,0),
    ))
  }

  def apply(immType: UInt, sew: UInt, imm: UInt): UInt = {
    val _imm = Mux(immType === SelImm.IMM_OPIVIS, Imm_OPIVIS(imm), Imm_OPIVIU(imm))
    imm_sew(sew, _imm(7,0))
  }
}