Lines matching "gsi" (whole word) in drivers/net/ipa/gsi.c
16 #include "gsi.h"
28 * The generic software interface (GSI) is an integral component of the IPA,
30 * and the IPA core. The modem uses the GSI layer as well.
40 * | GSI |
51 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
63 * Each channel has a GSI "event ring" associated with it. An event ring
67 * The GSI then writes its doorbell for the event ring, causing the target
83 * Note that all GSI registers are little-endian, which is the assumed
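
The excerpted comment above describes channels and event rings as ring buffers that software fills and then publishes to the hardware with a doorbell write. The stand-alone C sketch below models that idea only; all names (model_ring, ring_doorbell, RING_COUNT) are invented for illustration and this is not driver code.

/* User-space model of a GSI-style ring: software advances a local
 * index and publishes it by "ringing a doorbell" (in the driver, an
 * iowrite32() of the index to a doorbell register).
 */
#include <stdint.h>
#include <stdio.h>

#define RING_COUNT 8	/* element count; real rings are sized per channel */

struct model_ring {
	uint32_t index;			/* next element to use (free-running) */
	uint32_t doorbell;		/* last index published to "hardware" */
	uint64_t elem[RING_COUNT];	/* stand-in for TREs/events */
};

static void ring_doorbell(struct model_ring *ring)
{
	ring->doorbell = ring->index;	/* models the iowrite32() */
}

int main(void)
{
	struct model_ring ring = { 0 };

	for (int i = 0; i < 10; i++)
		ring.elem[ring.index++ % RING_COUNT] = i;	/* slot wraps modulo count */
	ring_doorbell(&ring);
	printf("index %u slot %u doorbell %u\n", (unsigned)ring.index,
	       (unsigned)(ring.index % RING_COUNT), (unsigned)ring.doorbell);
	return 0;
}
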
169 return channel - &channel->gsi->channel[0]; in gsi_channel_id()
172 /* An initialized channel has a non-null GSI pointer */
175 return !!channel->gsi; in gsi_channel_initialized()
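
gsi_channel_id() above recovers a channel's number purely by pointer subtraction against the channel array. A self-contained illustration of the same idiom follows; the struct and names are invented.

#include <stddef.h>
#include <stdio.h>

struct item { int payload; };

/* An element's ID is its offset within the containing array;
 * pointer difference counts elements, not bytes.
 */
static size_t item_id(const struct item *items, const struct item *item)
{
	return item - &items[0];
}

int main(void)
{
	struct item items[4];

	printf("id %zu\n", item_id(items, &items[2]));	/* prints 2 */
	return 0;
}
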
194 /* Update the GSI IRQ type register with the cached value */
195 static void gsi_irq_type_update(struct gsi *gsi, u32 val) in gsi_irq_type_update()
197 const struct reg *reg = gsi_reg(gsi, CNTXT_TYPE_IRQ_MSK); in gsi_irq_type_update()
199 gsi->type_enabled_bitmap = val; in gsi_irq_type_update()
200 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_irq_type_update()
203 static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id) in gsi_irq_type_enable()
205 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id); in gsi_irq_type_enable()
208 static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id) in gsi_irq_type_disable()
210 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id); in gsi_irq_type_disable()
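
These three helpers keep a cached copy (type_enabled_bitmap) of the interrupt-type mask and rewrite the register from the cache on every change, so the driver never has to read the register back. A user-space model with simulated register storage; all names here are invented stand-ins.

#include <stdint.h>
#include <stdio.h>

static uint32_t type_irq_msk_reg;	/* simulated CNTXT_TYPE_IRQ_MSK */
static uint32_t type_enabled_bitmap;	/* cached copy */

static void irq_type_update(uint32_t val)
{
	type_enabled_bitmap = val;
	type_irq_msk_reg = val;		/* models the iowrite32() */
}

static void irq_type_enable(uint32_t type_bit)
{
	irq_type_update(type_enabled_bitmap | type_bit);
}

static void irq_type_disable(uint32_t type_bit)
{
	irq_type_update(type_enabled_bitmap & ~type_bit);
}

int main(void)
{
	irq_type_enable(1u << 0);	/* e.g. channel control */
	irq_type_enable(1u << 2);	/* e.g. global EE */
	irq_type_disable(1u << 0);
	printf("mask register now 0x%08x\n", (unsigned)type_irq_msk_reg);
	return 0;
}
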
214 * is signaled by the event ring control GSI interrupt type, which is
218 static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id) in gsi_irq_ev_ctrl_enable()
227 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR); in gsi_irq_ev_ctrl_enable()
228 iowrite32(~0, gsi->virt + reg_offset(reg)); in gsi_irq_ev_ctrl_enable()
230 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK); in gsi_irq_ev_ctrl_enable()
231 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_irq_ev_ctrl_enable()
232 gsi_irq_type_enable(gsi, GSI_EV_CTRL); in gsi_irq_ev_ctrl_enable()
236 static void gsi_irq_ev_ctrl_disable(struct gsi *gsi) in gsi_irq_ev_ctrl_disable()
240 gsi_irq_type_disable(gsi, GSI_EV_CTRL); in gsi_irq_ev_ctrl_disable()
242 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK); in gsi_irq_ev_ctrl_disable()
243 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_ev_ctrl_disable()
247 * signaled by the channel control GSI interrupt type, which is only
251 static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id) in gsi_irq_ch_ctrl_enable()
260 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR); in gsi_irq_ch_ctrl_enable()
261 iowrite32(~0, gsi->virt + reg_offset(reg)); in gsi_irq_ch_ctrl_enable()
263 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK); in gsi_irq_ch_ctrl_enable()
264 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_irq_ch_ctrl_enable()
266 gsi_irq_type_enable(gsi, GSI_CH_CTRL); in gsi_irq_ch_ctrl_enable()
270 static void gsi_irq_ch_ctrl_disable(struct gsi *gsi) in gsi_irq_ch_ctrl_disable()
274 gsi_irq_type_disable(gsi, GSI_CH_CTRL); in gsi_irq_ch_ctrl_disable()
276 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK); in gsi_irq_ch_ctrl_disable()
277 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_ch_ctrl_disable()
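
The event ring and channel control paths above follow the same enable sequence: clear any stale pending bits, unmask exactly the one ID of interest, then enable the top-level interrupt type; disable reverses the order. A simulated-register sketch of that ordering, with invented names:

#include <stdint.h>
#include <stdio.h>

static uint32_t pending;	/* simulated ..._IRQ status */
static uint32_t mask;		/* simulated ..._IRQ_MSK */

static void ctrl_irq_enable_one(uint32_t id)
{
	pending = 0;		/* models iowrite32(~0, ..._IRQ_CLR) */
	mask = 1u << id;	/* only this channel/ring may interrupt */
	/* ...then gsi_irq_type_enable(gsi, GSI_CH_CTRL or GSI_EV_CTRL) */
}

static void ctrl_irq_disable(void)
{
	/* ...gsi_irq_type_disable() happens first, then mask everything */
	mask = 0;
}

int main(void)
{
	pending = 0x5;		/* pretend stale events were latched */
	ctrl_irq_enable_one(3);
	printf("pending 0x%x mask 0x%x\n", (unsigned)pending, (unsigned)mask);
	ctrl_irq_disable();
	return 0;
}
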
280 static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id) in gsi_irq_ieob_enable_one()
282 bool enable_ieob = !gsi->ieob_enabled_bitmap; in gsi_irq_ieob_enable_one()
286 gsi->ieob_enabled_bitmap |= BIT(evt_ring_id); in gsi_irq_ieob_enable_one()
288 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK); in gsi_irq_ieob_enable_one()
289 val = gsi->ieob_enabled_bitmap; in gsi_irq_ieob_enable_one()
290 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_irq_ieob_enable_one()
294 gsi_irq_type_enable(gsi, GSI_IEOB); in gsi_irq_ieob_enable_one()
297 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask) in gsi_irq_ieob_disable()
302 gsi->ieob_enabled_bitmap &= ~event_mask; in gsi_irq_ieob_disable()
305 if (!gsi->ieob_enabled_bitmap) in gsi_irq_ieob_disable()
306 gsi_irq_type_disable(gsi, GSI_IEOB); in gsi_irq_ieob_disable()
308 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK); in gsi_irq_ieob_disable()
309 val = gsi->ieob_enabled_bitmap; in gsi_irq_ieob_disable()
310 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_irq_ieob_disable()
313 static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id) in gsi_irq_ieob_disable_one()
315 gsi_irq_ieob_disable(gsi, BIT(evt_ring_id)); in gsi_irq_ieob_disable_one()
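
gsi_irq_ieob_enable_one() and gsi_irq_ieob_disable() gate the top-level GSI_IEOB type on whether the per-ring bitmap is empty: the type is enabled when the first bit is set and disabled when the last one clears. A small model with simulated registers and invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ieob_enabled_bitmap;
static bool ieob_type_enabled;	/* stands in for the GSI_IEOB type bit */

static void ieob_enable_one(uint32_t evt_ring_id)
{
	bool first = !ieob_enabled_bitmap;

	ieob_enabled_bitmap |= 1u << evt_ring_id;
	/* write the bitmap to CNTXT_SRC_IEOB_IRQ_MSK here */
	if (first)
		ieob_type_enabled = true;
}

static void ieob_disable(uint32_t event_mask)
{
	ieob_enabled_bitmap &= ~event_mask;
	if (!ieob_enabled_bitmap)
		ieob_type_enabled = false;
	/* write the bitmap to CNTXT_SRC_IEOB_IRQ_MSK here */
}

int main(void)
{
	ieob_enable_one(0);
	ieob_enable_one(5);
	ieob_disable(1u << 0 | 1u << 5);
	printf("bitmap 0x%x type %s\n", (unsigned)ieob_enabled_bitmap,
	       ieob_type_enabled ? "on" : "off");
	return 0;
}
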
319 static void gsi_irq_enable(struct gsi *gsi) in gsi_irq_enable()
327 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN); in gsi_irq_enable()
328 iowrite32(ERROR_INT, gsi->virt + reg_offset(reg)); in gsi_irq_enable()
330 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE); in gsi_irq_enable()
332 /* General GSI interrupts are reported to all EEs; if they occur in gsi_irq_enable()
337 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN); in gsi_irq_enable()
341 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_irq_enable()
343 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL); in gsi_irq_enable()
346 /* Disable all GSI interrupt types */
347 static void gsi_irq_disable(struct gsi *gsi) in gsi_irq_disable()
351 gsi_irq_type_update(gsi, 0); in gsi_irq_disable()
354 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN); in gsi_irq_disable()
355 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_disable()
357 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN); in gsi_irq_disable()
358 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_disable()
380 /* Issue a GSI command by writing a value to a register, then wait for
384 static bool gsi_command(struct gsi *gsi, u32 reg, u32 val) in gsi_command()
387 struct completion *completion = &gsi->completion; in gsi_command()
391 iowrite32(val, gsi->virt + reg); in gsi_command()
398 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_state()
400 const struct reg *reg = gsi_reg(gsi, EV_CH_E_CNTXT_0); in gsi_evt_ring_state()
403 val = ioread32(gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_state()
409 static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id, in gsi_evt_ring_command()
412 struct device *dev = gsi->dev; in gsi_evt_ring_command()
418 gsi_irq_ev_ctrl_enable(gsi, evt_ring_id); in gsi_evt_ring_command()
420 reg = gsi_reg(gsi, EV_CH_CMD); in gsi_evt_ring_command()
424 timeout = !gsi_command(gsi, reg_offset(reg), val); in gsi_evt_ring_command()
426 gsi_irq_ev_ctrl_disable(gsi); in gsi_evt_ring_command()
431 dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n", in gsi_evt_ring_command()
432 opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id)); in gsi_evt_ring_command()
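
gsi_evt_ring_command() relies on the gsi_command() idiom: write the command register, then sleep on the struct completion that the control-interrupt handlers complete, reporting a timeout otherwise. The sketch below models only the control flow, replacing the completion with a polled flag set by a fake ISR; everything in it is an invented stand-in, not the driver's mechanism.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t cmd_reg;
static volatile bool done;

static void fake_isr(void) { done = true; }	/* hardware would trigger this */

static bool issue_command(uint32_t val)
{
	done = false;
	cmd_reg = val;			/* models the iowrite32() */
	fake_isr();
	for (int i = 0; i < 1000; i++)	/* models the bounded wait */
		if (done)
			return true;
	return false;			/* timed out */
}

int main(void)
{
	if (!issue_command(0x1))
		fprintf(stderr, "command timed out\n");
	else
		printf("command completed\n");
	return 0;
}
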
436 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_alloc_command()
441 state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_evt_ring_alloc_command()
443 dev_err(gsi->dev, "event ring %u bad state %u before alloc\n", in gsi_evt_ring_alloc_command()
448 gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE); in gsi_evt_ring_alloc_command()
451 state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_evt_ring_alloc_command()
455 dev_err(gsi->dev, "event ring %u bad state %u after alloc\n", in gsi_evt_ring_alloc_command()
461 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
462 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_reset_command()
466 state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_evt_ring_reset_command()
469 dev_err(gsi->dev, "event ring %u bad state %u before reset\n", in gsi_evt_ring_reset_command()
474 gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET); in gsi_evt_ring_reset_command()
477 state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_evt_ring_reset_command()
481 dev_err(gsi->dev, "event ring %u bad state %u after reset\n", in gsi_evt_ring_reset_command()
486 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_de_alloc_command()
490 state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_evt_ring_de_alloc_command()
492 dev_err(gsi->dev, "event ring %u state %u before dealloc\n", in gsi_evt_ring_de_alloc_command()
497 gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC); in gsi_evt_ring_de_alloc_command()
500 state = gsi_evt_ring_state(gsi, evt_ring_id); in gsi_evt_ring_de_alloc_command()
504 dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n", in gsi_evt_ring_de_alloc_command()
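
The alloc, reset, and de-alloc commands above all validate the ring state before issuing a command and re-check it afterward. A compressed model of that check/command/re-check pattern, with states abbreviated and the hardware faked:

#include <stdio.h>

enum ring_state {	/* subset of the hardware states */
	RING_NOT_ALLOCATED,
	RING_ALLOCATED,
	RING_ERROR,
};

static enum ring_state state;

static void hw_alloc(void) { state = RING_ALLOCATED; }	/* fake hardware */

static int ring_alloc_command(void)
{
	if (state != RING_NOT_ALLOCATED) {
		fprintf(stderr, "bad state %d before alloc\n", state);
		return -1;
	}
	hw_alloc();	/* issue the command, wait for completion */
	if (state != RING_ALLOCATED) {
		fprintf(stderr, "bad state %d after alloc\n", state);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("alloc %s\n", ring_alloc_command() ? "failed" : "ok");
	return 0;
}
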
511 const struct reg *reg = gsi_reg(channel->gsi, CH_C_CNTXT_0); in gsi_channel_state()
513 struct gsi *gsi = channel->gsi; in gsi_channel_state()
514 void __iomem *virt = gsi->virt; in gsi_channel_state()
517 reg = gsi_reg(gsi, CH_C_CNTXT_0); in gsi_channel_state()
528 struct gsi *gsi = channel->gsi; in gsi_channel_command()
529 struct device *dev = gsi->dev; in gsi_channel_command()
535 gsi_irq_ch_ctrl_enable(gsi, channel_id); in gsi_channel_command()
537 reg = gsi_reg(gsi, CH_CMD); in gsi_channel_command()
541 timeout = !gsi_command(gsi, reg_offset(reg), val); in gsi_channel_command()
543 gsi_irq_ch_ctrl_disable(gsi); in gsi_channel_command()
548 dev_err(dev, "GSI command %u for channel %u timed out, state %u\n", in gsi_channel_command()
552 /* Allocate GSI channel in NOT_ALLOCATED state */
553 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) in gsi_channel_alloc_command()
555 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_alloc_command()
556 struct device *dev = gsi->dev; in gsi_channel_alloc_command()
583 struct device *dev = channel->gsi->dev; in gsi_channel_start_command()
607 /* Stop a GSI channel in STARTED state */
610 struct device *dev = channel->gsi->dev; in gsi_channel_stop_command()
645 /* Reset a GSI channel in ALLOCATED or ERROR state. */
648 struct device *dev = channel->gsi->dev; in gsi_channel_reset_command()
673 /* Deallocate an ALLOCATED GSI channel */
674 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) in gsi_channel_de_alloc_command()
676 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_de_alloc_command()
677 struct device *dev = gsi->dev; in gsi_channel_de_alloc_command()
702 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index) in gsi_evt_ring_doorbell()
704 const struct reg *reg = gsi_reg(gsi, EV_CH_E_DOORBELL_0); in gsi_evt_ring_doorbell()
705 struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring; in gsi_evt_ring_doorbell()
712 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_doorbell()
716 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_program()
718 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_program()
723 reg = gsi_reg(gsi, EV_CH_E_CNTXT_0); in gsi_evt_ring_program()
729 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
731 reg = gsi_reg(gsi, EV_CH_E_CNTXT_1); in gsi_evt_ring_program()
733 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
739 reg = gsi_reg(gsi, EV_CH_E_CNTXT_2); in gsi_evt_ring_program()
741 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
743 reg = gsi_reg(gsi, EV_CH_E_CNTXT_3); in gsi_evt_ring_program()
745 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
748 reg = gsi_reg(gsi, EV_CH_E_CNTXT_8); in gsi_evt_ring_program()
752 iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
755 reg = gsi_reg(gsi, EV_CH_E_CNTXT_9); in gsi_evt_ring_program()
756 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
758 reg = gsi_reg(gsi, EV_CH_E_CNTXT_10); in gsi_evt_ring_program()
759 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
761 reg = gsi_reg(gsi, EV_CH_E_CNTXT_11); in gsi_evt_ring_program()
762 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
765 reg = gsi_reg(gsi, EV_CH_E_CNTXT_12); in gsi_evt_ring_program()
766 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
768 reg = gsi_reg(gsi, EV_CH_E_CNTXT_13); in gsi_evt_ring_program()
769 iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id)); in gsi_evt_ring_program()
772 gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index); in gsi_evt_ring_program()
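
gsi_evt_ring_program() loads the 64-bit ring base address into a pair of 32-bit context registers (in the driver source, the low half goes to EV_CH_E_CNTXT_2 and the high half to EV_CH_E_CNTXT_3). A minimal sketch of that split, with simulated registers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t cntxt_2, cntxt_3;	/* simulated registers */

static void program_base(uint64_t addr)
{
	cntxt_2 = (uint32_t)addr;		/* lower_32_bits(addr) */
	cntxt_3 = (uint32_t)(addr >> 32);	/* upper_32_bits(addr) */
}

int main(void)
{
	program_base(0x1122334455667788ULL);
	printf("lo 0x%08" PRIx32 " hi 0x%08" PRIx32 "\n", cntxt_2, cntxt_3);
	return 0;
}
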
829 struct gsi *gsi = channel->gsi; in gsi_channel_program()
835 reg = gsi_reg(gsi, CH_C_CNTXT_0); in gsi_channel_program()
838 val = ch_c_cntxt_0_type_encode(gsi->version, reg, GSI_CHANNEL_TYPE_GPI); in gsi_channel_program()
841 if (gsi->version < IPA_VERSION_5_0) in gsi_channel_program()
844 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); in gsi_channel_program()
846 reg = gsi_reg(gsi, CH_C_CNTXT_1); in gsi_channel_program()
848 if (gsi->version >= IPA_VERSION_5_0) in gsi_channel_program()
850 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); in gsi_channel_program()
856 reg = gsi_reg(gsi, CH_C_CNTXT_2); in gsi_channel_program()
858 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); in gsi_channel_program()
860 reg = gsi_reg(gsi, CH_C_CNTXT_3); in gsi_channel_program()
862 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); in gsi_channel_program()
864 reg = gsi_reg(gsi, CH_C_QOS); in gsi_channel_program()
874 if (gsi->version < IPA_VERSION_4_0 && doorbell) in gsi_channel_program()
880 if (gsi->version >= IPA_VERSION_4_0 && !channel->command) { in gsi_channel_program()
882 if (gsi->version < IPA_VERSION_4_5) in gsi_channel_program()
888 if (gsi->version >= IPA_VERSION_4_9) in gsi_channel_program()
891 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); in gsi_channel_program()
899 reg = gsi_reg(gsi, CH_C_SCRATCH_0); in gsi_channel_program()
901 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); in gsi_channel_program()
903 reg = gsi_reg(gsi, CH_C_SCRATCH_1); in gsi_channel_program()
905 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); in gsi_channel_program()
907 reg = gsi_reg(gsi, CH_C_SCRATCH_2); in gsi_channel_program()
909 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); in gsi_channel_program()
915 reg = gsi_reg(gsi, CH_C_SCRATCH_3); in gsi_channel_program()
917 val = ioread32(gsi->virt + offset); in gsi_channel_program()
919 iowrite32(val, gsi->virt + offset); in gsi_channel_program()
926 struct gsi *gsi = channel->gsi; in __gsi_channel_start()
929 /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ in __gsi_channel_start()
930 if (resume && gsi->version < IPA_VERSION_4_0) in __gsi_channel_start()
933 mutex_lock(&gsi->mutex); in __gsi_channel_start()
937 mutex_unlock(&gsi->mutex); in __gsi_channel_start()
942 /* Start an allocated GSI channel */
943 int gsi_channel_start(struct gsi *gsi, u32 channel_id) in gsi_channel_start()
945 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_start()
950 gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id); in gsi_channel_start()
954 gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); in gsi_channel_start()
978 struct gsi *gsi = channel->gsi; in __gsi_channel_stop()
984 /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ in __gsi_channel_stop()
985 if (suspend && gsi->version < IPA_VERSION_4_0) in __gsi_channel_stop()
988 mutex_lock(&gsi->mutex); in __gsi_channel_stop()
992 mutex_unlock(&gsi->mutex); in __gsi_channel_stop()
998 int gsi_channel_stop(struct gsi *gsi, u32 channel_id) in gsi_channel_stop()
1000 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_stop()
1008 gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); in gsi_channel_stop()
1015 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) in gsi_channel_reset()
1017 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_reset()
1019 mutex_lock(&gsi->mutex); in gsi_channel_reset()
1023 if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa) in gsi_channel_reset()
1031 mutex_unlock(&gsi->mutex); in gsi_channel_reset()
1035 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id) in gsi_channel_suspend()
1037 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_suspend()
1051 int gsi_channel_resume(struct gsi *gsi, u32 channel_id) in gsi_channel_resume()
1053 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_resume()
1058 /* Prevent all GSI interrupts while suspended */
1059 void gsi_suspend(struct gsi *gsi) in gsi_suspend()
1061 disable_irq(gsi->irq); in gsi_suspend()
1064 /* Allow all GSI interrupts again when resuming */
1065 void gsi_resume(struct gsi *gsi) in gsi_resume()
1067 enable_irq(gsi->irq); in gsi_resume()
1072 struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id]; in gsi_trans_tx_committed()
1084 struct gsi *gsi = trans->gsi; in gsi_trans_tx_queued()
1089 channel = &gsi->channel[channel_id]; in gsi_trans_tx_queued()
1096 ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count); in gsi_trans_tx_queued()
1117 struct gsi *gsi = trans->gsi; in gsi_trans_tx_completed()
1122 channel = &gsi->channel[channel_id]; in gsi_trans_tx_completed()
1129 ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count); in gsi_trans_tx_completed()
1133 static void gsi_isr_chan_ctrl(struct gsi *gsi) in gsi_isr_chan_ctrl()
1138 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ); in gsi_isr_chan_ctrl()
1139 channel_mask = ioread32(gsi->virt + reg_offset(reg)); in gsi_isr_chan_ctrl()
1141 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR); in gsi_isr_chan_ctrl()
1142 iowrite32(channel_mask, gsi->virt + reg_offset(reg)); in gsi_isr_chan_ctrl()
1149 complete(&gsi->completion); in gsi_isr_chan_ctrl()
1154 static void gsi_isr_evt_ctrl(struct gsi *gsi) in gsi_isr_evt_ctrl()
1159 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ); in gsi_isr_evt_ctrl()
1160 event_mask = ioread32(gsi->virt + reg_offset(reg)); in gsi_isr_evt_ctrl()
1162 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR); in gsi_isr_evt_ctrl()
1163 iowrite32(event_mask, gsi->virt + reg_offset(reg)); in gsi_isr_evt_ctrl()
1170 complete(&gsi->completion); in gsi_isr_evt_ctrl()
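
gsi_isr_chan_ctrl() and gsi_isr_evt_ctrl() both snapshot the pending mask and then write that same mask to the matching _CLR register, acknowledging only what was observed, before servicing each set bit. A stand-alone model of the idiom, with simulated registers:

#include <stdint.h>
#include <stdio.h>

static uint32_t irq_status = 0x0a;	/* bits 1 and 3 pending */
static uint32_t irq_clr;

int main(void)
{
	uint32_t mask = irq_status;	/* ioread32() of the status register */

	irq_clr = mask;			/* iowrite32(mask, clr): ack only these */
	irq_status &= ~mask;		/* what the hardware does on ack */

	while (mask) {
		uint32_t id = (uint32_t)__builtin_ctz(mask);

		mask &= ~(1u << id);
		printf("servicing ring/channel %u\n", (unsigned)id);
	}
	return 0;
}
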
1176 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code) in gsi_isr_glob_chan_err()
1179 dev_err(gsi->dev, "channel %u out of resources\n", channel_id); in gsi_isr_glob_chan_err()
1180 complete(&gsi->completion); in gsi_isr_glob_chan_err()
1185 dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n", in gsi_isr_glob_chan_err()
1191 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code) in gsi_isr_glob_evt_err()
1194 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_isr_glob_evt_err()
1197 complete(&gsi->completion); in gsi_isr_glob_evt_err()
1198 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n", in gsi_isr_glob_evt_err()
1204 dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n", in gsi_isr_glob_evt_err()
1209 static void gsi_isr_glob_err(struct gsi *gsi) in gsi_isr_glob_err()
1221 log_reg = gsi_reg(gsi, ERROR_LOG); in gsi_isr_glob_err()
1223 val = ioread32(gsi->virt + offset); in gsi_isr_glob_err()
1224 iowrite32(0, gsi->virt + offset); in gsi_isr_glob_err()
1226 clr_reg = gsi_reg(gsi, ERROR_LOG_CLR); in gsi_isr_glob_err()
1227 iowrite32(~0, gsi->virt + reg_offset(clr_reg)); in gsi_isr_glob_err()
1236 gsi_isr_glob_chan_err(gsi, ee, which, code); in gsi_isr_glob_err()
1238 gsi_isr_glob_evt_err(gsi, ee, which, code); in gsi_isr_glob_err()
1240 dev_err(gsi->dev, "unexpected global error 0x%08x\n", type); in gsi_isr_glob_err()
1244 static void gsi_isr_gp_int1(struct gsi *gsi) in gsi_isr_gp_int1()
1250 /* This interrupt is used to handle completions of GENERIC GSI in gsi_isr_gp_int1()
1269 reg = gsi_reg(gsi, CNTXT_SCRATCH_0); in gsi_isr_gp_int1()
1270 val = ioread32(gsi->virt + reg_offset(reg)); in gsi_isr_gp_int1()
1276 gsi->result = 0; in gsi_isr_gp_int1()
1280 gsi->result = -EAGAIN; in gsi_isr_gp_int1()
1284 dev_err(gsi->dev, "global INT1 generic result %u\n", result); in gsi_isr_gp_int1()
1285 gsi->result = -EIO; in gsi_isr_gp_int1()
1289 complete(&gsi->completion); in gsi_isr_gp_int1()
1293 static void gsi_isr_glob_ee(struct gsi *gsi) in gsi_isr_glob_ee()
1298 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_STTS); in gsi_isr_glob_ee()
1299 val = ioread32(gsi->virt + reg_offset(reg)); in gsi_isr_glob_ee()
1302 gsi_isr_glob_err(gsi); in gsi_isr_glob_ee()
1304 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_CLR); in gsi_isr_glob_ee()
1305 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_isr_glob_ee()
1311 gsi_isr_gp_int1(gsi); in gsi_isr_glob_ee()
1315 dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val); in gsi_isr_glob_ee()
1319 static void gsi_isr_ieob(struct gsi *gsi) in gsi_isr_ieob()
1324 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ); in gsi_isr_ieob()
1325 event_mask = ioread32(gsi->virt + reg_offset(reg)); in gsi_isr_ieob()
1327 gsi_irq_ieob_disable(gsi, event_mask); in gsi_isr_ieob()
1329 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_CLR); in gsi_isr_ieob()
1330 iowrite32(event_mask, gsi->virt + reg_offset(reg)); in gsi_isr_ieob()
1337 napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi); in gsi_isr_ieob()
1342 static void gsi_isr_general(struct gsi *gsi) in gsi_isr_general()
1344 struct device *dev = gsi->dev; in gsi_isr_general()
1348 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_STTS); in gsi_isr_general()
1349 val = ioread32(gsi->virt + reg_offset(reg)); in gsi_isr_general()
1351 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_CLR); in gsi_isr_general()
1352 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_isr_general()
1358 * gsi_isr() - Top level GSI interrupt service routine
1360 * @dev_id: GSI pointer supplied to request_irq()
1362 * This is the main handler function registered for the GSI IRQ. Each type
1367 struct gsi *gsi = dev_id; in gsi_isr()
1373 reg = gsi_reg(gsi, CNTXT_TYPE_IRQ); in gsi_isr()
1376 /* enum gsi_irq_type_id defines GSI interrupt types */ in gsi_isr()
1377 while ((intr_mask = ioread32(gsi->virt + offset))) { in gsi_isr()
1378 /* intr_mask contains bitmask of pending GSI interrupts */ in gsi_isr()
1389 gsi_isr_chan_ctrl(gsi); in gsi_isr()
1392 gsi_isr_evt_ctrl(gsi); in gsi_isr()
1395 gsi_isr_glob_ee(gsi); in gsi_isr()
1398 gsi_isr_ieob(gsi); in gsi_isr()
1401 gsi_isr_general(gsi); in gsi_isr()
1404 dev_err(gsi->dev, in gsi_isr()
1412 dev_err(gsi->dev, "interrupt flood\n"); in gsi_isr()
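
gsi_isr() keeps re-reading the pending-type register until it reads zero, dispatching one type bit per inner iteration in ascending bit order, and bails out with an "interrupt flood" error if the outer loop spins too long. A model of that loop shape; the fake register here deasserts after one read, and the budget value is invented.

#include <stdint.h>
#include <stdio.h>

#define ISR_BUDGET 16

static uint32_t type_irq = 0x06;	/* pretend two types are pending */

static uint32_t read_pending(void)
{
	uint32_t val = type_irq;

	type_irq = 0;			/* fake hardware deassert */
	return val;
}

int main(void)
{
	uint32_t intr_mask;
	int budget = ISR_BUDGET;

	while ((intr_mask = read_pending())) {
		do {
			/* isolate the lowest set bit, as BIT(__ffs()) does */
			uint32_t gsi_intr = intr_mask & -intr_mask;

			intr_mask ^= gsi_intr;
			printf("dispatch type bit 0x%x\n", (unsigned)gsi_intr);
		} while (intr_mask);
		if (!budget--) {
			fprintf(stderr, "interrupt flood\n");
			break;
		}
	}
	return 0;
}
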
1420 /* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
1421 static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev) in gsi_irq_init()
1425 ret = platform_get_irq_byname(pdev, "gsi"); in gsi_irq_init()
1429 gsi->irq = ret; in gsi_irq_init()
1436 gsi_event_trans(struct gsi *gsi, struct gsi_event *event) in gsi_event_trans()
1444 channel = &gsi->channel[channel_id]; in gsi_event_trans()
1445 if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id)) in gsi_event_trans()
1462 * @gsi: GSI pointer
1474 * This function is called whenever we learn that the GSI hardware has filled
1484 static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index) in gsi_evt_ring_update()
1486 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_evt_ring_update()
1511 trans = gsi_event_trans(gsi, event); in gsi_evt_ring_update()
1530 gsi_evt_ring_doorbell(gsi, evt_ring_id, index); in gsi_evt_ring_update()
1534 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) in gsi_ring_alloc()
1537 struct device *dev = gsi->dev; in gsi_ring_alloc()
1556 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring) in gsi_ring_free()
1560 dma_free_coherent(gsi->dev, size, ring->virt, ring->addr); in gsi_ring_free()
1564 static int gsi_evt_ring_id_alloc(struct gsi *gsi) in gsi_evt_ring_id_alloc()
1568 if (gsi->event_bitmap == ~0U) { in gsi_evt_ring_id_alloc()
1569 dev_err(gsi->dev, "event rings exhausted\n"); in gsi_evt_ring_id_alloc()
1573 evt_ring_id = ffz(gsi->event_bitmap); in gsi_evt_ring_id_alloc()
1574 gsi->event_bitmap |= BIT(evt_ring_id); in gsi_evt_ring_id_alloc()
1580 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id) in gsi_evt_ring_id_free()
1582 gsi->event_bitmap &= ~BIT(evt_ring_id); in gsi_evt_ring_id_free()
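
Event ring IDs come from a 32-bit in-use bitmap: allocation takes the first zero bit (the kernel's ffz()), exhaustion is detected when the bitmap is all ones, and freeing clears the bit. The runnable model below mirrors gsi_evt_ring_id_alloc() and gsi_evt_ring_id_free(); the 20-ring limit is invented for illustration, and __builtin_ctz(~bitmap) stands in for ffz().

#include <stdint.h>
#include <stdio.h>

static uint32_t event_bitmap = ~0u << 20;	/* e.g. only 20 rings exist */

static int evt_ring_id_alloc(void)
{
	if (event_bitmap == ~0u)
		return -1;			/* rings exhausted */
	int id = __builtin_ctz(~event_bitmap);	/* first zero bit */
	event_bitmap |= 1u << id;
	return id;
}

static void evt_ring_id_free(int id)
{
	event_bitmap &= ~(1u << id);
}

int main(void)
{
	int a = evt_ring_id_alloc();
	int b = evt_ring_id_alloc();

	printf("allocated %d then %d\n", a, b);		/* 0 then 1 */
	evt_ring_id_free(a);
	printf("next %d\n", evt_ring_id_alloc());	/* 0 again */
	return 0;
}
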
1590 struct gsi *gsi = channel->gsi; in gsi_channel_doorbell()
1594 reg = gsi_reg(gsi, CH_C_DOORBELL_0); in gsi_channel_doorbell()
1597 iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); in gsi_channel_doorbell()
1604 struct gsi *gsi = channel->gsi; in gsi_channel_update()
1612 evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_channel_update()
1618 reg = gsi_reg(gsi, EV_CH_E_CNTXT_4); in gsi_channel_update()
1620 index = gsi_ring_index(ring, ioread32(gsi->virt + offset)); in gsi_channel_update()
1625 trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1)); in gsi_channel_update()
1634 gsi_evt_ring_update(gsi, evt_ring_id, index); in gsi_channel_update()
1689 gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id); in gsi_channel_poll()
1709 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id) in gsi_channel_setup_one()
1711 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_setup_one()
1718 ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id); in gsi_channel_setup_one()
1722 gsi_evt_ring_program(gsi, evt_ring_id); in gsi_channel_setup_one()
1724 ret = gsi_channel_alloc_command(gsi, channel_id); in gsi_channel_setup_one()
1731 netif_napi_add_tx(gsi->dummy_dev, &channel->napi, in gsi_channel_setup_one()
1734 netif_napi_add(gsi->dummy_dev, &channel->napi, in gsi_channel_setup_one()
1741 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); in gsi_channel_setup_one()
1747 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id) in gsi_channel_teardown_one()
1749 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_teardown_one()
1757 gsi_channel_de_alloc_command(gsi, channel_id); in gsi_channel_teardown_one()
1758 gsi_evt_ring_reset_command(gsi, evt_ring_id); in gsi_channel_teardown_one()
1759 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); in gsi_channel_teardown_one()
1766 static int gsi_generic_command(struct gsi *gsi, u32 channel_id, in gsi_generic_command()
1778 * A generic EE command completes with a GSI global interrupt of in gsi_generic_command()
1784 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN); in gsi_generic_command()
1786 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_generic_command()
1789 reg = gsi_reg(gsi, CNTXT_SCRATCH_0); in gsi_generic_command()
1791 val = ioread32(gsi->virt + offset); in gsi_generic_command()
1794 iowrite32(val, gsi->virt + offset); in gsi_generic_command()
1797 reg = gsi_reg(gsi, GENERIC_CMD); in gsi_generic_command()
1801 if (gsi->version >= IPA_VERSION_4_11) in gsi_generic_command()
1804 timeout = !gsi_command(gsi, reg_offset(reg), val); in gsi_generic_command()
1807 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN); in gsi_generic_command()
1808 iowrite32(ERROR_INT, gsi->virt + reg_offset(reg)); in gsi_generic_command()
1811 return gsi->result; in gsi_generic_command()
1813 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", in gsi_generic_command()
1819 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id) in gsi_modem_channel_alloc()
1821 return gsi_generic_command(gsi, channel_id, in gsi_modem_channel_alloc()
1825 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) in gsi_modem_channel_halt()
1831 ret = gsi_generic_command(gsi, channel_id, in gsi_modem_channel_halt()
1836 dev_err(gsi->dev, "error %d halting modem channel %u\n", in gsi_modem_channel_halt()
1840 /* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
1842 gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable) in gsi_modem_channel_flow_control()
1853 if (!enable && gsi->version >= IPA_VERSION_4_11) in gsi_modem_channel_flow_control()
1857 ret = gsi_generic_command(gsi, channel_id, command, 0); in gsi_modem_channel_flow_control()
1861 dev_err(gsi->dev, in gsi_modem_channel_flow_control()
1867 static int gsi_channel_setup(struct gsi *gsi) in gsi_channel_setup()
1873 gsi_irq_enable(gsi); in gsi_channel_setup()
1875 mutex_lock(&gsi->mutex); in gsi_channel_setup()
1878 ret = gsi_channel_setup_one(gsi, channel_id); in gsi_channel_setup()
1881 } while (++channel_id < gsi->channel_count); in gsi_channel_setup()
1885 struct gsi_channel *channel = &gsi->channel[channel_id++]; in gsi_channel_setup()
1891 dev_err(gsi->dev, "channel %u not supported by hardware\n", in gsi_channel_setup()
1893 channel_id = gsi->channel_count; in gsi_channel_setup()
1898 mask = gsi->modem_channel_bitmap; in gsi_channel_setup()
1902 ret = gsi_modem_channel_alloc(gsi, modem_channel_id); in gsi_channel_setup()
1910 mutex_unlock(&gsi->mutex); in gsi_channel_setup()
1916 mask ^= gsi->modem_channel_bitmap; in gsi_channel_setup()
1922 gsi_modem_channel_halt(gsi, channel_id); in gsi_channel_setup()
1927 gsi_channel_teardown_one(gsi, channel_id); in gsi_channel_setup()
1929 mutex_unlock(&gsi->mutex); in gsi_channel_setup()
1931 gsi_irq_disable(gsi); in gsi_channel_setup()
1937 static void gsi_channel_teardown(struct gsi *gsi) in gsi_channel_teardown()
1939 u32 mask = gsi->modem_channel_bitmap; in gsi_channel_teardown()
1942 mutex_lock(&gsi->mutex); in gsi_channel_teardown()
1949 gsi_modem_channel_halt(gsi, channel_id); in gsi_channel_teardown()
1952 channel_id = gsi->channel_count - 1; in gsi_channel_teardown()
1954 gsi_channel_teardown_one(gsi, channel_id); in gsi_channel_teardown()
1957 mutex_unlock(&gsi->mutex); in gsi_channel_teardown()
1959 gsi_irq_disable(gsi); in gsi_channel_teardown()
1962 /* Turn off all GSI interrupts initially */
1963 static int gsi_irq_setup(struct gsi *gsi) in gsi_irq_setup()
1969 reg = gsi_reg(gsi, CNTXT_INTSET); in gsi_irq_setup()
1970 iowrite32(reg_bit(reg, INTYPE), gsi->virt + reg_offset(reg)); in gsi_irq_setup()
1973 gsi_irq_type_update(gsi, 0); in gsi_irq_setup()
1976 reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK); in gsi_irq_setup()
1977 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_setup()
1979 reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK); in gsi_irq_setup()
1980 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_setup()
1982 reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN); in gsi_irq_setup()
1983 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_setup()
1985 reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK); in gsi_irq_setup()
1986 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_setup()
1989 if (gsi->version > IPA_VERSION_3_1) { in gsi_irq_setup()
1990 reg = gsi_reg(gsi, INTER_EE_SRC_CH_IRQ_MSK); in gsi_irq_setup()
1991 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_setup()
1993 reg = gsi_reg(gsi, INTER_EE_SRC_EV_CH_IRQ_MSK); in gsi_irq_setup()
1994 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_setup()
1997 reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN); in gsi_irq_setup()
1998 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_irq_setup()
2000 ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi); in gsi_irq_setup()
2002 dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret); in gsi_irq_setup()
2007 static void gsi_irq_teardown(struct gsi *gsi) in gsi_irq_teardown()
2009 free_irq(gsi->irq, gsi); in gsi_irq_teardown()
2013 static int gsi_ring_setup(struct gsi *gsi) in gsi_ring_setup()
2015 struct device *dev = gsi->dev; in gsi_ring_setup()
2020 if (gsi->version < IPA_VERSION_3_5_1) { in gsi_ring_setup()
2022 gsi->channel_count = GSI_CHANNEL_COUNT_MAX; in gsi_ring_setup()
2023 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; in gsi_ring_setup()
2028 reg = gsi_reg(gsi, HW_PARAM_2); in gsi_ring_setup()
2029 val = ioread32(gsi->virt + reg_offset(reg)); in gsi_ring_setup()
2033 dev_err(dev, "GSI reports zero channels supported\n"); in gsi_ring_setup()
2041 gsi->channel_count = count; in gsi_ring_setup()
2043 if (gsi->version < IPA_VERSION_5_0) { in gsi_ring_setup()
2046 reg = gsi_reg(gsi, HW_PARAM_4); in gsi_ring_setup()
2050 dev_err(dev, "GSI reports zero event rings supported\n"); in gsi_ring_setup()
2059 gsi->evt_ring_count = count; in gsi_ring_setup()
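
gsi_ring_setup() treats a zero hardware-reported channel or event ring count as an error and limits anything above the driver's compile-time maximum. A sketch of that validation; the numbers and the setup_count() helper are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define CHANNEL_COUNT_MAX 28u	/* stand-in for GSI_CHANNEL_COUNT_MAX */

static int setup_count(uint32_t hw_reported, uint32_t *count_out)
{
	if (!hw_reported) {
		fprintf(stderr, "hardware reports zero channels\n");
		return -1;
	}
	if (hw_reported > CHANNEL_COUNT_MAX) {
		fprintf(stderr, "limiting to %u channels (hw has %u)\n",
			CHANNEL_COUNT_MAX, (unsigned)hw_reported);
		hw_reported = CHANNEL_COUNT_MAX;
	}
	*count_out = hw_reported;
	return 0;
}

int main(void)
{
	uint32_t count;

	if (!setup_count(32, &count))
		printf("using %u channels\n", (unsigned)count);
	return 0;
}
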
2064 /* Setup function for GSI. GSI firmware must be loaded and initialized */
2065 int gsi_setup(struct gsi *gsi) in gsi_setup()
2071 /* Here is where we first touch the GSI hardware */ in gsi_setup()
2072 reg = gsi_reg(gsi, GSI_STATUS); in gsi_setup()
2073 val = ioread32(gsi->virt + reg_offset(reg)); in gsi_setup()
2075 dev_err(gsi->dev, "GSI has not been enabled\n"); in gsi_setup()
2079 ret = gsi_irq_setup(gsi); in gsi_setup()
2083 ret = gsi_ring_setup(gsi); /* No matching teardown required */ in gsi_setup()
2088 reg = gsi_reg(gsi, ERROR_LOG); in gsi_setup()
2089 iowrite32(0, gsi->virt + reg_offset(reg)); in gsi_setup()
2091 ret = gsi_channel_setup(gsi); in gsi_setup()
2098 gsi_irq_teardown(gsi); in gsi_setup()
2104 void gsi_teardown(struct gsi *gsi) in gsi_teardown()
2106 gsi_channel_teardown(gsi); in gsi_teardown()
2107 gsi_irq_teardown(gsi); in gsi_teardown()
2113 struct gsi *gsi = channel->gsi; in gsi_channel_evt_ring_init()
2117 ret = gsi_evt_ring_id_alloc(gsi); in gsi_channel_evt_ring_init()
2122 evt_ring = &gsi->evt_ring[channel->evt_ring_id]; in gsi_channel_evt_ring_init()
2125 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count); in gsi_channel_evt_ring_init()
2129 dev_err(gsi->dev, "error %d allocating channel %u event ring\n", in gsi_channel_evt_ring_init()
2132 gsi_evt_ring_id_free(gsi, channel->evt_ring_id); in gsi_channel_evt_ring_init()
2141 struct gsi *gsi = channel->gsi; in gsi_channel_evt_ring_exit()
2144 evt_ring = &gsi->evt_ring[evt_ring_id]; in gsi_channel_evt_ring_exit()
2145 gsi_ring_free(gsi, &evt_ring->ring); in gsi_channel_evt_ring_exit()
2146 gsi_evt_ring_id_free(gsi, evt_ring_id); in gsi_channel_evt_ring_exit()
2149 static bool gsi_channel_data_valid(struct gsi *gsi, bool command, in gsi_channel_data_valid()
2154 struct device *dev = gsi->dev; in gsi_channel_data_valid()
2217 static int gsi_channel_init_one(struct gsi *gsi, in gsi_channel_init_one()
2225 if (!gsi_channel_data_valid(gsi, command, data)) in gsi_channel_init_one()
2231 dev_warn(gsi->dev, "channel %u limited to %u TREs\n", in gsi_channel_init_one()
2237 channel = &gsi->channel[data->channel_id]; in gsi_channel_init_one()
2240 channel->gsi = gsi; in gsi_channel_init_one()
2251 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count); in gsi_channel_init_one()
2253 dev_err(gsi->dev, "error %d allocating channel %u ring\n", in gsi_channel_init_one()
2258 ret = gsi_channel_trans_init(gsi, data->channel_id); in gsi_channel_init_one()
2263 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id); in gsi_channel_init_one()
2272 gsi_ring_free(gsi, &channel->tre_ring); in gsi_channel_init_one()
2276 channel->gsi = NULL; /* Mark it not (fully) initialized */ in gsi_channel_init_one()
2290 gsi_ring_free(channel->gsi, &channel->tre_ring); in gsi_channel_exit_one()
2295 static int gsi_channel_init(struct gsi *gsi, u32 count, in gsi_channel_init()
2303 modem_alloc = gsi->version == IPA_VERSION_4_2; in gsi_channel_init()
2305 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); in gsi_channel_init()
2306 gsi->ieob_enabled_bitmap = 0; in gsi_channel_init()
2318 gsi->modem_channel_bitmap |= in gsi_channel_init()
2323 ret = gsi_channel_init_one(gsi, &data[i], command); in gsi_channel_init()
2335 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id); in gsi_channel_init()
2338 gsi_channel_exit_one(&gsi->channel[data->channel_id]); in gsi_channel_init()
2345 static void gsi_channel_exit(struct gsi *gsi) in gsi_channel_exit()
2350 gsi_channel_exit_one(&gsi->channel[channel_id]); in gsi_channel_exit()
2352 gsi->modem_channel_bitmap = 0; in gsi_channel_exit()
2355 /* Init function for GSI. GSI hardware does not need to be "ready" */
2356 int gsi_init(struct gsi *gsi, struct platform_device *pdev, in gsi_init()
2364 gsi->dev = &pdev->dev; in gsi_init()
2365 gsi->version = version; in gsi_init()
2367 /* GSI uses NAPI on all channels. Create a dummy network device in gsi_init()
2370 gsi->dummy_dev = alloc_netdev_dummy(0); in gsi_init()
2371 if (!gsi->dummy_dev) in gsi_init()
2373 init_completion(&gsi->completion); in gsi_init()
2375 ret = gsi_reg_init(gsi, pdev); in gsi_init()
2379 ret = gsi_irq_init(gsi, pdev); /* No matching exit required */ in gsi_init()
2383 ret = gsi_channel_init(gsi, count, data); in gsi_init()
2387 mutex_init(&gsi->mutex); in gsi_init()
2392 free_netdev(gsi->dummy_dev); in gsi_init()
2393 gsi_reg_exit(gsi); in gsi_init()
2399 void gsi_exit(struct gsi *gsi) in gsi_exit()
2401 mutex_destroy(&gsi->mutex); in gsi_exit()
2402 gsi_channel_exit(gsi); in gsi_exit()
2403 free_netdev(gsi->dummy_dev); in gsi_exit()
2404 gsi_reg_exit(gsi); in gsi_exit()
2427 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id) in gsi_channel_tre_max()
2429 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_tre_max()