Lines matching "io" and "reset" (+full:io +full:-reset) in drivers/scsi/fnic/fnic_scsi.c
1 // SPDX-License-Identifier: GPL-2.0-only
109 * also unmap and free the device-private scatter/gather list.
115 if (io_req->sgl_list_pa) in fnic_release_ioreq_buf()
116 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa, in fnic_release_ioreq_buf()
117 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt, in fnic_release_ioreq_buf()
121 if (io_req->sgl_cnt) in fnic_release_ioreq_buf()
122 mempool_free(io_req->sgl_list_alloc, in fnic_release_ioreq_buf()
123 fnic->io_sgl_pool[io_req->sgl_type]); in fnic_release_ioreq_buf()
124 if (io_req->sense_buf_pa) in fnic_release_ioreq_buf()
125 dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa, in fnic_release_ioreq_buf()
135 struct fnic_io_req *io_req = fnic_priv(sc)->io_req; in fnic_count_portid_ioreqs_iter()
137 if (!io_req || (*portid && (io_req->port_id != *portid))) in fnic_count_portid_ioreqs_iter()
151 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_count_ioreqs()
168 if (sc->device != scsi_device || !fnic_priv(sc)->io_req) in fnic_count_lun_ioreqs_iter()
183 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_count_lun_ioreqs()
192 if (!fnic->fw_ack_recd[hwq]) in free_wq_copy_descs()
199 if (wq->to_clean_index <= fnic->fw_ack_index[hwq]) in free_wq_copy_descs()
200 wq->ring.desc_avail += (fnic->fw_ack_index[hwq] in free_wq_copy_descs()
201 - wq->to_clean_index + 1); in free_wq_copy_descs()
203 wq->ring.desc_avail += (wq->ring.desc_count in free_wq_copy_descs()
204 - wq->to_clean_index in free_wq_copy_descs()
205 + fnic->fw_ack_index[hwq] + 1); in free_wq_copy_descs()
212 wq->to_clean_index = in free_wq_copy_descs()
213 (fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count; in free_wq_copy_descs()
216 fnic->fw_ack_recd[hwq] = 0; in free_wq_copy_descs()
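A minimal standalone sketch of the descriptor-reclaim arithmetic in free_wq_copy_descs() above, assuming a ring of desc_count entries in which the firmware has acknowledged everything up to and including ack_index (the function and parameter names here are hypothetical):

	static unsigned int reclaimed_descs(unsigned int to_clean_index,
					    unsigned int ack_index,
					    unsigned int desc_count)
	{
		if (to_clean_index <= ack_index)	/* ack did not wrap */
			return ack_index - to_clean_index + 1;
		/* ack wrapped past the end of the ring */
		return desc_count - to_clean_index + ack_index + 1;
	}

For example, with desc_count = 8, to_clean_index = 6 and ack_index = 1, entries 6, 7, 0 and 1 have completed, so 8 - 6 + 1 + 1 = 4 descriptors become available, and to_clean_index advances to (1 + 1) % 8 = 2, matching the modulo step above.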
231 spin_lock_irqsave(&fnic->fnic_lock, flags); in __fnic_set_state_flags()
234 fnic->state_flags &= ~st_flags; in __fnic_set_state_flags()
236 fnic->state_flags |= st_flags; in __fnic_set_state_flags()
238 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in __fnic_set_state_flags()
246 * Routine to send reset msg to fw
250 struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; in fnic_fw_reset_handler()
255 /* indicate fw reset to io path */ in fnic_fw_reset_handler()
259 /* wait for io cmpl */ in fnic_fw_reset_handler()
260 while (atomic_read(&fnic->in_flight)) in fnic_fw_reset_handler()
263 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_fw_reset_handler()
265 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_fw_reset_handler()
269 ret = -EAGAIN; in fnic_fw_reset_handler()
271 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fw_reset_handler()
274 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_fw_reset_handler()
275 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_fw_reset_handler()
276 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_fw_reset_handler()
277 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_fw_reset_handler()
279 &fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_fw_reset_handler()
282 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_fw_reset_handler()
285 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); in fnic_fw_reset_handler()
286 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fw_reset_handler()
287 "Issued fw reset\n"); in fnic_fw_reset_handler()
290 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fw_reset_handler()
291 "Failed to issue fw reset\n"); in fnic_fw_reset_handler()
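The active_fw_reqs/max_fw_reqs sequence above (repeated in the FLOGI, icmnd and abort paths below) is a lock-free high-water mark: bump the active counter, then widen the recorded maximum if the new value exceeds it. One way to express the same idea, sketched with a hypothetical helper name:

	static inline void track_high_water(atomic64_t *active, atomic64_t *max)
	{
		s64 cur = atomic64_inc_return(active);

		/*
		 * The read and the set are not atomic as a pair, so a racing
		 * caller can interleave between them; the recorded maximum is
		 * best-effort, which is fine for statistics.
		 */
		if (cur > atomic64_read(max))
			atomic64_set(max, cur);
	}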
304 struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; in fnic_flogi_reg_handler()
309 struct fnic_iport_s *iport = &fnic->iport; in fnic_flogi_reg_handler()
311 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_flogi_reg_handler()
313 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_flogi_reg_handler()
317 ret = -EAGAIN; in fnic_flogi_reg_handler()
321 memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN); in fnic_flogi_reg_handler()
324 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_flogi_reg_handler()
327 fnic->iport.fpma, in fnic_flogi_reg_handler()
328 iport->r_a_tov, iport->e_d_tov); in fnic_flogi_reg_handler()
329 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_flogi_reg_handler()
331 fc_id, fnic->iport.fpma, gw_mac); in fnic_flogi_reg_handler()
335 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_flogi_reg_handler()
340 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_flogi_reg_handler()
341 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_flogi_reg_handler()
342 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_flogi_reg_handler()
343 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_flogi_reg_handler()
344 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_flogi_reg_handler()
347 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_flogi_reg_handler()
364 struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); in fnic_queue_wq_copy_desc()
366 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_queue_wq_copy_desc()
374 rdd_data = rport->dd_data; in fnic_queue_wq_copy_desc()
375 tport = rdd_data->tport; in fnic_queue_wq_copy_desc()
379 desc = io_req->sgl_list; in fnic_queue_wq_copy_desc()
381 desc->addr = cpu_to_le64(sg_dma_address(sg)); in fnic_queue_wq_copy_desc()
382 desc->len = cpu_to_le32(sg_dma_len(sg)); in fnic_queue_wq_copy_desc()
383 desc->_resvd = 0; in fnic_queue_wq_copy_desc()
387 io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev, in fnic_queue_wq_copy_desc()
388 io_req->sgl_list, in fnic_queue_wq_copy_desc()
389 sizeof(io_req->sgl_list[0]) * sg_count, in fnic_queue_wq_copy_desc()
391 if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) { in fnic_queue_wq_copy_desc()
397 io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev, in fnic_queue_wq_copy_desc()
398 sc->sense_buffer, in fnic_queue_wq_copy_desc()
401 if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) { in fnic_queue_wq_copy_desc()
402 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa, in fnic_queue_wq_copy_desc()
403 sizeof(io_req->sgl_list[0]) * sg_count, in fnic_queue_wq_copy_desc()
409 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_queue_wq_copy_desc()
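The mapping code above follows the usual streaming-DMA shape: dma_map_single() each buffer, test the handle with dma_mapping_error(), and unwind any earlier mapping before failing (in the listing, the scatter/gather array is mapped to-device and the sense buffer from-device). A self-contained sketch of that pattern, with hypothetical buffer names:

	static int map_two_buffers(struct device *dev,
				   void *buf_a, size_t len_a,
				   void *buf_b, size_t len_b,
				   dma_addr_t *pa, dma_addr_t *pb)
	{
		*pa = dma_map_single(dev, buf_a, len_a, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *pa))
			return -ENOMEM;		/* nothing mapped yet */

		*pb = dma_map_single(dev, buf_b, len_b, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *pb)) {
			/* undo the first mapping before reporting failure */
			dma_unmap_single(dev, *pa, len_a, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		return 0;
	}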
412 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) in fnic_queue_wq_copy_desc()
416 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_queue_wq_copy_desc()
417 "fnic_queue_wq_copy_desc failure - no descriptors\n"); in fnic_queue_wq_copy_desc()
418 atomic64_inc(&misc_stats->io_cpwq_alloc_failures); in fnic_queue_wq_copy_desc()
423 if (sc->sc_data_direction == DMA_FROM_DEVICE) in fnic_queue_wq_copy_desc()
425 else if (sc->sc_data_direction == DMA_TO_DEVICE) in fnic_queue_wq_copy_desc()
429 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && in fnic_queue_wq_copy_desc()
430 (tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY)) in fnic_queue_wq_copy_desc()
434 0, exch_flags, io_req->sgl_cnt, in fnic_queue_wq_copy_desc()
436 io_req->sgl_list_pa, in fnic_queue_wq_copy_desc()
437 io_req->sense_buf_pa, in fnic_queue_wq_copy_desc()
442 sc->cmnd, sc->cmd_len, in fnic_queue_wq_copy_desc()
444 fc_lun.scsi_lun, io_req->port_id, in fnic_queue_wq_copy_desc()
445 tport->max_payload_size, in fnic_queue_wq_copy_desc()
446 tport->r_a_tov, tport->e_d_tov); in fnic_queue_wq_copy_desc()
448 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_queue_wq_copy_desc()
449 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_queue_wq_copy_desc()
450 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_queue_wq_copy_desc()
451 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_queue_wq_copy_desc()
452 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_queue_wq_copy_desc()
464 struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host)); in fnic_queuecommand()
466 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_queuecommand()
479 rport = starget_to_rport(scsi_target(sc->device)); in fnic_queuecommand()
481 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
482 "returning DID_NO_CONNECT for IO as rport is NULL\n"); in fnic_queuecommand()
483 sc->result = DID_NO_CONNECT << 16; in fnic_queuecommand()
490 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
492 atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); in fnic_queuecommand()
493 sc->result = ret; in fnic_queuecommand()
499 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_queuecommand()
500 iport = &fnic->iport; in fnic_queuecommand()
502 if (iport->state != FNIC_IPORT_STATE_READY) { in fnic_queuecommand()
503 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
504 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
505 "returning DID_NO_CONNECT for IO as iport state: %d\n", in fnic_queuecommand()
506 iport->state); in fnic_queuecommand()
507 sc->result = DID_NO_CONNECT << 16; in fnic_queuecommand()
515 rdd_data = rport->dd_data; in fnic_queuecommand()
516 tport = rdd_data->tport; in fnic_queuecommand()
517 if (!tport || (rdd_data->iport != iport)) { in fnic_queuecommand()
518 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
520 rport->port_id); in fnic_queuecommand()
521 tport = fnic_find_tport_by_fcid(iport, rport->port_id); in fnic_queuecommand()
523 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
524 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
525 "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n", in fnic_queuecommand()
526 rport->port_id); in fnic_queuecommand()
527 sc->result = DID_BUS_BUSY << 16; in fnic_queuecommand()
532 /* Re-assign same params as in fnic_fdls_add_tport */ in fnic_queuecommand()
533 rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; in fnic_queuecommand()
534 rport->supported_classes = in fnic_queuecommand()
537 rdd_data = rport->dd_data; in fnic_queuecommand()
538 rdd_data->tport = tport; in fnic_queuecommand()
539 rdd_data->iport = iport; in fnic_queuecommand()
540 tport->rport = rport; in fnic_queuecommand()
541 tport->flags |= FNIC_FDLS_SCSI_REGISTERED; in fnic_queuecommand()
544 if ((tport->state != FDLS_TGT_STATE_READY) in fnic_queuecommand()
545 && (tport->state != FDLS_TGT_STATE_ADISC)) { in fnic_queuecommand()
546 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
547 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
548 "returning DID_NO_CONNECT for IO as tport state: %d\n", in fnic_queuecommand()
549 tport->state); in fnic_queuecommand()
550 sc->result = DID_NO_CONNECT << 16; in fnic_queuecommand()
555 atomic_inc(&fnic->in_flight); in fnic_queuecommand()
556 atomic_inc(&tport->in_flight); in fnic_queuecommand()
559 atomic_dec(&fnic->in_flight); in fnic_queuecommand()
560 atomic_dec(&tport->in_flight); in fnic_queuecommand()
561 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
566 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
567 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
568 "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", in fnic_queuecommand()
569 fnic->state_flags); in fnic_queuecommand()
573 if (!tport->lun0_delay) { in fnic_queuecommand()
575 tport->lun0_delay++; in fnic_queuecommand()
578 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
580 fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; in fnic_queuecommand()
581 fnic_priv(sc)->flags = FNIC_NO_FLAGS; in fnic_queuecommand()
583 /* Get a new io_req for this SCSI IO */ in fnic_queuecommand()
584 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); in fnic_queuecommand()
586 atomic64_inc(&fnic_stats->io_stats.alloc_failures); in fnic_queuecommand()
595 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, in fnic_queuecommand()
596 mqtag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state); in fnic_queuecommand()
597 mempool_free(io_req, fnic->io_req_pool); in fnic_queuecommand()
601 io_req->tport = tport; in fnic_queuecommand()
603 io_req->sgl_cnt = sg_count; in fnic_queuecommand()
604 io_req->sgl_type = FNIC_SGL_CACHE_DFLT; in fnic_queuecommand()
606 io_req->sgl_type = FNIC_SGL_CACHE_MAX; in fnic_queuecommand()
609 io_req->sgl_list = in fnic_queuecommand()
610 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], in fnic_queuecommand()
612 if (!io_req->sgl_list) { in fnic_queuecommand()
613 atomic64_inc(&fnic_stats->io_stats.alloc_failures); in fnic_queuecommand()
616 mempool_free(io_req, fnic->io_req_pool); in fnic_queuecommand()
621 io_req->sgl_list_alloc = io_req->sgl_list; in fnic_queuecommand()
622 ptr = (unsigned long) io_req->sgl_list; in fnic_queuecommand()
624 io_req->sgl_list = (struct host_sg_desc *) in fnic_queuecommand()
626 + FNIC_SG_DESC_ALIGN - 1) in fnic_queuecommand()
627 & ~(FNIC_SG_DESC_ALIGN - 1)); in fnic_queuecommand()
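The two lines above are the standard align-up idiom: round the pool allocation up to the next FNIC_SG_DESC_ALIGN boundary, which requires the alignment to be a power of two. A small sketch, assuming a hypothetical alignment of 16:

	static unsigned long align_up(unsigned long ptr, unsigned long align)
	{
		return (ptr + align - 1) & ~(align - 1);	/* align must be 2^n */
	}

	/* align_up(0x1001, 16) == 0x1010; align_up(0x1010, 16) == 0x1010 */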
632 * Will acquire the lock before marking the IO initialized. in fnic_queuecommand()
635 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_queuecommand()
639 io_req->port_id = rport->port_id; in fnic_queuecommand()
640 io_req->start_time = jiffies; in fnic_queuecommand()
641 fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; in fnic_queuecommand()
642 fnic_priv(sc)->io_req = io_req; in fnic_queuecommand()
643 fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED; in fnic_queuecommand()
644 io_req->sc = sc; in fnic_queuecommand()
646 if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) { in fnic_queuecommand()
648 fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag)); in fnic_queuecommand()
649 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queuecommand()
653 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = io_req; in fnic_queuecommand()
654 io_req->tag = mqtag; in fnic_queuecommand()
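The mqtag used throughout fnic_queuecommand() is the block layer's unique tag, which packs the hardware queue number into the upper 16 bits and the per-queue tag into the lower 16 bits; that is why the per-hwq io_req_table is indexed with blk_mq_unique_tag_to_tag(mqtag). The decomposition, spelled out:

	u16 hwq = blk_mq_unique_tag_to_hwq(mqtag);	/* mqtag >> 16 */
	u16 tag = blk_mq_unique_tag_to_tag(mqtag);	/* mqtag & 0xffff */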
657 wq = &fnic->hw_copy_wq[hwq]; in fnic_queuecommand()
658 atomic64_inc(&fnic_stats->io_stats.ios[hwq]); in fnic_queuecommand()
665 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, in fnic_queuecommand()
667 io_req = fnic_priv(sc)->io_req; in fnic_queuecommand()
668 fnic_priv(sc)->io_req = NULL; in fnic_queuecommand()
670 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL; in fnic_queuecommand()
671 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; in fnic_queuecommand()
672 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queuecommand()
675 mempool_free(io_req, fnic->io_req_pool); in fnic_queuecommand()
677 atomic_dec(&fnic->in_flight); in fnic_queuecommand()
678 atomic_dec(&tport->in_flight); in fnic_queuecommand()
681 atomic64_inc(&fnic_stats->io_stats.active_ios); in fnic_queuecommand()
682 atomic64_inc(&fnic_stats->io_stats.num_ios); in fnic_queuecommand()
683 if (atomic64_read(&fnic_stats->io_stats.active_ios) > in fnic_queuecommand()
684 atomic64_read(&fnic_stats->io_stats.max_active_ios)) in fnic_queuecommand()
685 atomic64_set(&fnic_stats->io_stats.max_active_ios, in fnic_queuecommand()
686 atomic64_read(&fnic_stats->io_stats.active_ios)); in fnic_queuecommand()
688 /* REVISIT: Use per IO lock in the final code */ in fnic_queuecommand()
689 fnic_priv(sc)->flags |= FNIC_IO_ISSUED; in fnic_queuecommand()
692 cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | in fnic_queuecommand()
693 (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | in fnic_queuecommand()
694 (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | in fnic_queuecommand()
695 sc->cmnd[5]); in fnic_queuecommand()
697 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, in fnic_queuecommand()
701 /* the io lock is held only if we issued the IO */ in fnic_queuecommand()
703 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queuecommand()
705 atomic_dec(&fnic->in_flight); in fnic_queuecommand()
706 atomic_dec(&tport->in_flight); in fnic_queuecommand()
709 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_queuecommand()
720 * Routine to handle fw reset completion
730 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; in fnic_fcpio_fw_reset_cmpl_handler()
732 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); in fnic_fcpio_fw_reset_cmpl_handler()
734 atomic64_inc(&reset_stats->fw_reset_completions); in fnic_fcpio_fw_reset_cmpl_handler()
736 /* Clean up all outstanding io requests */ in fnic_fcpio_fw_reset_cmpl_handler()
739 atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); in fnic_fcpio_fw_reset_cmpl_handler()
740 atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); in fnic_fcpio_fw_reset_cmpl_handler()
741 atomic64_set(&fnic->io_cmpl_skip, 0); in fnic_fcpio_fw_reset_cmpl_handler()
743 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_fcpio_fw_reset_cmpl_handler()
746 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { in fnic_fcpio_fw_reset_cmpl_handler()
747 /* Check status of reset completion */ in fnic_fcpio_fw_reset_cmpl_handler()
749 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_fw_reset_cmpl_handler()
750 "reset cmpl success\n"); in fnic_fcpio_fw_reset_cmpl_handler()
752 fnic->state = FNIC_IN_ETH_MODE; in fnic_fcpio_fw_reset_cmpl_handler()
754 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_fw_reset_cmpl_handler()
755 "reset failed with header status: %s\n", in fnic_fcpio_fw_reset_cmpl_handler()
758 fnic->state = FNIC_IN_FC_MODE; in fnic_fcpio_fw_reset_cmpl_handler()
759 atomic64_inc(&reset_stats->fw_reset_failures); in fnic_fcpio_fw_reset_cmpl_handler()
760 ret = -1; in fnic_fcpio_fw_reset_cmpl_handler()
763 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_fw_reset_cmpl_handler()
764 "Unexpected state while processing reset completion: %s\n", in fnic_fcpio_fw_reset_cmpl_handler()
765 fnic_state_to_str(fnic->state)); in fnic_fcpio_fw_reset_cmpl_handler()
766 atomic64_inc(&reset_stats->fw_reset_failures); in fnic_fcpio_fw_reset_cmpl_handler()
767 ret = -1; in fnic_fcpio_fw_reset_cmpl_handler()
770 if (fnic->fw_reset_done) in fnic_fcpio_fw_reset_cmpl_handler()
771 complete(fnic->fw_reset_done); in fnic_fcpio_fw_reset_cmpl_handler()
774 * If fnic is being removed, or fw reset failed in fnic_fcpio_fw_reset_cmpl_handler()
778 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_fw_reset_cmpl_handler()
779 fnic_free_txq(&fnic->tx_queue); in fnic_fcpio_fw_reset_cmpl_handler()
783 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_fw_reset_cmpl_handler()
785 queue_work(fnic_event_queue, &fnic->flush_work); in fnic_fcpio_fw_reset_cmpl_handler()
806 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); in fnic_fcpio_flogi_reg_cmpl_handler()
809 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
811 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { in fnic_fcpio_flogi_reg_cmpl_handler()
815 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_flogi_reg_cmpl_handler()
817 fnic->state = FNIC_IN_FC_MODE; in fnic_fcpio_flogi_reg_cmpl_handler()
820 fnic->host, fnic->fnic_num, in fnic_fcpio_flogi_reg_cmpl_handler()
823 fnic->state = FNIC_IN_ETH_MODE; in fnic_fcpio_flogi_reg_cmpl_handler()
824 ret = -1; in fnic_fcpio_flogi_reg_cmpl_handler()
827 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_flogi_reg_cmpl_handler()
830 fnic_state_to_str(fnic->state)); in fnic_fcpio_flogi_reg_cmpl_handler()
831 ret = -1; in fnic_fcpio_flogi_reg_cmpl_handler()
835 if (fnic->stop_rx_link_events) { in fnic_fcpio_flogi_reg_cmpl_handler()
836 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
839 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
841 queue_work(fnic_event_queue, &fnic->flush_work); in fnic_fcpio_flogi_reg_cmpl_handler()
842 queue_work(fnic_event_queue, &fnic->frame_work); in fnic_fcpio_flogi_reg_cmpl_handler()
844 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
854 if (wq->to_clean_index <= wq->to_use_index) { in is_ack_index_in_range()
856 if (request_out < wq->to_clean_index || in is_ack_index_in_range()
857 request_out >= wq->to_use_index) in is_ack_index_in_range()
861 if (request_out < wq->to_clean_index && in is_ack_index_in_range()
862 request_out >= wq->to_use_index) in is_ack_index_in_range()
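is_ack_index_in_range() above is a wrap-aware window check: the firmware's request_out must fall inside the [to_clean_index, to_use_index) window measured around the ring. An equivalent standalone form of the same test (hypothetical helper name):

	static bool ack_in_range(u16 request_out, u16 to_clean, u16 to_use)
	{
		if (to_clean <= to_use)		/* window is contiguous */
			return request_out >= to_clean && request_out < to_use;
		/* window wraps: valid indices sit above to_clean or below to_use */
		return request_out >= to_clean || request_out < to_use;
	}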
881 u16 request_out = desc->u.ack.request_out; in fnic_fcpio_ack_handler()
887 wq = &fnic->hw_copy_wq[cq_index]; in fnic_fcpio_ack_handler()
888 spin_lock_irqsave(&fnic->wq_copy_lock[wq_index], flags); in fnic_fcpio_ack_handler()
890 fnic->fnic_stats.misc_stats.last_ack_time = jiffies; in fnic_fcpio_ack_handler()
892 fnic->fw_ack_index[wq_index] = request_out; in fnic_fcpio_ack_handler()
893 fnic->fw_ack_recd[wq_index] = 1; in fnic_fcpio_ack_handler()
896 &fnic->fnic_stats.misc_stats.ack_index_out_of_range); in fnic_fcpio_ack_handler()
898 spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags); in fnic_fcpio_ack_handler()
900 fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], in fnic_fcpio_ack_handler()
919 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcpio_icmnd_cmpl_handler()
929 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); in fnic_fcpio_icmnd_cmpl_handler()
931 icmnd_cmpl = &desc->u.icmnd_cmpl; in fnic_fcpio_icmnd_cmpl_handler()
938 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
941 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
946 if (tag >= fnic->fnic_max_tag_id) { in fnic_fcpio_icmnd_cmpl_handler()
947 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
950 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
955 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
957 sc = scsi_host_find_tag(fnic->host, id); in fnic_fcpio_icmnd_cmpl_handler()
960 atomic64_inc(&fnic_stats->io_stats.sc_null); in fnic_fcpio_icmnd_cmpl_handler()
961 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
962 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_icmnd_cmpl_handler()
963 "icmnd_cmpl sc is null - " in fnic_fcpio_icmnd_cmpl_handler()
967 fnic->host->host_no, id, in fnic_fcpio_icmnd_cmpl_handler()
968 ((u64)icmnd_cmpl->_resvd0[1] << 16 | in fnic_fcpio_icmnd_cmpl_handler()
969 (u64)icmnd_cmpl->_resvd0[0]), in fnic_fcpio_icmnd_cmpl_handler()
971 (u64)icmnd_cmpl->scsi_status << 8 | in fnic_fcpio_icmnd_cmpl_handler()
972 (u64)icmnd_cmpl->flags), desc, in fnic_fcpio_icmnd_cmpl_handler()
973 (u64)icmnd_cmpl->residual, 0); in fnic_fcpio_icmnd_cmpl_handler()
977 io_req = fnic_priv(sc)->io_req; in fnic_fcpio_icmnd_cmpl_handler()
978 if (fnic->sw_copy_wq[hwq].io_req_table[tag] != io_req) { in fnic_fcpio_icmnd_cmpl_handler()
981 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
987 atomic64_inc(&fnic_stats->io_stats.ioreq_null); in fnic_fcpio_icmnd_cmpl_handler()
988 fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL; in fnic_fcpio_icmnd_cmpl_handler()
989 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
990 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_icmnd_cmpl_handler()
991 "icmnd_cmpl io_req is null - " in fnic_fcpio_icmnd_cmpl_handler()
996 start_time = io_req->start_time; in fnic_fcpio_icmnd_cmpl_handler()
998 /* firmware completed the io */ in fnic_fcpio_icmnd_cmpl_handler()
999 io_req->io_completed = 1; in fnic_fcpio_icmnd_cmpl_handler()
1002 * if SCSI-ML has already issued abort on this command, in fnic_fcpio_icmnd_cmpl_handler()
1003 * set completion of the IO. The abts path will clean it up in fnic_fcpio_icmnd_cmpl_handler()
1005 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_fcpio_icmnd_cmpl_handler()
1011 fnic_priv(sc)->flags |= FNIC_IO_DONE; in fnic_fcpio_icmnd_cmpl_handler()
1012 fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING; in fnic_fcpio_icmnd_cmpl_handler()
1013 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
1015 fnic_priv(sc)->flags |= FNIC_IO_ABORTED; in fnic_fcpio_icmnd_cmpl_handler()
1017 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
1023 icmnd_cmpl->scsi_status, in fnic_fcpio_icmnd_cmpl_handler()
1024 icmnd_cmpl->residual); in fnic_fcpio_icmnd_cmpl_handler()
1028 /* Mark the IO as complete */ in fnic_fcpio_icmnd_cmpl_handler()
1029 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; in fnic_fcpio_icmnd_cmpl_handler()
1031 icmnd_cmpl = &desc->u.icmnd_cmpl; in fnic_fcpio_icmnd_cmpl_handler()
1035 sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1038 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) { in fnic_fcpio_icmnd_cmpl_handler()
1039 xfer_len -= icmnd_cmpl->residual; in fnic_fcpio_icmnd_cmpl_handler()
1040 scsi_set_resid(sc, icmnd_cmpl->residual); in fnic_fcpio_icmnd_cmpl_handler()
1043 if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION) in fnic_fcpio_icmnd_cmpl_handler()
1044 atomic64_inc(&fnic_stats->misc_stats.check_condition); in fnic_fcpio_icmnd_cmpl_handler()
1046 if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) in fnic_fcpio_icmnd_cmpl_handler()
1047 atomic64_inc(&fnic_stats->misc_stats.queue_fulls); in fnic_fcpio_icmnd_cmpl_handler()
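Every sc->result assignment in this handler packs two fields of the SCSI midlayer's result word: the host byte (a DID_* code) in bits 16-23 and the SCSI status byte in bits 0-7. A sketch of the packing, with a hypothetical helper name:

	static u32 scsi_result_word(u8 host_byte, u8 scsi_status)
	{
		return ((u32)host_byte << 16) | scsi_status;
	}

	/* e.g. scsi_result_word(DID_ERROR, SAM_STAT_GOOD) == 0x00070000 */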
1049 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
1054 atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout); in fnic_fcpio_icmnd_cmpl_handler()
1055 sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1059 atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted); in fnic_fcpio_icmnd_cmpl_handler()
1060 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1064 atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch); in fnic_fcpio_icmnd_cmpl_handler()
1065 scsi_set_resid(sc, icmnd_cmpl->residual); in fnic_fcpio_icmnd_cmpl_handler()
1066 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1070 atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources); in fnic_fcpio_icmnd_cmpl_handler()
1071 sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1075 atomic64_inc(&fnic_stats->io_stats.io_not_found); in fnic_fcpio_icmnd_cmpl_handler()
1076 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1080 atomic64_inc(&fnic_stats->misc_stats.sgl_invalid); in fnic_fcpio_icmnd_cmpl_handler()
1081 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1085 atomic64_inc(&fnic_stats->fw_stats.io_fw_errs); in fnic_fcpio_icmnd_cmpl_handler()
1086 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1090 atomic64_inc(&fnic_stats->misc_stats.mss_invalid); in fnic_fcpio_icmnd_cmpl_handler()
1091 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1098 sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; in fnic_fcpio_icmnd_cmpl_handler()
1103 fnic_priv(sc)->io_req = NULL; in fnic_fcpio_icmnd_cmpl_handler()
1104 io_req->sc = NULL; in fnic_fcpio_icmnd_cmpl_handler()
1105 fnic_priv(sc)->flags |= FNIC_IO_DONE; in fnic_fcpio_icmnd_cmpl_handler()
1106 fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; in fnic_fcpio_icmnd_cmpl_handler()
1108 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
1111 atomic64_inc(&fnic_stats->io_stats.io_failures); in fnic_fcpio_icmnd_cmpl_handler()
1112 shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n", in fnic_fcpio_icmnd_cmpl_handler()
1119 (u64)icmnd_cmpl->scsi_status << 48 | in fnic_fcpio_icmnd_cmpl_handler()
1120 (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 | in fnic_fcpio_icmnd_cmpl_handler()
1121 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | in fnic_fcpio_icmnd_cmpl_handler()
1122 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]; in fnic_fcpio_icmnd_cmpl_handler()
1125 sc->device->host->host_no, id, sc, in fnic_fcpio_icmnd_cmpl_handler()
1126 ((u64)icmnd_cmpl->_resvd0[1] << 56 | in fnic_fcpio_icmnd_cmpl_handler()
1127 (u64)icmnd_cmpl->_resvd0[0] << 48 | in fnic_fcpio_icmnd_cmpl_handler()
1128 jiffies_to_msecs(jiffies - start_time)), in fnic_fcpio_icmnd_cmpl_handler()
1131 if (sc->sc_data_direction == DMA_FROM_DEVICE) { in fnic_fcpio_icmnd_cmpl_handler()
1132 fnic_stats->host_stats.fcp_input_requests++; in fnic_fcpio_icmnd_cmpl_handler()
1133 fnic->fcp_input_bytes += xfer_len; in fnic_fcpio_icmnd_cmpl_handler()
1134 } else if (sc->sc_data_direction == DMA_TO_DEVICE) { in fnic_fcpio_icmnd_cmpl_handler()
1135 fnic_stats->host_stats.fcp_output_requests++; in fnic_fcpio_icmnd_cmpl_handler()
1136 fnic->fcp_output_bytes += xfer_len; in fnic_fcpio_icmnd_cmpl_handler()
1138 fnic_stats->host_stats.fcp_control_requests++; in fnic_fcpio_icmnd_cmpl_handler()
1140 /* Call SCSI completion function to complete the IO */ in fnic_fcpio_icmnd_cmpl_handler()
1143 mempool_free(io_req, fnic->io_req_pool); in fnic_fcpio_icmnd_cmpl_handler()
1145 atomic64_dec(&fnic_stats->io_stats.active_ios); in fnic_fcpio_icmnd_cmpl_handler()
1146 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_fcpio_icmnd_cmpl_handler()
1147 atomic64_dec(&fnic->io_cmpl_skip); in fnic_fcpio_icmnd_cmpl_handler()
1149 atomic64_inc(&fnic_stats->io_stats.io_completions); in fnic_fcpio_icmnd_cmpl_handler()
1152 io_duration_time = jiffies_to_msecs(jiffies) - in fnic_fcpio_icmnd_cmpl_handler()
1156 atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec); in fnic_fcpio_icmnd_cmpl_handler()
1158 atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec); in fnic_fcpio_icmnd_cmpl_handler()
1160 atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec); in fnic_fcpio_icmnd_cmpl_handler()
1162 atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec); in fnic_fcpio_icmnd_cmpl_handler()
1164 atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec); in fnic_fcpio_icmnd_cmpl_handler()
1166 atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec); in fnic_fcpio_icmnd_cmpl_handler()
1168 atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec); in fnic_fcpio_icmnd_cmpl_handler()
1170 if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time)) in fnic_fcpio_icmnd_cmpl_handler()
1171 atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time); in fnic_fcpio_icmnd_cmpl_handler()
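The counters above implement a fixed-bucket I/O latency histogram in milliseconds with boundaries at 10, 100, 500, 5000, 10000 and 30000 ms, plus a running maximum. A compact equivalent, sketched with hypothetical names (exact boundary inclusivity assumed from the counter names):

	static const u64 io_time_bounds_ms[] = { 10, 100, 500, 5000, 10000, 30000 };

	static unsigned int io_time_bucket(u64 duration_ms)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(io_time_bounds_ms); i++)
			if (duration_ms < io_time_bounds_ms[i])
				return i;
		return i;	/* 30000 ms and above */
	}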
1187 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcpio_itmf_cmpl_handler()
1188 struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; in fnic_fcpio_itmf_cmpl_handler()
1189 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; in fnic_fcpio_itmf_cmpl_handler()
1190 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_fcpio_itmf_cmpl_handler()
1197 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag); in fnic_fcpio_itmf_cmpl_handler()
1205 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1208 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1213 if (tag > fnic->fnic_max_tag_id) { in fnic_fcpio_itmf_cmpl_handler()
1214 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1217 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1221 } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) { in fnic_fcpio_itmf_cmpl_handler()
1222 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1225 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1231 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1236 if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) { in fnic_fcpio_itmf_cmpl_handler()
1237 io_req = fnic->sw_copy_wq[hwq].io_req_table[tag]; in fnic_fcpio_itmf_cmpl_handler()
1239 sc = io_req->sc; in fnic_fcpio_itmf_cmpl_handler()
1241 sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK); in fnic_fcpio_itmf_cmpl_handler()
1246 atomic64_inc(&fnic_stats->io_stats.sc_null); in fnic_fcpio_itmf_cmpl_handler()
1247 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1248 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1249 "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", in fnic_fcpio_itmf_cmpl_handler()
1254 io_req = fnic_priv(sc)->io_req; in fnic_fcpio_itmf_cmpl_handler()
1257 atomic64_inc(&fnic_stats->io_stats.ioreq_null); in fnic_fcpio_itmf_cmpl_handler()
1258 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1259 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; in fnic_fcpio_itmf_cmpl_handler()
1260 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1261 "itmf_cmpl io_req is null - " in fnic_fcpio_itmf_cmpl_handler()
1266 start_time = io_req->start_time; in fnic_fcpio_itmf_cmpl_handler()
1269 /* Abort and terminate completion of device reset req */ in fnic_fcpio_itmf_cmpl_handler()
1271 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1275 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; in fnic_fcpio_itmf_cmpl_handler()
1276 fnic_priv(sc)->abts_status = hdr_status; in fnic_fcpio_itmf_cmpl_handler()
1277 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; in fnic_fcpio_itmf_cmpl_handler()
1278 if (io_req->abts_done) in fnic_fcpio_itmf_cmpl_handler()
1279 complete(io_req->abts_done); in fnic_fcpio_itmf_cmpl_handler()
1280 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1283 shost_printk(KERN_DEBUG, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1291 if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) in fnic_fcpio_itmf_cmpl_handler()
1292 atomic64_inc(&abts_stats->abort_fw_timeouts); in fnic_fcpio_itmf_cmpl_handler()
1295 &term_stats->terminate_fw_timeouts); in fnic_fcpio_itmf_cmpl_handler()
1298 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1303 if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) in fnic_fcpio_itmf_cmpl_handler()
1304 atomic64_inc(&abts_stats->abort_io_not_found); in fnic_fcpio_itmf_cmpl_handler()
1307 &term_stats->terminate_io_not_found); in fnic_fcpio_itmf_cmpl_handler()
1310 if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) in fnic_fcpio_itmf_cmpl_handler()
1311 atomic64_inc(&abts_stats->abort_failures); in fnic_fcpio_itmf_cmpl_handler()
1314 &term_stats->terminate_failures); in fnic_fcpio_itmf_cmpl_handler()
1317 if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) { in fnic_fcpio_itmf_cmpl_handler()
1319 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1323 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; in fnic_fcpio_itmf_cmpl_handler()
1324 fnic_priv(sc)->abts_status = hdr_status; in fnic_fcpio_itmf_cmpl_handler()
1326 /* If the status is IO not found, consider it a success */ in fnic_fcpio_itmf_cmpl_handler()
1328 fnic_priv(sc)->abts_status = FCPIO_SUCCESS; in fnic_fcpio_itmf_cmpl_handler()
1330 if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) in fnic_fcpio_itmf_cmpl_handler()
1331 atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); in fnic_fcpio_itmf_cmpl_handler()
1333 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1340 * signal completion to it. IO will be cleaned in the thread in fnic_fcpio_itmf_cmpl_handler()
1343 if (io_req->abts_done) { in fnic_fcpio_itmf_cmpl_handler()
1344 complete(io_req->abts_done); in fnic_fcpio_itmf_cmpl_handler()
1345 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1346 shost_printk(KERN_INFO, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1350 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1351 "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n", in fnic_fcpio_itmf_cmpl_handler()
1354 fnic_priv(sc)->io_req = NULL; in fnic_fcpio_itmf_cmpl_handler()
1355 sc->result = (DID_ERROR << 16); in fnic_fcpio_itmf_cmpl_handler()
1356 fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; in fnic_fcpio_itmf_cmpl_handler()
1357 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1360 mempool_free(io_req, fnic->io_req_pool); in fnic_fcpio_itmf_cmpl_handler()
1362 sc->device->host->host_no, id, in fnic_fcpio_itmf_cmpl_handler()
1364 jiffies_to_msecs(jiffies - start_time), in fnic_fcpio_itmf_cmpl_handler()
1367 (u64)sc->cmnd[0] << 32 | in fnic_fcpio_itmf_cmpl_handler()
1368 (u64)sc->cmnd[2] << 24 | in fnic_fcpio_itmf_cmpl_handler()
1369 (u64)sc->cmnd[3] << 16 | in fnic_fcpio_itmf_cmpl_handler()
1370 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_fcpio_itmf_cmpl_handler()
1373 atomic64_dec(&fnic_stats->io_stats.active_ios); in fnic_fcpio_itmf_cmpl_handler()
1374 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_fcpio_itmf_cmpl_handler()
1375 atomic64_dec(&fnic->io_cmpl_skip); in fnic_fcpio_itmf_cmpl_handler()
1377 atomic64_inc(&fnic_stats->io_stats.io_completions); in fnic_fcpio_itmf_cmpl_handler()
1380 /* Completion of device reset */ in fnic_fcpio_itmf_cmpl_handler()
1381 shost_printk(KERN_INFO, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1385 fnic_priv(sc)->lr_status = hdr_status; in fnic_fcpio_itmf_cmpl_handler()
1386 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_fcpio_itmf_cmpl_handler()
1387 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1388 fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING; in fnic_fcpio_itmf_cmpl_handler()
1390 sc->device->host->host_no, id, sc, in fnic_fcpio_itmf_cmpl_handler()
1391 jiffies_to_msecs(jiffies - start_time), in fnic_fcpio_itmf_cmpl_handler()
1393 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1399 if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) { in fnic_fcpio_itmf_cmpl_handler()
1401 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1403 sc->device->host->host_no, id, sc, in fnic_fcpio_itmf_cmpl_handler()
1404 jiffies_to_msecs(jiffies - start_time), in fnic_fcpio_itmf_cmpl_handler()
1406 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1407 "dev reset cmpl recd after time out. " in fnic_fcpio_itmf_cmpl_handler()
1413 fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; in fnic_fcpio_itmf_cmpl_handler()
1414 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; in fnic_fcpio_itmf_cmpl_handler()
1415 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1419 if (io_req->dr_done) in fnic_fcpio_itmf_cmpl_handler()
1420 complete(io_req->dr_done); in fnic_fcpio_itmf_cmpl_handler()
1421 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1424 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1425 "%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n", in fnic_fcpio_itmf_cmpl_handler()
1426 __func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); in fnic_fcpio_itmf_cmpl_handler()
1427 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1442 switch (desc->hdr.type) { in fnic_fcpio_cmpl_handler()
1444 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ in fnic_fcpio_cmpl_handler()
1447 case FCPIO_RESET_CMPL: /* fw completed reset */ in fnic_fcpio_cmpl_handler()
1448 atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_fcpio_cmpl_handler()
1454 cq_index -= fnic->copy_wq_base; in fnic_fcpio_cmpl_handler()
1456 switch (desc->hdr.type) { in fnic_fcpio_cmpl_handler()
1465 case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ in fnic_fcpio_cmpl_handler()
1474 case FCPIO_RESET_CMPL: /* fw completed reset */ in fnic_fcpio_cmpl_handler()
1479 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_cmpl_handler()
1481 desc->hdr.type); in fnic_fcpio_cmpl_handler()
1495 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_wq_copy_cmpl_handler()
1502 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], in fnic_wq_copy_cmpl_handler()
1506 delta_jiffies = end_jiffies - start_jiffies; in fnic_wq_copy_cmpl_handler()
1507 if (delta_jiffies > (u64) atomic64_read(&misc_stats->max_isr_jiffies)) { in fnic_wq_copy_cmpl_handler()
1508 atomic64_set(&misc_stats->max_isr_jiffies, delta_jiffies); in fnic_wq_copy_cmpl_handler()
1510 atomic64_set(&misc_stats->max_isr_time_ms, delta_ms); in fnic_wq_copy_cmpl_handler()
1511 atomic64_set(&misc_stats->corr_work_done, cur_work_done); in fnic_wq_copy_cmpl_handler()
1524 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_cleanup_io_iter()
1533 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1535 fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; in fnic_cleanup_io_iter()
1537 io_req = fnic_priv(sc)->io_req; in fnic_cleanup_io_iter()
1539 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1540 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_cleanup_io_iter()
1542 hwq, mqtag, tag, fnic_priv(sc)->flags); in fnic_cleanup_io_iter()
1546 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && in fnic_cleanup_io_iter()
1547 !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { in fnic_cleanup_io_iter()
1549 * We will be here only when FW completes reset in fnic_cleanup_io_iter()
1552 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; in fnic_cleanup_io_iter()
1553 if (io_req && io_req->dr_done) in fnic_cleanup_io_iter()
1554 complete(io_req->dr_done); in fnic_cleanup_io_iter()
1555 else if (io_req && io_req->abts_done) in fnic_cleanup_io_iter()
1556 complete(io_req->abts_done); in fnic_cleanup_io_iter()
1558 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1560 } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { in fnic_cleanup_io_iter()
1561 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1565 fnic_priv(sc)->io_req = NULL; in fnic_cleanup_io_iter()
1566 io_req->sc = NULL; in fnic_cleanup_io_iter()
1567 start_time = io_req->start_time; in fnic_cleanup_io_iter()
1568 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1575 mempool_free(io_req, fnic->io_req_pool); in fnic_cleanup_io_iter()
1577 sc->result = DID_TRANSPORT_DISRUPTED << 16; in fnic_cleanup_io_iter()
1578 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_cleanup_io_iter()
1580 mqtag, tag, sc, (jiffies - start_time)); in fnic_cleanup_io_iter()
1582 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_cleanup_io_iter()
1583 atomic64_dec(&fnic->io_cmpl_skip); in fnic_cleanup_io_iter()
1585 atomic64_inc(&fnic_stats->io_stats.io_completions); in fnic_cleanup_io_iter()
1588 sc->device->host->host_no, tag, sc, in fnic_cleanup_io_iter()
1589 jiffies_to_msecs(jiffies - start_time), in fnic_cleanup_io_iter()
1590 0, ((u64) sc->cmnd[0] << 32 | in fnic_cleanup_io_iter()
1591 (u64) sc->cmnd[2] << 24 | in fnic_cleanup_io_iter()
1592 (u64) sc->cmnd[3] << 16 | in fnic_cleanup_io_iter()
1593 (u64) sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_cleanup_io_iter()
1594 (((u64) fnic_priv(sc)->flags << 32) | fnic_priv(sc)-> in fnic_cleanup_io_iter()
1610 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_cleanup_io()
1611 "Outstanding ioreq count: %d active io count: %lld Waiting\n", in fnic_cleanup_io()
1613 atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); in fnic_cleanup_io()
1615 scsi_host_busy_iter(fnic->host, in fnic_cleanup_io()
1618 /* with sg3utils device reset, SC needs to be retrieved from ioreq */ in fnic_cleanup_io()
1619 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_cleanup_io()
1620 io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id]; in fnic_cleanup_io()
1622 sc = io_req->sc; in fnic_cleanup_io()
1624 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) in fnic_cleanup_io()
1625 && !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { in fnic_cleanup_io()
1626 fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; in fnic_cleanup_io()
1627 if (io_req && io_req->dr_done) in fnic_cleanup_io()
1628 complete(io_req->dr_done); in fnic_cleanup_io()
1632 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_cleanup_io()
1635 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_cleanup_io()
1636 "Outstanding ioreq count: %d active io count: %lld Waiting\n", in fnic_cleanup_io()
1638 atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); in fnic_cleanup_io()
1648 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_wq_copy_cleanup_handler()
1656 fcpio_tag_id_dec(&desc->hdr.tag, &id); in fnic_wq_copy_cleanup_handler()
1659 if (id >= fnic->fnic_max_tag_id) in fnic_wq_copy_cleanup_handler()
1662 sc = scsi_host_find_tag(fnic->host, id); in fnic_wq_copy_cleanup_handler()
1667 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_wq_copy_cleanup_handler()
1669 /* Get the IO context which this desc refers to */ in fnic_wq_copy_cleanup_handler()
1670 io_req = fnic_priv(sc)->io_req; in fnic_wq_copy_cleanup_handler()
1675 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_wq_copy_cleanup_handler()
1679 fnic_priv(sc)->io_req = NULL; in fnic_wq_copy_cleanup_handler()
1680 io_req->sc = NULL; in fnic_wq_copy_cleanup_handler()
1681 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(id)] = NULL; in fnic_wq_copy_cleanup_handler()
1683 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_wq_copy_cleanup_handler()
1685 start_time = io_req->start_time; in fnic_wq_copy_cleanup_handler()
1687 mempool_free(io_req, fnic->io_req_pool); in fnic_wq_copy_cleanup_handler()
1690 sc->result = DID_NO_CONNECT << 16; in fnic_wq_copy_cleanup_handler()
1691 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:" in fnic_wq_copy_cleanup_handler()
1695 sc->device->host->host_no, id, sc, in fnic_wq_copy_cleanup_handler()
1696 jiffies_to_msecs(jiffies - start_time), in fnic_wq_copy_cleanup_handler()
1697 0, ((u64)sc->cmnd[0] << 32 | in fnic_wq_copy_cleanup_handler()
1698 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | in fnic_wq_copy_cleanup_handler()
1699 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_wq_copy_cleanup_handler()
1710 struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq]; in fnic_queue_abort_io_req()
1711 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_queue_abort_io_req()
1713 struct fnic_tport_s *tport = io_req->tport; in fnic_queue_abort_io_req()
1715 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_queue_abort_io_req()
1718 atomic_dec(&fnic->in_flight); in fnic_queue_abort_io_req()
1719 atomic_dec(&tport->in_flight); in fnic_queue_abort_io_req()
1720 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queue_abort_io_req()
1723 atomic_inc(&fnic->in_flight); in fnic_queue_abort_io_req()
1724 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queue_abort_io_req()
1726 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_abort_io_req()
1728 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) in fnic_queue_abort_io_req()
1732 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_abort_io_req()
1733 atomic_dec(&fnic->in_flight); in fnic_queue_abort_io_req()
1734 atomic_dec(&tport->in_flight); in fnic_queue_abort_io_req()
1735 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_queue_abort_io_req()
1737 atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); in fnic_queue_abort_io_req()
1741 0, task_req, tag, fc_lun, io_req->port_id, in fnic_queue_abort_io_req()
1742 fnic->config.ra_tov, fnic->config.ed_tov); in fnic_queue_abort_io_req()
1744 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_queue_abort_io_req()
1745 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_queue_abort_io_req()
1746 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_queue_abort_io_req()
1747 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_queue_abort_io_req()
1748 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_queue_abort_io_req()
1750 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_abort_io_req()
1751 atomic_dec(&fnic->in_flight); in fnic_queue_abort_io_req()
1766 struct fnic *fnic = iter_data->fnic; in fnic_rport_abort_io_iter()
1769 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; in fnic_rport_abort_io_iter()
1770 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; in fnic_rport_abort_io_iter()
1780 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1785 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1786 io_req = fnic_priv(sc)->io_req; in fnic_rport_abort_io_iter()
1787 if (!io_req || io_req->port_id != iter_data->port_id) { in fnic_rport_abort_io_iter()
1788 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1792 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && in fnic_rport_abort_io_iter()
1793 !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { in fnic_rport_abort_io_iter()
1794 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1795 "hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n", in fnic_rport_abort_io_iter()
1796 hwq, abt_tag, fnic_priv(sc)->flags); in fnic_rport_abort_io_iter()
1797 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1802 * Found IO that is still pending with firmware and in fnic_rport_abort_io_iter()
1805 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_rport_abort_io_iter()
1806 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1810 if (io_req->abts_done) { in fnic_rport_abort_io_iter()
1811 shost_printk(KERN_ERR, fnic->host, in fnic_rport_abort_io_iter()
1812 "fnic_rport_exch_reset: io_req->abts_done is set state is %s\n", in fnic_rport_abort_io_iter()
1813 fnic_ioreq_state_to_str(fnic_priv(sc)->state)); in fnic_rport_abort_io_iter()
1816 if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { in fnic_rport_abort_io_iter()
1817 shost_printk(KERN_ERR, fnic->host, in fnic_rport_abort_io_iter()
1818 "rport_exch_reset IO not yet issued %p abt_tag 0x%x", in fnic_rport_abort_io_iter()
1820 shost_printk(KERN_ERR, fnic->host, in fnic_rport_abort_io_iter()
1821 "flags %x state %d\n", fnic_priv(sc)->flags, in fnic_rport_abort_io_iter()
1822 fnic_priv(sc)->state); in fnic_rport_abort_io_iter()
1824 old_ioreq_state = fnic_priv(sc)->state; in fnic_rport_abort_io_iter()
1825 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; in fnic_rport_abort_io_iter()
1826 fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; in fnic_rport_abort_io_iter()
1828 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { in fnic_rport_abort_io_iter()
1829 atomic64_inc(&reset_stats->device_reset_terminates); in fnic_rport_abort_io_iter()
1831 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1832 "dev reset sc 0x%p\n", sc); in fnic_rport_abort_io_iter()
1834 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1836 WARN_ON_ONCE(io_req->abts_done); in fnic_rport_abort_io_iter()
1837 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1840 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1843 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_rport_abort_io_iter()
1852 * lun reset in fnic_rport_abort_io_iter()
1854 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1855 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1857 hwq, abt_tag, fnic_priv(sc)->flags); in fnic_rport_abort_io_iter()
1858 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) in fnic_rport_abort_io_iter()
1859 fnic_priv(sc)->state = old_ioreq_state; in fnic_rport_abort_io_iter()
1860 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1862 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1863 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) in fnic_rport_abort_io_iter()
1864 fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; in fnic_rport_abort_io_iter()
1866 fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; in fnic_rport_abort_io_iter()
1867 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1868 atomic64_inc(&term_stats->terminates); in fnic_rport_abort_io_iter()
1869 iter_data->term_cnt++; in fnic_rport_abort_io_iter()
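The terminate path above flips the request state optimistically and rolls it back only on failure: save the old state, mark the command ABTS_PENDING, drop the lock, then restore the saved state if handing the abort to the firmware fails. An outline of that pattern, with a hypothetical queue_abort_to_fw() standing in for the real queueing call:

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	old_ioreq_state = fnic_priv(sc)->state;
	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);

	if (queue_abort_to_fw(fnic, abt_tag, io_req)) {	/* hypothetical */
		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
		/* roll back only if nothing advanced the state meanwhile */
		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
			fnic_priv(sc)->state = old_ioreq_state;
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
	}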
1879 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; in fnic_rport_exch_reset()
1886 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_exch_reset()
1887 "fnic rport exchange reset for tport: 0x%06x\n", in fnic_rport_exch_reset()
1890 if (fnic->in_remove) in fnic_rport_exch_reset()
1894 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_exch_reset()
1895 "Starting terminates: rport:0x%x portid-io-count: %d active-io-count: %lld\n", in fnic_rport_exch_reset()
1897 atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); in fnic_rport_exch_reset()
1899 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_rport_exch_reset()
1901 atomic_inc(&fnic->in_flight); in fnic_rport_exch_reset()
1903 atomic_dec(&fnic->in_flight); in fnic_rport_exch_reset()
1904 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rport_exch_reset()
1907 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rport_exch_reset()
1909 scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter, in fnic_rport_exch_reset()
1912 if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates)) in fnic_rport_exch_reset()
1913 atomic64_set(&term_stats->max_terminates, iter_data.term_cnt); in fnic_rport_exch_reset()
1915 atomic_dec(&fnic->in_flight); in fnic_rport_exch_reset()
1920 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_exch_reset()
1921 "rport: 0x%x remaining portid-io-count: %d ", in fnic_rport_exch_reset()
1937 rdd_data = rport->dd_data; in fnic_terminate_rport_io()
1939 tport = rdd_data->tport; in fnic_terminate_rport_io()
1942 "term rport io called after tport is deleted. Returning 0x%8x\n", in fnic_terminate_rport_io()
1943 rport->port_id); in fnic_terminate_rport_io()
1946 "term rport io called after tport is set 0x%8x\n", in fnic_terminate_rport_io()
1947 rport->port_id); in fnic_terminate_rport_io()
1951 iport = (struct fnic_iport_s *) tport->iport; in fnic_terminate_rport_io()
1952 fnic = iport->fnic; in fnic_terminate_rport_io()
1953 fnic_rport_exch_reset(fnic, rport->port_id); in fnic_terminate_rport_io()
1959 * FCP-SCSI specific handling for module unload
1972 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_unload()
1973 fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT; in fnic_scsi_unload()
1974 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_unload()
1976 if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT) in fnic_scsi_unload()
1979 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_unload()
1980 fnic->in_remove = 1; in fnic_scsi_unload()
1981 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_unload()
1991 fc_remove_host(fnic->host); in fnic_scsi_unload_cleanup()
1992 scsi_remove_host(fnic->host); in fnic_scsi_unload_cleanup()
1993 for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) in fnic_scsi_unload_cleanup()
1994 kfree(fnic->sw_copy_wq[hwq].io_req_table); in fnic_scsi_unload_cleanup()
1999 * A SCSI IO is represented by an io_req in the driver.
2000 * The io_req is linked to the SCSI Cmd, thus tying it to the ULP's IO.
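As the listing shows, that linkage is bidirectional and mirrored by the per-hwq table, set up on submission and torn down symmetrically on completion:

	/* submission (fnic_queuecommand):
	 *	fnic_priv(sc)->io_req = io_req;
	 *	io_req->sc = sc;
	 *	sw_copy_wq[hwq].io_req_table[tag] = io_req;
	 * completion (fnic_fcpio_icmnd_cmpl_handler):
	 *	fnic_priv(sc)->io_req = NULL;
	 *	io_req->sc = NULL;
	 *	sw_copy_wq[hwq].io_req_table[tag] = NULL;
	 */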
2029 /* Get local-port, check ready and link up */ in fnic_abort_cmd()
2030 fnic = *((struct fnic **) shost_priv(sc->device->host)); in fnic_abort_cmd()
2032 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2033 iport = &fnic->iport; in fnic_abort_cmd()
2035 fnic_stats = &fnic->fnic_stats; in fnic_abort_cmd()
2036 abts_stats = &fnic->fnic_stats.abts_stats; in fnic_abort_cmd()
2037 term_stats = &fnic->fnic_stats.term_stats; in fnic_abort_cmd()
2039 rport = starget_to_rport(scsi_target(sc->device)); in fnic_abort_cmd()
2043 fnic_priv(sc)->flags = FNIC_NO_FLAGS; in fnic_abort_cmd()
2045 rdd_data = rport->dd_data; in fnic_abort_cmd()
2046 tport = rdd_data->tport; in fnic_abort_cmd()
2049 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2051 rport->port_id); in fnic_abort_cmd()
2052 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2054 sc->device->lun, hwq, mqtag, in fnic_abort_cmd()
2055 sc->cmnd[0], fnic_priv(sc)->flags); in fnic_abort_cmd()
2057 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2061 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2063 rport->port_id, sc->device->lun, hwq, mqtag); in fnic_abort_cmd()
2065 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2067 sc->cmnd[0], in fnic_abort_cmd()
2068 fnic_priv(sc)->flags); in fnic_abort_cmd()
2070 if (iport->state != FNIC_IPORT_STATE_READY) { in fnic_abort_cmd()
2071 atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); in fnic_abort_cmd()
2072 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2075 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2079 if ((tport->state != FDLS_TGT_STATE_READY) && in fnic_abort_cmd()
2080 (tport->state != FDLS_TGT_STATE_ADISC)) { in fnic_abort_cmd()
2081 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2082 "tport state: %d\n", tport->state); in fnic_abort_cmd()
2084 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2088 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2101 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2102 io_req = fnic_priv(sc)->io_req; in fnic_abort_cmd()
2105 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2109 io_req->abts_done = &tm_done; in fnic_abort_cmd()
2111 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_abort_cmd()
2112 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2116 abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); in fnic_abort_cmd()
2118 atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec); in fnic_abort_cmd()
2120 atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec); in fnic_abort_cmd()
2122 atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec); in fnic_abort_cmd()
2124 atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec); in fnic_abort_cmd()
2126 atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec); in fnic_abort_cmd()
2128 atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec); in fnic_abort_cmd()
2130 atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec); in fnic_abort_cmd()
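/*
 * Editor's reconstruction: the bucket conditions between the counter
 * increments above are elided by the line matcher. The thresholds below
 * are inferred from the counter names (abt_issued_time is in
 * milliseconds); the exact comparisons in the source may differ.
 */
if (abt_issued_time <= 6000)
	atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
else if (abt_issued_time <= 20000)
	atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
else if (abt_issued_time <= 30000)
	atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
else if (abt_issued_time <= 40000)
	atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
else if (abt_issued_time <= 50000)
	atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
else if (abt_issued_time <= 60000)
	atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
else
	atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);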
2132 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2134 sc->cmnd[0], abt_issued_time); in fnic_abort_cmd()
2138 * the completion won't be done till mid-layer, since abort in fnic_abort_cmd()
2141 old_ioreq_state = fnic_priv(sc)->state; in fnic_abort_cmd()
2142 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; in fnic_abort_cmd()
2143 fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; in fnic_abort_cmd()
2145 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2150 * the IO. Else, just locally terminate the IO in the firmware in fnic_abort_cmd()
2155 atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); in fnic_abort_cmd()
2160 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_abort_cmd()
2164 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2165 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) in fnic_abort_cmd()
2166 fnic_priv(sc)->state = old_ioreq_state; in fnic_abort_cmd()
2167 io_req = fnic_priv(sc)->io_req; in fnic_abort_cmd()
2169 io_req->abts_done = NULL; in fnic_abort_cmd()
2170 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2175 fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED; in fnic_abort_cmd()
2176 atomic64_inc(&fnic_stats->abts_stats.aborts); in fnic_abort_cmd()
2178 fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED; in fnic_abort_cmd()
2179 atomic64_inc(&fnic_stats->term_stats.terminates); in fnic_abort_cmd()
2183 * We queued an abort IO; wait for its completion. in fnic_abort_cmd()
2190 (2 * fnic->config.ra_tov + in fnic_abort_cmd()
2191 fnic->config.ed_tov)); in fnic_abort_cmd()
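/*
 * Editor's sketch: the head of the call whose trailing arguments appear
 * above is elided by the matcher. Presumably it blocks on the tm_done
 * completion that io_req->abts_done points at, bounded by the FC
 * timeouts shown:
 */
wait_for_completion_timeout(&tm_done,
			    msecs_to_jiffies(2 * fnic->config.ra_tov +
					     fnic->config.ed_tov));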
2194 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2196 io_req = fnic_priv(sc)->io_req; in fnic_abort_cmd()
2198 atomic64_inc(&fnic_stats->io_stats.ioreq_null); in fnic_abort_cmd()
2199 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2200 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; in fnic_abort_cmd()
2204 io_req->abts_done = NULL; in fnic_abort_cmd()
2207 if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { in fnic_abort_cmd()
2208 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2210 atomic64_inc(&abts_stats->abort_drv_timeouts); in fnic_abort_cmd()
2212 atomic64_inc(&term_stats->terminate_drv_timeouts); in fnic_abort_cmd()
2214 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT; in fnic_abort_cmd()
2219 /* IO out of order */ in fnic_abort_cmd()
2221 if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { in fnic_abort_cmd()
2222 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2223 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2224 "Issuing host reset due to out of order IO\n"); in fnic_abort_cmd()
2230 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; in fnic_abort_cmd()
2232 start_time = io_req->start_time; in fnic_abort_cmd()
2236 * Device reset will clean the I/O. in fnic_abort_cmd()
2238 if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS || in fnic_abort_cmd()
2239 (fnic_priv(sc)->abts_status == FCPIO_ABORTED)) { in fnic_abort_cmd()
2240 fnic_priv(sc)->io_req = NULL; in fnic_abort_cmd()
2241 io_req->sc = NULL; in fnic_abort_cmd()
2244 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2248 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL; in fnic_abort_cmd()
2249 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2252 mempool_free(io_req, fnic->io_req_pool); in fnic_abort_cmd()
2254 /* Call SCSI completion function to complete the IO */ in fnic_abort_cmd()
2255 sc->result = DID_ABORT << 16; in fnic_abort_cmd()
2257 atomic64_dec(&fnic_stats->io_stats.active_ios); in fnic_abort_cmd()
2258 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_abort_cmd()
2259 atomic64_dec(&fnic->io_cmpl_skip); in fnic_abort_cmd()
2261 atomic64_inc(&fnic_stats->io_stats.io_completions); in fnic_abort_cmd()
2264 FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, mqtag, sc, in fnic_abort_cmd()
2265 jiffies_to_msecs(jiffies - start_time), in fnic_abort_cmd()
2266 0, ((u64)sc->cmnd[0] << 32 | in fnic_abort_cmd()
2267 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | in fnic_abort_cmd()
2268 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_abort_cmd()
2271 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2283 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_queue_dr_io_req()
2289 struct fnic_tport_s *tport = io_req->tport; in fnic_queue_dr_io_req()
2291 tag = io_req->tag; in fnic_queue_dr_io_req()
2293 wq = &fnic->hw_copy_wq[hwq]; in fnic_queue_dr_io_req()
2295 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_queue_dr_io_req()
2298 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queue_dr_io_req()
2301 atomic_inc(&fnic->in_flight); in fnic_queue_dr_io_req()
2302 atomic_inc(&tport->in_flight); in fnic_queue_dr_io_req()
2304 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queue_dr_io_req()
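/* Editor's note (hedged): the fnic- and tport-level in_flight counts taken
 * here, and dropped on the exit path below, appear to let reset/teardown
 * paths wait for outstanding firmware requests to drain. */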
2306 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_dr_io_req()
2308 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) in fnic_queue_dr_io_req()
2312 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_queue_dr_io_req()
2313 "queue_dr_io_req failure - no descriptors\n"); in fnic_queue_dr_io_req()
2314 atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); in fnic_queue_dr_io_req()
2315 ret = -EAGAIN; in fnic_queue_dr_io_req()
2320 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_queue_dr_io_req()
2325 fc_lun.scsi_lun, io_req->port_id, in fnic_queue_dr_io_req()
2326 fnic->config.ra_tov, fnic->config.ed_tov); in fnic_queue_dr_io_req()
2328 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_queue_dr_io_req()
2329 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_queue_dr_io_req()
2330 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_queue_dr_io_req()
2331 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_queue_dr_io_req()
2332 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_queue_dr_io_req()
2335 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_dr_io_req()
2336 atomic_dec(&fnic->in_flight); in fnic_queue_dr_io_req()
2337 atomic_dec(&tport->in_flight); in fnic_queue_dr_io_req()
2353 struct fnic *fnic = iter_data->fnic; in fnic_pending_aborts_iter()
2354 struct scsi_device *lun_dev = iter_data->lun_dev; in fnic_pending_aborts_iter()
2363 if (sc == iter_data->lr_sc || sc->device != lun_dev) in fnic_pending_aborts_iter()
2369 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2370 io_req = fnic_priv(sc)->io_req; in fnic_pending_aborts_iter()
2372 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2377 * Found IO that is still pending with firmware and in fnic_pending_aborts_iter()
2380 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_pending_aborts_iter()
2381 "Found IO in %s on lun\n", in fnic_pending_aborts_iter()
2382 fnic_ioreq_state_to_str(fnic_priv(sc)->state)); in fnic_pending_aborts_iter()
2384 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { in fnic_pending_aborts_iter()
2385 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2388 if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && in fnic_pending_aborts_iter()
2389 (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { in fnic_pending_aborts_iter()
2390 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_pending_aborts_iter()
2392 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2396 if (io_req->abts_done) in fnic_pending_aborts_iter()
2397 shost_printk(KERN_ERR, fnic->host, in fnic_pending_aborts_iter()
2398 "%s: io_req->abts_done is set state is %s\n", in fnic_pending_aborts_iter()
2399 __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); in fnic_pending_aborts_iter()
2400 old_ioreq_state = fnic_priv(sc)->state; in fnic_pending_aborts_iter()
2402 * Any pending IO issued prior to reset is expected to be in fnic_pending_aborts_iter()
2404 * FNIC_IOREQ_ABTS_PENDING to indicate an abort is pending on the IO. in fnic_pending_aborts_iter()
2405 * When the IO completes, it will be handed over and in fnic_pending_aborts_iter()
2408 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; in fnic_pending_aborts_iter()
2410 BUG_ON(io_req->abts_done); in fnic_pending_aborts_iter()
2412 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { in fnic_pending_aborts_iter()
2413 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_pending_aborts_iter()
2417 fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; in fnic_pending_aborts_iter()
2418 io_req->abts_done = &tm_done; in fnic_pending_aborts_iter()
2419 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2422 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_pending_aborts_iter()
2427 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2428 io_req = fnic_priv(sc)->io_req; in fnic_pending_aborts_iter()
2430 io_req->abts_done = NULL; in fnic_pending_aborts_iter()
2431 if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) in fnic_pending_aborts_iter()
2432 fnic_priv(sc)->state = old_ioreq_state; in fnic_pending_aborts_iter()
2433 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2434 iter_data->ret = FAILED; in fnic_pending_aborts_iter()
2435 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_pending_aborts_iter()
2440 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2441 if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) in fnic_pending_aborts_iter()
2442 fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; in fnic_pending_aborts_iter()
2443 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2445 fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; in fnic_pending_aborts_iter()
2448 (fnic->config.ed_tov)); in fnic_pending_aborts_iter()
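/* Editor's note (hedged): the elided wait above bounds each internal
 * terminate by a single E_D_TOV, shorter than the 2 * R_A_TOV + E_D_TOV
 * wait used for a host-initiated abort in fnic_abort_cmd(). */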
2451 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2452 io_req = fnic_priv(sc)->io_req; in fnic_pending_aborts_iter()
2454 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2455 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; in fnic_pending_aborts_iter()
2459 io_req->abts_done = NULL; in fnic_pending_aborts_iter()
2462 if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { in fnic_pending_aborts_iter()
2463 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2464 fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; in fnic_pending_aborts_iter()
2465 iter_data->ret = FAILED; in fnic_pending_aborts_iter()
2468 fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; in fnic_pending_aborts_iter()
2470 /* original sc used for lr is handled by dev reset code */ in fnic_pending_aborts_iter()
2471 if (sc != iter_data->lr_sc) { in fnic_pending_aborts_iter()
2472 fnic_priv(sc)->io_req = NULL; in fnic_pending_aborts_iter()
2473 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(abt_tag)] = NULL; in fnic_pending_aborts_iter()
2475 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2477 /* original sc used for lr is handled by dev reset code */ in fnic_pending_aborts_iter()
2478 if (sc != iter_data->lr_sc) { in fnic_pending_aborts_iter()
2480 mempool_free(io_req, fnic->io_req_pool); in fnic_pending_aborts_iter()
2484 * Any IO returned during reset needs to call scsi_done in fnic_pending_aborts_iter()
2488 sc->result = DID_RESET << 16; in fnic_pending_aborts_iter()
2496 * For each outstanding IO on this LUN whose abort is not completed by fw,
2508 .lun_dev = lr_sc->device, in fnic_clean_pending_aborts()
2514 scsi_host_busy_iter(fnic->host, in fnic_clean_pending_aborts()
2520 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); in fnic_clean_pending_aborts()
2527 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_clean_pending_aborts()
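/*
 * Editor's outline of fnic_clean_pending_aborts(), stitched from the
 * fragments above. Control flow and the iter_data type name are
 * assumptions where lines are elided.
 */
static int fnic_clean_pending_aborts_sketch(struct fnic *fnic,
					    struct scsi_cmnd *lr_sc)
{
	struct fnic_pending_aborts_iter_data iter_data = {
		.fnic = fnic,
		.lun_dev = lr_sc->device,
		.lr_sc = lr_sc,
		.ret = SUCCESS,
	};

	/* terminate every IO on the LUN still pending in firmware */
	scsi_host_busy_iter(fnic->host, fnic_pending_aborts_iter, &iter_data);
	if (iter_data.ret == FAILED)
		return iter_data.ret;

	/* give straggling completions time to land, then re-check */
	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
	if (fnic_is_abts_pending(fnic, lr_sc))
		return 1;	/* nonzero: caller fails the LUN reset */

	return 0;
}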
2533 * SCSI EH thread issues a LUN Reset when one or more commands on a LUN
2551 int mqtag = rq->tag; in fnic_device_reset()
2565 /* Get local-port, check ready and link up */ in fnic_device_reset()
2566 fnic = *((struct fnic **) shost_priv(sc->device->host)); in fnic_device_reset()
2567 iport = &fnic->iport; in fnic_device_reset()
2569 fnic_stats = &fnic->fnic_stats; in fnic_device_reset()
2570 reset_stats = &fnic_stats->reset_stats; in fnic_device_reset()
2572 atomic64_inc(&reset_stats->device_resets); in fnic_device_reset()
2574 rport = starget_to_rport(scsi_target(sc->device)); in fnic_device_reset()
2576 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_device_reset()
2577 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2578 "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n", in fnic_device_reset()
2579 rport->port_id, sc->device->lun, hwq, mqtag, in fnic_device_reset()
2580 fnic_priv(sc)->flags); in fnic_device_reset()
2582 rdd_data = rport->dd_data; in fnic_device_reset()
2583 tport = rdd_data->tport; in fnic_device_reset()
2585 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_device_reset()
2587 rport->port_id, sc->device->lun); in fnic_device_reset()
2588 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2592 if (iport->state != FNIC_IPORT_STATE_READY) { in fnic_device_reset()
2593 atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); in fnic_device_reset()
2594 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_device_reset()
2596 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2600 if ((tport->state != FDLS_TGT_STATE_READY) && in fnic_device_reset()
2601 (tport->state != FDLS_TGT_STATE_ADISC)) { in fnic_device_reset()
2602 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_device_reset()
2603 "tport state: %d\n", tport->state); in fnic_device_reset()
2604 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2607 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2611 atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); in fnic_device_reset()
2615 fnic_priv(sc)->flags = FNIC_DEVICE_RESET; in fnic_device_reset()
2619 * For device reset issued through sg3utils, we let in fnic_device_reset()
2625 mutex_lock(&fnic->sgreset_mutex); in fnic_device_reset()
2626 mqtag = fnic->fnic_max_tag_id; in fnic_device_reset()
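/* Editor's note (hedged): a reset issued through sg3utils arrives without
 * a block-layer tag of its own, so the driver serializes such resets with
 * sgreset_mutex and borrows the reserved tag fnic_max_tag_id. */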
2633 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2634 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2641 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); in fnic_device_reset()
2643 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2647 io_req->port_id = rport->port_id; in fnic_device_reset()
2648 io_req->tag = mqtag; in fnic_device_reset()
2649 fnic_priv(sc)->io_req = io_req; in fnic_device_reset()
2650 io_req->tport = tport; in fnic_device_reset()
2651 io_req->sc = sc; in fnic_device_reset()
2653 if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) in fnic_device_reset()
2655 fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag)); in fnic_device_reset()
2657 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = in fnic_device_reset()
2660 io_req->dr_done = &tm_done; in fnic_device_reset()
2661 fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; in fnic_device_reset()
2662 fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; in fnic_device_reset()
2663 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2665 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag); in fnic_device_reset()
2668 * issue the device reset; if enqueue fails, clean up the io_req in fnic_device_reset()
2672 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2673 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2675 io_req->dr_done = NULL; in fnic_device_reset()
2678 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2679 fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED; in fnic_device_reset()
2680 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2682 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_device_reset()
2683 old_link_down_cnt = iport->fnic->link_down_cnt; in fnic_device_reset()
2684 old_soft_reset_count = fnic->soft_reset_count; in fnic_device_reset()
2685 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2688 * Wait on the local completion for LUN reset. The io_req may be in fnic_device_reset()
2696 * 1) The device reset completed from target. in fnic_device_reset()
2697 * 2) Device reset timed out. in fnic_device_reset()
2698 * 3) A link-down/host_reset may have happened in between. in fnic_device_reset()
2699 * 4) The device reset was aborted and io_req->dr_done was called. in fnic_device_reset()
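/*
 * Editor's sketch of the elided wait the comment above refers to; the
 * timeout macro name is an assumption:
 */
wait_for_completion_timeout(&tm_done,
			    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));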
2703 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_device_reset()
2704 if ((old_link_down_cnt != fnic->link_down_cnt) || in fnic_device_reset()
2705 (fnic->reset_in_progress) || in fnic_device_reset()
2706 (fnic->soft_reset_count != old_soft_reset_count) || in fnic_device_reset()
2707 (iport->state != FNIC_IPORT_STATE_READY)) in fnic_device_reset()
2710 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2712 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2713 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2715 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2716 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2722 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_device_reset()
2723 "Host reset called for fnic. Exit device reset\n"); in fnic_device_reset()
2724 io_req->dr_done = NULL; in fnic_device_reset()
2727 io_req->dr_done = NULL; in fnic_device_reset()
2729 status = fnic_priv(sc)->lr_status; in fnic_device_reset()
2732 * If the LUN reset did not complete, bail out with FAILED. io_req in fnic_device_reset()
2736 atomic64_inc(&reset_stats->device_reset_timeouts); in fnic_device_reset()
2737 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2738 "Device reset timed out\n"); in fnic_device_reset()
2739 fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; in fnic_device_reset()
2740 int_to_scsilun(sc->device->lun, &fc_lun); in fnic_device_reset()
2743 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2748 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2750 fnic->host, fnic->fnic_num, in fnic_device_reset()
2751 "Device reset completed - failed\n"); in fnic_device_reset()
2752 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2758 * completed. If any of these fail, then LUN reset fails. in fnic_device_reset()
2760 * the lun reset cmd. If all cmds get cleaned, the lun reset in fnic_device_reset()
2764 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2765 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2766 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2767 "Device reset failed: Cannot abort all IOs\n"); in fnic_device_reset()
2771 /* Clean lun reset command */ in fnic_device_reset()
2772 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2773 io_req = fnic_priv(sc)->io_req; in fnic_device_reset()
2780 fnic_priv(sc)->io_req = NULL; in fnic_device_reset()
2781 io_req->sc = NULL; in fnic_device_reset()
2782 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(io_req->tag)] = NULL; in fnic_device_reset()
2785 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2788 start_time = io_req->start_time; in fnic_device_reset()
2790 mempool_free(io_req, fnic->io_req_pool); in fnic_device_reset()
2794 * If a link event is seen while the LUN reset is issued, we need in fnic_device_reset()
2795 * to complete the LUN reset here in fnic_device_reset()
2798 sc->result = DID_RESET << 16; in fnic_device_reset()
2803 FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc, in fnic_device_reset()
2804 jiffies_to_msecs(jiffies - start_time), in fnic_device_reset()
2805 0, ((u64)sc->cmnd[0] << 32 | in fnic_device_reset()
2806 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | in fnic_device_reset()
2807 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), in fnic_device_reset()
2811 fnic->sgreset_sc = NULL; in fnic_device_reset()
2812 mutex_unlock(&fnic->sgreset_mutex); in fnic_device_reset()
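/* pairs with the sgreset_mutex taken on entry for the sg3utils path; the
 * borrowed reserved tag is free for the next caller */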
2815 while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) { in fnic_device_reset()
2820 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_device_reset()
2826 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2827 "Returning from device reset %s\n", in fnic_device_reset()
2832 atomic64_inc(&reset_stats->device_reset_failures); in fnic_device_reset()
2842 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_post_flogo_linkflap()
2844 if (fnic->link_status) { in fnic_post_flogo_linkflap()
2845 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_post_flogo_linkflap()
2849 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_post_flogo_linkflap()
2859 reset_stats = &fnic->fnic_stats.reset_stats; in fnic_reset()
2861 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_reset()
2862 "Issuing fnic reset\n"); in fnic_reset()
2864 atomic64_inc(&reset_stats->fnic_resets); in fnic_reset()
2867 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_reset()
2868 "Returning from fnic reset"); in fnic_reset()
2870 atomic64_inc(&reset_stats->fnic_reset_completions); in fnic_reset()
2878 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_issue_fc_host_lip()
2891 struct fnic_iport_s *iport = &fnic->iport; in fnic_host_reset()
2893 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2894 if (fnic->reset_in_progress == NOT_IN_PROGRESS) { in fnic_host_reset()
2895 fnic->reset_in_progress = IN_PROGRESS; in fnic_host_reset()
2897 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2898 wait_for_completion_timeout(&fnic->reset_completion_wait, in fnic_host_reset()
2901 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2902 if (fnic->reset_in_progress == IN_PROGRESS) { in fnic_host_reset()
2903 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2904 FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, in fnic_host_reset()
2905 "Firmware reset in progress. Skipping another host reset\n"); in fnic_host_reset()
2908 fnic->reset_in_progress = IN_PROGRESS; in fnic_host_reset()
2910 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2914 * scsi-ml tries to send a TUR to every device if host reset is in fnic_host_reset()
2919 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2920 fnic->reset_in_progress = NOT_IN_PROGRESS; in fnic_host_reset()
2921 complete(&fnic->reset_completion_wait); in fnic_host_reset()
2922 fnic->soft_reset_count++; in fnic_host_reset()
2925 if (fnic->link_status) { in fnic_host_reset()
2929 if (iport->state != FNIC_IPORT_STATE_READY in fnic_host_reset()
2930 && fnic->link_status) { in fnic_host_reset()
2931 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2933 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2940 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2942 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_host_reset()
2943 "host reset return status: %d\n", ret); in fnic_host_reset()
2951 struct fnic *fnic = iter_data->fnic; in fnic_abts_pending_iter()
2962 * ignore this lun reset cmd or cmds that do not belong to in fnic_abts_pending_iter()
2965 if (iter_data->lr_sc && sc == iter_data->lr_sc) in fnic_abts_pending_iter()
2967 if (iter_data->lun_dev && sc->device != iter_data->lun_dev) in fnic_abts_pending_iter()
2970 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_abts_pending_iter()
2972 io_req = fnic_priv(sc)->io_req; in fnic_abts_pending_iter()
2974 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abts_pending_iter()
2979 * Found IO that is still pending with firmware and in fnic_abts_pending_iter()
2982 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_abts_pending_iter()
2983 "hwq: %d tag: 0x%x Found IO in state: %s on lun\n", in fnic_abts_pending_iter()
2985 fnic_ioreq_state_to_str(fnic_priv(sc)->state)); in fnic_abts_pending_iter()
2986 cmd_state = fnic_priv(sc)->state; in fnic_abts_pending_iter()
2987 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abts_pending_iter()
2989 iter_data->ret = 1; in fnic_abts_pending_iter()
2991 return iter_data->ret ? false : true; in fnic_abts_pending_iter()
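/* scsi_host_busy_iter() stops iterating when the callback returns false,
 * so the walk ends as soon as one pending abort is found */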
3010 iter_data.lun_dev = lr_sc->device; in fnic_is_abts_pending()
3015 scsi_host_busy_iter(fnic->host, in fnic_is_abts_pending()
3023 * error handling levels return FAILED. If host reset completes
3026 * Host Reset is the highest level of error recovery. If this fails, then
3033 struct Scsi_Host *shost = sc->device->host; in fnic_eh_host_reset_handler()
3036 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_eh_host_reset_handler()
3037 "SCSI error handling: fnic host reset"); in fnic_eh_host_reset_handler()
3048 struct fnic_iport_s *iport = &fnic->iport; in fnic_scsi_fcpio_reset()
3052 /* issue fw reset */ in fnic_scsi_fcpio_reset()
3053 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
3054 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { in fnic_scsi_fcpio_reset()
3055 /* fw reset is in progress, poll for its completion */ in fnic_scsi_fcpio_reset()
3056 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
3057 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3059 fnic->state); in fnic_scsi_fcpio_reset()
3063 old_state = fnic->state; in fnic_scsi_fcpio_reset()
3064 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; in fnic_scsi_fcpio_reset()
3066 fnic_update_mac_locked(fnic, iport->hwmac); in fnic_scsi_fcpio_reset()
3067 fnic->fw_reset_done = &fw_reset_done; in fnic_scsi_fcpio_reset()
3068 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
3070 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3071 "Issuing fw reset\n"); in fnic_scsi_fcpio_reset()
3073 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
3074 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) in fnic_scsi_fcpio_reset()
3075 fnic->state = old_state; in fnic_scsi_fcpio_reset()
3076 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
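/* Editor's note (hedged): on success, the (partially elided) code below
 * blocks on fw_reset_done until the firmware reset completion arrives,
 * logging the timeout message further down if the wait expires. */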
3078 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3082 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3085 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3086 "FW reset completion timed out after %d ms)\n", in fnic_scsi_fcpio_reset()
3089 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts); in fnic_scsi_fcpio_reset()
3091 fnic->fw_reset_done = NULL; in fnic_scsi_fcpio_reset()