Lines Matching full:request
24 typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
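For context, a minimal sketch of a callback matching this typedef, loosely modelled on the kernel's own synchronous-completion helper; the name example_end_io and the use of a struct completion in end_io_data are illustrative assumptions, not taken from the header.

#include <linux/blk-mq.h>
#include <linux/completion.h>

/* Illustrative end_io callback: wake the submitter and let it free the rq. */
static enum rq_end_io_ret example_end_io(struct request *rq, blk_status_t err)
{
	struct completion *done = rq->end_io_data;

	complete(done);
	return RQ_END_IO_NONE;	/* blk-mq must not free the request here */
}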
27 * request flags */
34 /* request for flush sequence */
42 /* use an I/O scheduler for this request */
50 /* runtime pm request */
59 /* request completion needs to be signaled to zone write plugging. */
102 struct request { struct
124 struct request *rq_next; argument
129 /* Time that the first bio started allocating this request. */
132 /* Time that this request was allocated for this IO. */
166 * request reaches the dispatch list. The ipi_list is only used
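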
167 * to queue the request for softirq completion, which is long
168 * after the request has been unhashed (and even removed from
210 static inline enum req_op req_op(const struct request *req) in req_op() argument
215 static inline bool blk_rq_is_passthrough(struct request *rq) in blk_rq_is_passthrough()
220 static inline unsigned short req_get_ioprio(struct request *req) in req_get_ioprio()
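These accessors are commonly used together at dispatch time; the sketch below is an assumed driver-side helper (example_classify_rq is not from the header) showing how they branch on the operation type.

#include <linux/blk-mq.h>

/* Illustrative dispatch helper branching on the request operation. */
static blk_status_t example_classify_rq(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return BLK_STS_NOTSUPP;	/* driver-private commands handled elsewhere */

	switch (req_op(rq)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		return BLK_STS_OK;	/* normal data path */
	case REQ_OP_FLUSH:
		return BLK_STS_OK;	/* cache flush */
	default:
		return BLK_STS_IOERR;	/* unsupported operation */
	}
}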
243 static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq) in rq_list_add_tail()
253 static inline void rq_list_add_head(struct rq_list *rl, struct request *rq) in rq_list_add_head()
261 static inline struct request *rq_list_pop(struct rq_list *rl) in rq_list_pop()
263 struct request *rq = rl->head; in rq_list_pop()
275 static inline struct request *rq_list_peek(struct rq_list *rl) in rq_list_peek()
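A sketch of how these list helpers pair up, e.g. when draining a batch of queued requests; the function name is illustrative.

#include <linux/blk-mq.h>

/* Move every request from one list to another, preserving order. */
static void example_splice_rq_list(struct rq_list *src, struct rq_list *dst)
{
	struct request *rq;

	if (!rq_list_peek(src))
		return;		/* nothing queued */

	while ((rq = rq_list_pop(src)))
		rq_list_add_tail(dst, rq);
}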
291 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
292 * request to complete.
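A hedged sketch of a ->timeout handler choosing between these return values; the completion check and the name example_timeout are assumptions, not taken from the header.

#include <linux/blk-mq.h>

/* Illustrative timeout handler: keep waiting unless completion already raced. */
static enum blk_eh_timer_return example_timeout(struct request *rq)
{
	if (blk_mq_request_completed(rq))
		return BLK_EH_DONE;	/* nothing left to do */

	return BLK_EH_RESET_TIMER;	/* re-arm the timer and keep waiting */
}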
343 * @sched_data: Pointer owned by the IO scheduler attached to a request
348 * @queue: Pointer to the request queue that owns this hardware context.
362 * pending request in that software queue.
401 * assigned when a request is dispatched from a hardware queue.
406 * scheduler associated with a request queue, a tag is assigned when
407 * that request is allocated. Else, this member is not used.
418 * shared across request queues.
422 /** @cpuhp_online: List to store requests if a CPU is going to die */
424 /** @cpuhp_dead: List to store requests if some CPU dies. */
478 * struct blk_mq_tag_set - tag set that can be shared between request queues
492 * @cmd_size: Number of additional bytes to allocate per request. The block
495 * @timeout: Request processing timeout in jiffies.
505 * @tag_list: List of the request queues that use this tag set. See also
507 * @srcu: Use as lock when type of the request queue is blocking
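A minimal sketch of filling this structure before blk_mq_alloc_tag_set(); the ops table passed in and the chosen queue depth, cmd_size and timeout are hypothetical driver values.

#include <linux/blk-mq.h>

static struct blk_mq_tag_set example_set;

/* Illustrative tag-set setup for a single hardware queue. */
static int example_init_tag_set(const struct blk_mq_ops *example_mq_ops)
{
	memset(&example_set, 0, sizeof(example_set));
	example_set.ops = example_mq_ops;	/* must provide at least .queue_rq */
	example_set.nr_hw_queues = 1;
	example_set.queue_depth = 128;
	example_set.numa_node = NUMA_NO_NODE;
	example_set.cmd_size = 64;		/* extra per-request bytes (the PDU) */
	example_set.timeout = 30 * HZ;		/* request timeout in jiffies */

	return blk_mq_alloc_tag_set(&example_set);
}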
533 * struct blk_mq_queue_data - Data about a request inserted in a queue
535 * @rq: Request pointer.
536 * @last: If it is the last request in the queue.
539 struct request *rq;
543 typedef bool (busy_tag_iter_fn)(struct request *, void *);
551 * @queue_rq: Queue a new request from block IO.
560 * purpose of kicking the hardware (which the last request otherwise
567 * that each request belongs to the same queue. If the driver doesn't
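Putting struct blk_mq_queue_data together with ->queue_rq, a hedged sketch of the canonical handler shape; the synchronous completion and the names are illustrative, not a real driver.

#include <linux/blk-mq.h>

/* Illustrative ->queue_rq: start the request, "do" the I/O, complete in line. */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	/* a real driver would program hardware here; bd->last hints that
	 * no further requests follow immediately, so the doorbell can be
	 * rung once for the whole batch instead of per request */

	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}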
574 * @get_budget: Reserve budget before queue request, once .queue_rq is
589 void (*set_rq_budget_token)(struct request *, int);
593 int (*get_rq_budget_token)(struct request *);
596 * @timeout: Called on request timeout.
598 enum blk_eh_timer_return (*timeout)(struct request *);
606 * @complete: Mark the request as complete.
608 void (*complete)(struct request *);
626 * flush request.
628 int (*init_request)(struct blk_mq_tag_set *set, struct request *,
633 void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
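A sketch of the matching callback pair; zeroing the PDU is only an illustration of per-request setup, and the example_* names are assumptions.

#include <linux/blk-mq.h>

/* Illustrative per-request constructor/destructor for a tag set. */
static int example_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	memset(blk_mq_rq_to_pdu(rq), 0, set->cmd_size);	/* PDU sized by cmd_size */
	return 0;
}

static void example_exit_request(struct blk_mq_tag_set *set, struct request *rq,
				 unsigned int hctx_idx)
{
	/* release anything example_init_request() acquired */
}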
637 * @cleanup_rq: Called before freeing one request which isn't completed
640 void (*cleanup_rq)(struct request *);
656 * information about a request.
658 void (*show_rq)(struct seq_file *m, struct request *rq);
723 void blk_mq_free_request(struct request *rq);
724 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
738 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
740 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
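A hedged sketch tying blk_mq_alloc_request() to the execution helpers listed further down (blk_execute_rq(), blk_mq_free_request()); the command-building step is left as a comment because it is driver specific, and example_sync_cmd is a made-up name.

#include <linux/blk-mq.h>

/* Illustrative synchronous passthrough command, error paths trimmed. */
static blk_status_t example_sync_cmd(struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return BLK_STS_RESOURCE;

	/* fill the driver PDU via blk_mq_rq_to_pdu(rq) here */

	status = blk_execute_rq(rq, false);	/* false: queue at the tail */
	blk_mq_free_request(rq);
	return status;
}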
755 struct request **rqs;
756 struct request **static_rqs;
760 * used to clear request reference in rqs[] before freeing one
761 * request pool
766 static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, in blk_mq_tag_to_rq()
782 u32 blk_mq_unique_tag(struct request *rq);
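A sketch of the usual completion-side lookup, assuming the driver stashed blk_mq_unique_tag() in its hardware command; the helper name is hypothetical.

#include <linux/blk-mq.h>

/* Recover the request that owns a completion's unique tag; the hardware
 * queue index, if needed, comes from blk_mq_unique_tag_to_hwq(). */
static struct request *example_lookup_rq(struct blk_mq_tags *tags, u32 unique_tag)
{
	return blk_mq_tag_to_rq(tags, blk_mq_unique_tag_to_tag(unique_tag));
}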
795 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
796 * @rq: target request.
798 static inline enum mq_rq_state blk_mq_rq_state(struct request *rq) in blk_mq_rq_state()
803 static inline int blk_mq_request_started(struct request *rq) in blk_mq_request_started()
808 static inline int blk_mq_request_completed(struct request *rq) in blk_mq_request_completed()
815 * Set the state to complete when completing a request from inside ->queue_rq.
817 * need access to the request are called on failure, e.g. by nvme for
820 static inline void blk_mq_set_request_complete(struct request *rq) in blk_mq_set_request_complete()
826 * Complete the request directly instead of deferring it to softirq or
829 static inline void blk_mq_complete_request_direct(struct request *rq, in blk_mq_complete_request_direct()
830 void (*complete)(struct request *rq)) in blk_mq_complete_request_direct()
836 void blk_mq_start_request(struct request *rq);
837 void blk_mq_end_request(struct request *rq, blk_status_t error);
838 void __blk_mq_end_request(struct request *rq, blk_status_t error);
845 static inline bool blk_mq_need_time_stamp(struct request *rq) in blk_mq_need_time_stamp()
850 static inline bool blk_mq_is_reserved_rq(struct request *rq) in blk_mq_is_reserved_rq()
856 * blk_mq_add_to_batch() - add a request to the completion batch
857 * @req: The request to add to the batch
858 * @iob: The batch to add the request to
859 * @is_error: Specify true if the request failed with an error
860 * @complete: The completion handler for the request
865 * Return: true when the request was added to the batch, otherwise false
867 static inline bool blk_mq_add_to_batch(struct request *req, in blk_mq_add_to_batch()
875 * 3) Not a passthrough request and end_io set in blk_mq_add_to_batch()
876 * 4) Not a passthrough request and failed with an error in blk_mq_add_to_batch()
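A sketch of the batched-completion pattern this helper enables, modelled loosely on the NVMe poll path; the example_* names are assumptions.

#include <linux/blk-mq.h>

/* Batch completion callback: end every request queued in the batch. */
static void example_complete_batch(struct io_comp_batch *iob)
{
	blk_mq_end_request_batch(iob);
}

/* Per-completion path: batch when allowed, fall back to a single completion. */
static void example_complete_one(struct request *req, struct io_comp_batch *iob,
				 bool failed)
{
	if (!blk_mq_add_to_batch(req, iob, failed, example_complete_batch))
		blk_mq_complete_request(req);
}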
898 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
901 void blk_mq_complete_request(struct request *rq);
902 bool blk_mq_complete_request_remote(struct request *rq);
951 unsigned int blk_mq_rq_cpu(struct request *rq);
963 * blk_mq_rq_from_pdu - cast a PDU to a request
966 * Return: request
968 * Driver command data is immediately after the request. So subtract request
969 * size to get back to the original request.
971 static inline struct request *blk_mq_rq_from_pdu(void *pdu) in blk_mq_rq_from_pdu()
973 return pdu - sizeof(struct request); in blk_mq_rq_from_pdu()
977 * blk_mq_rq_to_pdu - cast a request to a PDU
978 * @rq: the request to be cast
982 * Driver command data is immediately after the request. So add request to get
985 static inline void *blk_mq_rq_to_pdu(struct request *rq) in blk_mq_rq_to_pdu()
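A round-trip sketch of the two PDU helpers, assuming cmd_size in the tag set was sizeof(struct example_cmd); both names are illustrative.

#include <linux/blk-mq.h>

struct example_cmd {
	int status;	/* hypothetical driver-private state */
};

static void example_pdu_roundtrip(struct request *rq)
{
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = 0;
	WARN_ON(blk_mq_rq_from_pdu(cmd) != rq);	/* the PDU sits right after rq */
}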
997 static inline void blk_mq_cleanup_rq(struct request *rq) in blk_mq_cleanup_rq()
1006 static inline bool rq_is_sync(struct request *rq) in rq_is_sync()
1011 void blk_rq_init(struct request_queue *q, struct request *rq);
1012 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
1015 void blk_rq_unprep_clone(struct request *rq);
1016 blk_status_t blk_insert_cloned_request(struct request *rq);
1027 int blk_rq_map_user(struct request_queue *, struct request *,
1029 int blk_rq_map_user_io(struct request *, struct rq_map_data *,
1031 int blk_rq_map_user_iov(struct request_queue *, struct request *,
1034 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
1036 int blk_rq_append_bio(struct request *rq, struct bio *bio);
1037 void blk_execute_rq_nowait(struct request *rq, bool at_head);
1038 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
1039 bool blk_rq_is_poll(struct request *rq);
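A sketch of the asynchronous variant: installing an rq_end_io_fn (such as the one sketched near the top of this listing) before blk_execute_rq_nowait(); the helper name is illustrative.

#include <linux/blk-mq.h>

/* Fire-and-forget execution; 'done' runs from the completion path. */
static void example_exec_async(struct request *rq, rq_end_io_fn *done, void *data)
{
	rq->end_io = done;
	rq->end_io_data = data;
	blk_execute_rq_nowait(rq, false);	/* false: insert at the tail */
}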
1064 * blk_rq_bytes() : bytes left in the entire request
1066 * blk_rq_sectors() : sectors left in the entire request
1068 * blk_rq_stats_sectors() : sectors of the entire request used for stats
1070 static inline sector_t blk_rq_pos(const struct request *rq) in blk_rq_pos()
1075 static inline unsigned int blk_rq_bytes(const struct request *rq) in blk_rq_bytes()
1080 static inline int blk_rq_cur_bytes(const struct request *rq) in blk_rq_cur_bytes()
1089 static inline unsigned int blk_rq_sectors(const struct request *rq) in blk_rq_sectors()
1094 static inline unsigned int blk_rq_cur_sectors(const struct request *rq) in blk_rq_cur_sectors()
1099 static inline unsigned int blk_rq_stats_sectors(const struct request *rq) in blk_rq_stats_sectors()
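A sketch of how a driver typically turns these accessors into a hardware transfer descriptor; the descriptor type and its fields are hypothetical.

#include <linux/blk-mq.h>

/* Hypothetical transfer descriptor filled from request geometry. */
struct example_xfer {
	sector_t	lba;		/* starting sector, 512-byte units */
	unsigned int	nr_sectors;	/* 512-byte sectors remaining */
	unsigned int	nr_bytes;	/* bytes remaining in the request */
};

static void example_fill_xfer(const struct request *rq, struct example_xfer *x)
{
	x->lba = blk_rq_pos(rq);
	x->nr_sectors = blk_rq_sectors(rq);
	x->nr_bytes = blk_rq_bytes(rq);
}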
1106 * is different from the size of the request. Any driver that supports such
1110 static inline unsigned int blk_rq_payload_bytes(struct request *rq) in blk_rq_payload_bytes()
1118 * Return the first full biovec in the request. The caller needs to check that
1121 static inline struct bio_vec req_bvec(struct request *rq) in req_bvec()
1128 static inline unsigned int blk_rq_count_bios(struct request *rq) in blk_rq_count_bios()
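For data access the usual companion is rq_for_each_segment(); a hedged sketch that just sums segment lengths (the helper name is an assumption).

#include <linux/blk-mq.h>

/* Walk every data segment of a request and add up the bytes. */
static unsigned int example_sum_segments(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;	/* matches blk_rq_bytes() for ordinary read/write requests */
}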
1139 void blk_steal_bios(struct bio_list *list, struct request *rq);
1142 * Request completion related functions.
1145 * the request without completing it.
1147 bool blk_update_request(struct request *rq, blk_status_t error,
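A hedged sketch of partial completion built on blk_update_request(); whether the leftover bytes are requeued or re-driven in place is driver policy, so the else branch below is only one possibility.

#include <linux/blk-mq.h>

/* Account 'bytes' of progress; finish the request only when nothing is left. */
static void example_partial_done(struct request *rq, blk_status_t error,
				 unsigned int bytes)
{
	if (!blk_update_request(rq, error, bytes))
		__blk_mq_end_request(rq, error);	/* all bios completed */
	else
		blk_mq_requeue_request(rq, true);	/* remainder goes back to the queue */
}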
1149 void blk_abort_request(struct request *);
1160 static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) in blk_rq_nr_phys_segments()
1169 * Each discard bio merged into a request is counted as one segment.
1171 static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) in blk_rq_nr_discard_segments()
1176 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1178 static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, in blk_rq_map_sg()
1185 void blk_dump_rq_flags(struct request *, char *);
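A sketch of the usual DMA preparation step using the wrapper form of the mapping helper; the scatterlist is assumed to have been sized via blk_rq_nr_phys_segments(), and the function name is illustrative.

#include <linux/blk-mq.h>
#include <linux/scatterlist.h>

/* Map the request's data into a caller-provided scatterlist for DMA. */
static int example_map_for_dma(struct request *rq, struct scatterlist *sgl)
{
	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
	return blk_rq_map_sg(rq->q, rq, sgl);	/* number of entries actually used */
}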