Lines Matching full:md
Occurrences of the identifier md (the struct mapped_device pointer) in the device-mapper request-based (dm-rq) code. Each match shows the in-file line number, the matched source line, the enclosing function, and the symbol's role (member, argument, local).
19 struct mapped_device *md; member
59 int dm_request_based(struct mapped_device *md) in dm_request_based() argument
61 return queue_is_mq(md->queue); in dm_request_based()
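The two matches above are effectively the whole of dm_request_based(): it only reports whether the mapped device's queue is managed by blk-mq. A minimal reconstruction, assuming nothing beyond the fragments shown:

int dm_request_based(struct mapped_device *md)
{
	/* request-based DM means the md's request_queue is blk-mq managed */
	return queue_is_mq(md->queue);
}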
127 static void rq_end_stats(struct mapped_device *md, struct request *orig) in rq_end_stats() argument
129 if (unlikely(dm_stats_used(&md->stats))) { in rq_end_stats()
133 dm_stats_account_io(&md->stats, rq_data_dir(orig), in rq_end_stats()
140 * Don't touch any member of the md after calling this function because
141 * the md may be freed in dm_put() at the end of this function.
144 static void rq_completed(struct mapped_device *md) in rq_completed() argument
149 dm_put(md); in rq_completed()
160 struct mapped_device *md = tio->md; in dm_end_request() local
166 rq_end_stats(md, rq); in dm_end_request()
168 rq_completed(md); in dm_end_request()
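The completion-path matches above (rq_end_stats(), rq_completed(), dm_end_request()) all follow one ordering rule, spelled out in the comment before rq_completed(): do the stats accounting while the original request and md are still valid, then drop the md reference last, because dm_put() may free the device. A trimmed sketch of dm_end_request() under that assumption (the real function also unpreps and releases the clone before ending the original request); dm_softirq_done() further down follows the same rq_end_stats() then rq_completed() order:

static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	rq_end_stats(md, rq);		/* account while md is still referenced */
	blk_mq_end_request(rq, error);	/* complete the original request */
	rq_completed(md);		/* dm_put(); md must not be touched after this */
}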
176 void dm_mq_kick_requeue_list(struct mapped_device *md) in dm_mq_kick_requeue_list() argument
178 __dm_mq_kick_requeue_list(md->queue, 0); in dm_mq_kick_requeue_list()
190 struct mapped_device *md = tio->md; in dm_requeue_original_request() local
194 rq_end_stats(md, rq); in dm_requeue_original_request()
201 rq_completed(md); in dm_requeue_original_request()
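dm_mq_kick_requeue_list() is another one-liner around md->queue, and the requeue path in dm_requeue_original_request() follows the same accounting-then-release order as the completion path. A sketch of the kick helper, assuming __dm_mq_kick_requeue_list() simply forwards to the blk-mq delayed-requeue helper:

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(md->queue, 0);
}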
220 disable_discard(tio->md); in dm_done()
223 disable_write_zeroes(tio->md); in dm_done()
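The two disable_*() matches sit in dm_done()'s error handling: when the underlying device rejects a discard or write-zeroes clone, the corresponding capability is switched off on tio->md so the operation is not retried forever. A sketch of that branch, assuming the usual BLK_STS_TARGET check around it:

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}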
258 struct mapped_device *md = tio->md; in dm_softirq_done() local
260 rq_end_stats(md, rq); in dm_softirq_done()
262 rq_completed(md); in dm_softirq_done()
325 r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask, in setup_clone()
339 struct mapped_device *md) in init_tio() argument
341 tio->md = md; in init_tio()
352 if (!md->init_tio_pdu) in init_tio()
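setup_clone() pulls the clone's bios from the md's bio_set (tio->md->mempools->bs), and init_tio() records which md a per-request tio belongs to. A sketch of init_tio(), assuming the remaining fields are simply reset; the init_tio_pdu check at the end avoids clobbering tio->info when the target keeps per-io data there (see dm_mq_init_request() below):

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Leave tio->info alone for blk-mq: dm_mq_init_request() already
	 * pointed info.ptr at the target's per-io data area.
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}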
366 struct mapped_device *md = tio->md; in map_request() local
384 trace_block_rq_remap(clone, disk_devt(dm_disk(md)), in map_request()
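The trace_block_rq_remap() match belongs to map_request()'s DM_MAPIO_REMAPPED branch: once the target has produced a remapped clone, the remap is traced against the DM disk and the clone is handed to the underlying queue. A trimmed sketch of that branch; setup_clone() failure handling and the other DM_MAPIO_* cases are left out, and the dispatch call is the one-argument blk_insert_cloned_request() form used in current mainline:

	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = blk_insert_cloned_request(clone);
		break;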
422 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) in dm_attr_rq_based_seq_io_merge_deadline_show() argument
427 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, in dm_attr_rq_based_seq_io_merge_deadline_store() argument
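The two dm_attr_rq_based_seq_io_merge_deadline_* matches are the sysfs show/store hooks for the old request_fn merge deadline. With the legacy request path gone, these appear to survive only as stubs for sysfs compatibility; a sketch under that assumption:

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}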
433 static void dm_start_request(struct mapped_device *md, struct request *orig) in dm_start_request() argument
437 if (unlikely(dm_stats_used(&md->stats))) { in dm_start_request()
442 dm_stats_account_io(&md->stats, rq_data_dir(orig), in dm_start_request()
448 * Hold the md reference here for the in-flight I/O. in dm_start_request()
454 dm_get(md); in dm_start_request()
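dm_start_request() is where the per-request md reference is taken: it starts the original request, optionally opens a dm-stats interval, and then calls dm_get(md), which is what rq_completed() later drops with dm_put(). A trimmed sketch, with the dm-stats bookkeeping abbreviated:

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(orig);

		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold an md reference for this in-flight I/O; the opener's reference
	 * alone is not enough because the device can be closed while requests
	 * are still completing.
	 */
	dm_get(md);
}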
460 struct mapped_device *md = set->driver_data; in dm_mq_init_request() local
464 * Must initialize md member of tio, otherwise it won't in dm_mq_init_request()
467 tio->md = md; in dm_mq_init_request()
469 if (md->init_tio_pdu) { in dm_mq_init_request()
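dm_mq_init_request() runs once per pre-allocated request when the tag set is created; it stashes the md (taken from set->driver_data) in the tio and, when the immutable target asked for per-io data, points tio->info.ptr at the area reserved right after the tio. A sketch consistent with the matched lines:

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize the md member of the tio here, otherwise it is not
	 * available in dm_mq_queue_rq().
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}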
482 struct mapped_device *md = tio->md; in dm_mq_queue_rq() local
483 struct dm_target *ti = md->immutable_target; in dm_mq_queue_rq()
490 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) in dm_mq_queue_rq()
497 map = dm_get_live_table(md, &srcu_idx); in dm_mq_queue_rq()
500 dm_device_name(md)); in dm_mq_queue_rq()
501 dm_put_live_table(md, srcu_idx); in dm_mq_queue_rq()
505 dm_put_live_table(md, srcu_idx); in dm_mq_queue_rq()
511 dm_start_request(md, rq); in dm_mq_queue_rq()
513 /* Init tio using md established in .init_request */ in dm_mq_queue_rq()
514 init_tio(tio, rq, md); in dm_mq_queue_rq()
524 rq_end_stats(md, rq); in dm_mq_queue_rq()
525 rq_completed(md); in dm_mq_queue_rq()
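dm_mq_queue_rq() ties the pieces together: the fast path uses md->immutable_target directly; otherwise, after the DMF_BLOCK_IO_FOR_SUSPEND check, the live table is taken under SRCU just long enough to find a target, and a missing table errors the I/O. A trimmed sketch of that lookup; dm_start_request(), init_tio() and map_request() then run on the chosen target, as the later matches show:

	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			DMERR_LIMIT("%s: mapping table unavailable, erroring io",
				    dm_device_name(md));
			dm_put_live_table(md, srcu_idx);
			return BLK_STS_IOERR;
		}
		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}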
538 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) in dm_mq_init_request_queue() argument
543 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id); in dm_mq_init_request_queue()
544 if (!md->tag_set) in dm_mq_init_request_queue()
547 md->tag_set->ops = &dm_mq_ops; in dm_mq_init_request_queue()
548 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); in dm_mq_init_request_queue()
549 md->tag_set->numa_node = md->numa_node_id; in dm_mq_init_request_queue()
550 md->tag_set->flags = BLK_MQ_F_STACKING; in dm_mq_init_request_queue()
551 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); in dm_mq_init_request_queue()
552 md->tag_set->driver_data = md; in dm_mq_init_request_queue()
554 md->tag_set->cmd_size = sizeof(struct dm_rq_target_io); in dm_mq_init_request_queue()
558 md->tag_set->cmd_size += immutable_tgt->per_io_data_size; in dm_mq_init_request_queue()
559 md->init_tio_pdu = true; in dm_mq_init_request_queue()
562 err = blk_mq_alloc_tag_set(md->tag_set); in dm_mq_init_request_queue()
566 err = blk_mq_init_allocated_queue(md->tag_set, md->queue); in dm_mq_init_request_queue()
572 blk_mq_free_tag_set(md->tag_set); in dm_mq_init_request_queue()
574 kfree(md->tag_set); in dm_mq_init_request_queue()
575 md->tag_set = NULL; in dm_mq_init_request_queue()
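In dm_mq_init_request_queue() the per-request payload size (cmd_size) is what connects the tag set back to init_tio()/dm_mq_init_request(): it always covers the dm_rq_target_io, and grows by the immutable target's per_io_data_size, in which case init_tio_pdu is set. A sketch of that part, assuming the usual immutable-target check; the matched lines after it show the blk_mq_alloc_tag_set()/blk_mq_init_allocated_queue() calls and the unwind that frees the tag set on failure:

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data follows the tio in the pdu */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}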
580 void dm_mq_cleanup_mapped_device(struct mapped_device *md) in dm_mq_cleanup_mapped_device() argument
582 if (md->tag_set) { in dm_mq_cleanup_mapped_device()
583 blk_mq_free_tag_set(md->tag_set); in dm_mq_cleanup_mapped_device()
584 kfree(md->tag_set); in dm_mq_cleanup_mapped_device()
585 md->tag_set = NULL; in dm_mq_cleanup_mapped_device()
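dm_mq_cleanup_mapped_device() is fully visible in the matches: it only has to undo the tag-set setup. Put together from the lines above:

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
}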