Lines Matching +full:pd +full:- +full:node in drivers/block/pktcdvd.c

3  * Copyright (C) 2001-2004 Peter Osterlund <[email protected]>
9 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
10 * DVD-RAM devices.
25 * randomly read and written using 2kB-sized blocks.
39 * At the top layer there is a custom ->submit_bio function that forwards
49 #include <linux/backing-dev.h>
98 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) in get_zone() argument
100 return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); in get_zone()
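For context: get_zone() rounds a sector down to the start of its packet ("zone"). pd->settings.size is the packet size in 512-byte sectors, and the mask only works because it is a power of two. A minimal userspace sketch of the same arithmetic (all names here are illustrative, not from the driver):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Mirror of the driver's zone rounding: offset compensates for a
     * track start that is not packet-aligned (see pkt_probe_settings). */
    static sector_t zone_of(sector_t sector, sector_t offset, sector_t size)
    {
        return (sector + offset) & ~(size - 1);
    }

    int main(void)
    {
        /* 128-sector (64 KiB) packets, no offset: sector 1000 -> zone 896 */
        printf("%llu\n", zone_of(1000, 0, 128));
        return 0;
    }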
107 /sys/class/pktcdvd/pktcdvd[0-7]/
122 struct pktcdvd_device *pd = dev_get_drvdata(dev); in packets_started_show() local
124 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started); in packets_started_show()
131 struct pktcdvd_device *pd = dev_get_drvdata(dev); in packets_finished_show() local
133 return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended); in packets_finished_show()
140 struct pktcdvd_device *pd = dev_get_drvdata(dev); in kb_written_show() local
142 return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1); in kb_written_show()
149 struct pktcdvd_device *pd = dev_get_drvdata(dev); in kb_read_show() local
151 return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1); in kb_read_show()
158 struct pktcdvd_device *pd = dev_get_drvdata(dev); in kb_read_gather_show() local
160 return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1); in kb_read_gather_show()
167 struct pktcdvd_device *pd = dev_get_drvdata(dev); in reset_store() local
170 pd->stats.pkt_started = 0; in reset_store()
171 pd->stats.pkt_ended = 0; in reset_store()
172 pd->stats.secs_w = 0; in reset_store()
173 pd->stats.secs_rg = 0; in reset_store()
174 pd->stats.secs_r = 0; in reset_store()
198 struct pktcdvd_device *pd = dev_get_drvdata(dev); in size_show() local
201 spin_lock(&pd->lock); in size_show()
202 n = sysfs_emit(buf, "%d\n", pd->bio_queue_size); in size_show()
203 spin_unlock(&pd->lock); in size_show()
214 *lo = *hi - 100; in init_write_congestion_marks()
216 *lo = min(*lo, *hi - 100); in init_write_congestion_marks()
220 *hi = -1; in init_write_congestion_marks()
221 *lo = -1; in init_write_congestion_marks()
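The three matched assignments above are the heart of init_write_congestion_marks(): keep the "on" mark at least 100 queued bios above the "off" mark, and disable congestion handling entirely when no positive "on" mark is configured. A hedged reconstruction (the clamping of *hi done on the elided lines is omitted; MIN stands in for the kernel's min()):

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void init_marks(int *lo, int *hi)
    {
        if (*hi > 0) {
            if (*lo <= 0)
                *lo = *hi - 100;           /* default: 100 below "on" */
            else
                *lo = MIN(*lo, *hi - 100); /* preserve the 100-bio gap */
        } else {
            *hi = -1;                      /* congestion control disabled */
            *lo = -1;
        }
    }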
228 struct pktcdvd_device *pd = dev_get_drvdata(dev); in congestion_off_show() local
231 spin_lock(&pd->lock); in congestion_off_show()
232 n = sysfs_emit(buf, "%d\n", pd->write_congestion_off); in congestion_off_show()
233 spin_unlock(&pd->lock); in congestion_off_show()
241 struct pktcdvd_device *pd = dev_get_drvdata(dev); in congestion_off_store() local
248 spin_lock(&pd->lock); in congestion_off_store()
249 pd->write_congestion_off = val; in congestion_off_store()
250 init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on); in congestion_off_store()
251 spin_unlock(&pd->lock); in congestion_off_store()
259 struct pktcdvd_device *pd = dev_get_drvdata(dev); in congestion_on_show() local
262 spin_lock(&pd->lock); in congestion_on_show()
263 n = sysfs_emit(buf, "%d\n", pd->write_congestion_on); in congestion_on_show()
264 spin_unlock(&pd->lock); in congestion_on_show()
272 struct pktcdvd_device *pd = dev_get_drvdata(dev); in congestion_on_store() local
279 spin_lock(&pd->lock); in congestion_on_store()
280 pd->write_congestion_on = val; in congestion_on_store()
281 init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on); in congestion_on_store()
282 spin_unlock(&pd->lock); in congestion_on_store()
305 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) in pkt_sysfs_dev_new() argument
308 pd->dev = device_create_with_groups(&class_pktcdvd, NULL, in pkt_sysfs_dev_new()
309 MKDEV(0, 0), pd, pkt_groups, in pkt_sysfs_dev_new()
310 "%s", pd->disk->disk_name); in pkt_sysfs_dev_new()
311 if (IS_ERR(pd->dev)) in pkt_sysfs_dev_new()
312 pd->dev = NULL; in pkt_sysfs_dev_new()
316 static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) in pkt_sysfs_dev_remove() argument
319 device_unregister(pd->dev); in pkt_sysfs_dev_remove()
337 struct pktcdvd_device *pd = pkt_devs[idx]; in device_map_show() local
338 if (!pd) in device_map_show()
341 pd->disk->disk_name, in device_map_show()
342 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), in device_map_show()
343 MAJOR(file_bdev(pd->bdev_file)->bd_dev), in device_map_show()
344 MINOR(file_bdev(pd->bdev_file)->bd_dev)); in device_map_show()
359 return -ENODEV; in add_store()
368 return -EINVAL; in add_store()
380 return -EINVAL; in remove_store()
414 /sys/kernel/debug/pktcdvd[0-7]/
419 static void pkt_count_states(struct pktcdvd_device *pd, int *states) in pkt_count_states() argument
427 spin_lock(&pd->cdrw.active_list_lock); in pkt_count_states()
428 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in pkt_count_states()
429 states[pkt->state]++; in pkt_count_states()
431 spin_unlock(&pd->cdrw.active_list_lock); in pkt_count_states()
436 struct pktcdvd_device *pd = m->private; in pkt_seq_show() local
440 seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, in pkt_seq_show()
441 file_bdev(pd->bdev_file)); in pkt_seq_show()
444 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); in pkt_seq_show()
446 if (pd->settings.write_type == 0) in pkt_seq_show()
452 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); in pkt_seq_show()
453 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); in pkt_seq_show()
455 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); in pkt_seq_show()
457 if (pd->settings.block_mode == PACKET_BLOCK_MODE1) in pkt_seq_show()
459 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) in pkt_seq_show()
466 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); in pkt_seq_show()
467 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); in pkt_seq_show()
468 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); in pkt_seq_show()
469 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); in pkt_seq_show()
470 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); in pkt_seq_show()
473 seq_printf(m, "\treference count:\t%d\n", pd->refcnt); in pkt_seq_show()
474 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); in pkt_seq_show()
475 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); in pkt_seq_show()
476 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); in pkt_seq_show()
477 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); in pkt_seq_show()
478 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); in pkt_seq_show()
481 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); in pkt_seq_show()
482 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); in pkt_seq_show()
483 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector); in pkt_seq_show()
485 pkt_count_states(pd, states); in pkt_seq_show()
490 pd->write_congestion_off, in pkt_seq_show()
491 pd->write_congestion_on); in pkt_seq_show()
496 static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) in pkt_debugfs_dev_new() argument
500 pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root); in pkt_debugfs_dev_new()
502 pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root, in pkt_debugfs_dev_new()
503 pd, &pkt_seq_fops); in pkt_debugfs_dev_new()
506 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) in pkt_debugfs_dev_remove() argument
510 debugfs_remove(pd->dfs_f_info); in pkt_debugfs_dev_remove()
511 debugfs_remove(pd->dfs_d_root); in pkt_debugfs_dev_remove()
512 pd->dfs_f_info = NULL; in pkt_debugfs_dev_remove()
513 pd->dfs_d_root = NULL; in pkt_debugfs_dev_remove()
527 /* ----------------------------------------------------------*/
530 static void pkt_bio_finished(struct pktcdvd_device *pd) in pkt_bio_finished() argument
532 struct device *ddev = disk_to_dev(pd->disk); in pkt_bio_finished()
534 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); in pkt_bio_finished()
535 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { in pkt_bio_finished()
537 atomic_set(&pd->iosched.attention, 1); in pkt_bio_finished()
538 wake_up(&pd->wqueue); in pkt_bio_finished()
554 pkt->frames = frames; in pkt_alloc_packet_data()
555 pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL); in pkt_alloc_packet_data()
556 if (!pkt->w_bio) in pkt_alloc_packet_data()
560 pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO); in pkt_alloc_packet_data()
561 if (!pkt->pages[i]) in pkt_alloc_packet_data()
565 spin_lock_init(&pkt->lock); in pkt_alloc_packet_data()
566 bio_list_init(&pkt->orig_bios); in pkt_alloc_packet_data()
569 pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL); in pkt_alloc_packet_data()
570 if (!pkt->r_bios[i]) in pkt_alloc_packet_data()
578 kfree(pkt->r_bios[i]); in pkt_alloc_packet_data()
581 if (pkt->pages[i]) in pkt_alloc_packet_data()
582 __free_page(pkt->pages[i]); in pkt_alloc_packet_data()
583 kfree(pkt->w_bio); in pkt_alloc_packet_data()
597 for (i = 0; i < pkt->frames; i++) in pkt_free_packet_data()
598 kfree(pkt->r_bios[i]); in pkt_free_packet_data()
599 for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++) in pkt_free_packet_data()
600 __free_page(pkt->pages[i]); in pkt_free_packet_data()
601 kfree(pkt->w_bio); in pkt_free_packet_data()
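pkt_alloc_packet_data() sizes its page array from the frame count: a frame is CD_FRAMESIZE (2048) bytes, so one 4 KiB page holds FRAMES_PER_PAGE == 2 frames (the kernel defines FRAMES_PER_PAGE as PAGE_SIZE / CD_FRAMESIZE). Standalone sketch of the arithmetic, with PAGE_SZ standing in for the kernel's PAGE_SIZE:

    #include <stdio.h>

    #define CD_FRAMESIZE    2048
    #define PAGE_SZ         4096
    #define FRAMES_PER_PAGE (PAGE_SZ / CD_FRAMESIZE)

    int main(void)
    {
        int frames = 32; /* a 64 KiB fixed packet: settings.size 128 >> 2 */
        printf("pages per packet: %d\n", frames / FRAMES_PER_PAGE); /* 16 */
        return 0;
    }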
605 static void pkt_shrink_pktlist(struct pktcdvd_device *pd) in pkt_shrink_pktlist() argument
609 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list)); in pkt_shrink_pktlist()
611 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) { in pkt_shrink_pktlist()
614 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); in pkt_shrink_pktlist()
617 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) in pkt_grow_pktlist() argument
621 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list)); in pkt_grow_pktlist()
624 pkt = pkt_alloc_packet_data(pd->settings.size >> 2); in pkt_grow_pktlist()
626 pkt_shrink_pktlist(pd); in pkt_grow_pktlist()
629 pkt->id = nr_packets; in pkt_grow_pktlist()
630 pkt->pd = pd; in pkt_grow_pktlist()
631 list_add(&pkt->list, &pd->cdrw.pkt_free_list); in pkt_grow_pktlist()
632 nr_packets--; in pkt_grow_pktlist()
637 static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) in pkt_rbtree_next() argument
639 struct rb_node *n = rb_next(&node->rb_node); in pkt_rbtree_next()
645 static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node) in pkt_rbtree_erase() argument
647 rb_erase(&node->rb_node, &pd->bio_queue); in pkt_rbtree_erase()
648 mempool_free(node, &pd->rb_pool); in pkt_rbtree_erase()
649 pd->bio_queue_size--; in pkt_rbtree_erase()
650 BUG_ON(pd->bio_queue_size < 0); in pkt_rbtree_erase()
654 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
656 static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s) in pkt_rbtree_find() argument
658 struct rb_node *n = pd->bio_queue.rb_node; in pkt_rbtree_find()
663 BUG_ON(pd->bio_queue_size > 0); in pkt_rbtree_find()
669 if (s <= tmp->bio->bi_iter.bi_sector) in pkt_rbtree_find()
670 next = n->rb_left; in pkt_rbtree_find()
672 next = n->rb_right; in pkt_rbtree_find()
678 if (s > tmp->bio->bi_iter.bi_sector) { in pkt_rbtree_find()
683 BUG_ON(s > tmp->bio->bi_iter.bi_sector); in pkt_rbtree_find()
688 * Insert a node into the pd->bio_queue rb tree.
690 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node) in pkt_rbtree_insert() argument
692 struct rb_node **p = &pd->bio_queue.rb_node; in pkt_rbtree_insert()
694 sector_t s = node->bio->bi_iter.bi_sector; in pkt_rbtree_insert()
700 if (s < tmp->bio->bi_iter.bi_sector) in pkt_rbtree_insert()
701 p = &(*p)->rb_left; in pkt_rbtree_insert()
703 p = &(*p)->rb_right; in pkt_rbtree_insert()
705 rb_link_node(&node->rb_node, parent, p); in pkt_rbtree_insert()
706 rb_insert_color(&node->rb_node, &pd->bio_queue); in pkt_rbtree_insert()
707 pd->bio_queue_size++; in pkt_rbtree_insert()
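pkt_rbtree_insert() above is the canonical <linux/rbtree.h> insertion idiom: descend child pointers to a NULL slot, rb_link_node(), then rb_insert_color() to rebalance. Condensed restatement of the pattern (kernel context assumed; key_of() is a hypothetical accessor, here the bio's starting sector):

    struct rb_node **p = &root->rb_node, *parent = NULL;

    while (*p) {
        parent = *p;
        p = key < key_of(parent) ? &(*p)->rb_left : &(*p)->rb_right;
    }
    rb_link_node(&new->rb_node, parent, p); /* link into the empty slot */
    rb_insert_color(&new->rb_node, root);   /* restore red-black invariants */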
714 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) in pkt_generic_packet() argument
716 struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file)); in pkt_generic_packet()
721 rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? in pkt_generic_packet()
727 if (cgc->buflen) { in pkt_generic_packet()
728 ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, in pkt_generic_packet()
734 scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]); in pkt_generic_packet()
735 memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE); in pkt_generic_packet()
737 rq->timeout = 60*HZ; in pkt_generic_packet()
738 if (cgc->quiet) in pkt_generic_packet()
739 rq->rq_flags |= RQF_QUIET; in pkt_generic_packet()
742 if (scmd->result) in pkt_generic_packet()
743 ret = -EIO; in pkt_generic_packet()
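pkt_generic_packet() is the driver's SCSI pass-through: it turns a struct packet_command (cgc) into a block-layer request, copies the CDB in, and waits for completion. Callers prepare the command with init_cdrom_command() from <linux/cdrom.h>; the sketch below reconstructs the body of pkt_flush_cache() further down, whose setup lines this search elides (a reconstruction, not verbatim driver code):

    struct packet_command cgc;

    init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); /* no data transfer */
    cgc.cmd[0] = GPCMD_FLUSH_CACHE;                   /* SYNCHRONIZE CACHE */
    cgc.quiet = 1;
    ret = pkt_generic_packet(pd, &cgc);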
764 static void pkt_dump_sense(struct pktcdvd_device *pd, in pkt_dump_sense() argument
767 struct device *ddev = disk_to_dev(pd->disk); in pkt_dump_sense()
768 struct scsi_sense_hdr *sshdr = cgc->sshdr; in pkt_dump_sense()
771 dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n", in pkt_dump_sense()
772 CDROM_PACKET_SIZE, cgc->cmd, in pkt_dump_sense()
773 sshdr->sense_key, sshdr->asc, sshdr->ascq, in pkt_dump_sense()
774 sense_key_string(sshdr->sense_key)); in pkt_dump_sense()
776 dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); in pkt_dump_sense()
782 static int pkt_flush_cache(struct pktcdvd_device *pd) in pkt_flush_cache() argument
791 * the IMMED bit -- we default to not setting it, although that in pkt_flush_cache()
797 return pkt_generic_packet(pd, &cgc); in pkt_flush_cache()
803 static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, in pkt_set_speed() argument
816 ret = pkt_generic_packet(pd, &cgc); in pkt_set_speed()
818 pkt_dump_sense(pd, &cgc); in pkt_set_speed()
824 * Queue a bio for processing by the low-level CD device. Must be called
827 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) in pkt_queue_bio() argument
833 bio->bi_opf |= REQ_NOMERGE; in pkt_queue_bio()
835 spin_lock(&pd->iosched.lock); in pkt_queue_bio()
837 bio_list_add(&pd->iosched.read_queue, bio); in pkt_queue_bio()
839 bio_list_add(&pd->iosched.write_queue, bio); in pkt_queue_bio()
840 spin_unlock(&pd->iosched.lock); in pkt_queue_bio()
842 atomic_set(&pd->iosched.attention, 1); in pkt_queue_bio()
843 wake_up(&pd->wqueue); in pkt_queue_bio()
849 * - A cache flush command must be inserted before a read request if the
851 * - Switching between reading and writing is slow, so don't do it more often
853 * - Optimize for throughput at the expense of latency. This means that streaming
857 * - Set the read speed according to current usage pattern. When only reading
862 static void pkt_iosched_process_queue(struct pktcdvd_device *pd) in pkt_iosched_process_queue() argument
864 struct device *ddev = disk_to_dev(pd->disk); in pkt_iosched_process_queue()
866 if (atomic_read(&pd->iosched.attention) == 0) in pkt_iosched_process_queue()
868 atomic_set(&pd->iosched.attention, 0); in pkt_iosched_process_queue()
874 spin_lock(&pd->iosched.lock); in pkt_iosched_process_queue()
875 reads_queued = !bio_list_empty(&pd->iosched.read_queue); in pkt_iosched_process_queue()
876 writes_queued = !bio_list_empty(&pd->iosched.write_queue); in pkt_iosched_process_queue()
877 spin_unlock(&pd->iosched.lock); in pkt_iosched_process_queue()
882 if (pd->iosched.writing) { in pkt_iosched_process_queue()
884 spin_lock(&pd->iosched.lock); in pkt_iosched_process_queue()
885 bio = bio_list_peek(&pd->iosched.write_queue); in pkt_iosched_process_queue()
886 spin_unlock(&pd->iosched.lock); in pkt_iosched_process_queue()
887 if (bio && (bio->bi_iter.bi_sector == in pkt_iosched_process_queue()
888 pd->iosched.last_write)) in pkt_iosched_process_queue()
891 if (atomic_read(&pd->cdrw.pending_bios) > 0) { in pkt_iosched_process_queue()
895 pkt_flush_cache(pd); in pkt_iosched_process_queue()
896 pd->iosched.writing = 0; in pkt_iosched_process_queue()
900 if (atomic_read(&pd->cdrw.pending_bios) > 0) { in pkt_iosched_process_queue()
904 pd->iosched.writing = 1; in pkt_iosched_process_queue()
908 spin_lock(&pd->iosched.lock); in pkt_iosched_process_queue()
909 if (pd->iosched.writing) in pkt_iosched_process_queue()
910 bio = bio_list_pop(&pd->iosched.write_queue); in pkt_iosched_process_queue()
912 bio = bio_list_pop(&pd->iosched.read_queue); in pkt_iosched_process_queue()
913 spin_unlock(&pd->iosched.lock); in pkt_iosched_process_queue()
919 pd->iosched.successive_reads += in pkt_iosched_process_queue()
920 bio->bi_iter.bi_size >> 10; in pkt_iosched_process_queue()
922 pd->iosched.successive_reads = 0; in pkt_iosched_process_queue()
923 pd->iosched.last_write = bio_end_sector(bio); in pkt_iosched_process_queue()
925 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { in pkt_iosched_process_queue()
926 if (pd->read_speed == pd->write_speed) { in pkt_iosched_process_queue()
927 pd->read_speed = MAX_SPEED; in pkt_iosched_process_queue()
928 pkt_set_speed(pd, pd->write_speed, pd->read_speed); in pkt_iosched_process_queue()
931 if (pd->read_speed != pd->write_speed) { in pkt_iosched_process_queue()
932 pd->read_speed = pd->write_speed; in pkt_iosched_process_queue()
933 pkt_set_speed(pd, pd->write_speed, pd->read_speed); in pkt_iosched_process_queue()
937 atomic_inc(&pd->cdrw.pending_bios); in pkt_iosched_process_queue()
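The strategy comments above boil down to direction batching. A distilled sketch of the mode-switch decision (reads_queued, writes_queued, need_write_seek and pending_bios paraphrase the locals and atomics used on the matched lines; this is not verbatim driver code):

    if (writing) {
        /* Leave write mode only when the next write needs a seek anyway
         * and reads are waiting; flush the drive cache first. */
        if (need_write_seek && reads_queued && !pending_bios) {
            pkt_flush_cache(pd);
            writing = 0;
        }
    } else {
        /* Enter write mode only once the read queue has drained. */
        if (!reads_queued && writes_queued && !pending_bios)
            writing = 1;
    }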
946 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) in pkt_set_segment_merging() argument
948 struct device *ddev = disk_to_dev(pd->disk); in pkt_set_segment_merging()
950 if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) { in pkt_set_segment_merging()
954 clear_bit(PACKET_MERGE_SEGS, &pd->flags); in pkt_set_segment_merging()
958 if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) { in pkt_set_segment_merging()
963 set_bit(PACKET_MERGE_SEGS, &pd->flags); in pkt_set_segment_merging()
968 return -EIO; in pkt_set_segment_merging()
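Worked numbers for the two tests above, assuming the common 128-sector (64 KiB) packet and 4 KiB pages:

    int packet_bytes      = 128 << 9;            /* 65536 */
    int frames_per_packet = packet_bytes / 2048; /* 32 CD_FRAMESIZE segments */
    int pages_per_packet  = packet_bytes / 4096; /* 16 page-sized segments */
    /* PACKET_MERGE_SEGS is set only in the fallback case: the queue
     * cannot take 32 one-frame segments but can take 16 page-sized ones,
     * so frames must be merged into page-sized bvecs at write time. */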
973 struct packet_data *pkt = bio->bi_private; in pkt_end_io_read()
974 struct pktcdvd_device *pd = pkt->pd; in pkt_end_io_read() local
975 BUG_ON(!pd); in pkt_end_io_read()
977 dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n", in pkt_end_io_read()
978 bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status); in pkt_end_io_read()
980 if (bio->bi_status) in pkt_end_io_read()
981 atomic_inc(&pkt->io_errors); in pkt_end_io_read()
983 if (atomic_dec_and_test(&pkt->io_wait)) { in pkt_end_io_read()
984 atomic_inc(&pkt->run_sm); in pkt_end_io_read()
985 wake_up(&pd->wqueue); in pkt_end_io_read()
987 pkt_bio_finished(pd); in pkt_end_io_read()
992 struct packet_data *pkt = bio->bi_private; in pkt_end_io_packet_write()
993 struct pktcdvd_device *pd = pkt->pd; in pkt_end_io_packet_write() local
994 BUG_ON(!pd); in pkt_end_io_packet_write()
996 dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status); in pkt_end_io_packet_write()
998 pd->stats.pkt_ended++; in pkt_end_io_packet_write()
1001 pkt_bio_finished(pd); in pkt_end_io_packet_write()
1002 atomic_dec(&pkt->io_wait); in pkt_end_io_packet_write()
1003 atomic_inc(&pkt->run_sm); in pkt_end_io_packet_write()
1004 wake_up(&pd->wqueue); in pkt_end_io_packet_write()
1010 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_gather_data() argument
1012 struct device *ddev = disk_to_dev(pd->disk); in pkt_gather_data()
1018 BUG_ON(bio_list_empty(&pkt->orig_bios)); in pkt_gather_data()
1020 atomic_set(&pkt->io_wait, 0); in pkt_gather_data()
1021 atomic_set(&pkt->io_errors, 0); in pkt_gather_data()
1027 spin_lock(&pkt->lock); in pkt_gather_data()
1028 bio_list_for_each(bio, &pkt->orig_bios) { in pkt_gather_data()
1029 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / in pkt_gather_data()
1031 int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE; in pkt_gather_data()
1032 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); in pkt_gather_data()
1034 BUG_ON(first_frame + num_frames > pkt->frames); in pkt_gather_data()
1038 spin_unlock(&pkt->lock); in pkt_gather_data()
1040 if (pkt->cache_valid) { in pkt_gather_data()
1041 dev_dbg(ddev, "zone %llx cached\n", pkt->sector); in pkt_gather_data()
1048 for (f = 0; f < pkt->frames; f++) { in pkt_gather_data()
1054 bio = pkt->r_bios[f]; in pkt_gather_data()
1055 bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1, in pkt_gather_data()
1057 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); in pkt_gather_data()
1058 bio->bi_end_io = pkt_end_io_read; in pkt_gather_data()
1059 bio->bi_private = pkt; in pkt_gather_data()
1064 pkt->pages[p], offset); in pkt_gather_data()
1065 if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset)) in pkt_gather_data()
1068 atomic_inc(&pkt->io_wait); in pkt_gather_data()
1069 pkt_queue_bio(pd, bio); in pkt_gather_data()
1074 dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector); in pkt_gather_data()
1075 pd->stats.pkt_started++; in pkt_gather_data()
1076 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); in pkt_gather_data()
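pkt_gather_data() reads only the frames that queued WRITE bios do not already cover. The per-frame bookkeeping array is elided by this search; a hedged reconstruction consistent with the first_frame/num_frames math above:

    char written[PACKET_MAX_SIZE] = { 0 };  /* one flag per 2 KiB frame */

    bio_list_for_each(bio, &pkt->orig_bios) {
        int first = (bio->bi_iter.bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
        int n = bio->bi_iter.bi_size / CD_FRAMESIZE;

        for (int f = first; f < first + n; f++)
            written[f] = 1;  /* frame supplied by the caller, no read needed */
    }
    /* The read loop below then issues single-frame READs only for the
     * frames whose written[f] flag is still clear. */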
1083 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone) in pkt_get_packet_data() argument
1087 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) { in pkt_get_packet_data()
1088 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) { in pkt_get_packet_data()
1089 list_del_init(&pkt->list); in pkt_get_packet_data()
1090 if (pkt->sector != zone) in pkt_get_packet_data()
1091 pkt->cache_valid = 0; in pkt_get_packet_data()
1099 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_put_packet_data() argument
1101 if (pkt->cache_valid) { in pkt_put_packet_data()
1102 list_add(&pkt->list, &pd->cdrw.pkt_free_list); in pkt_put_packet_data()
1104 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list); in pkt_put_packet_data()
1114 enum packet_data_state old_state = pkt->state; in pkt_set_state()
1116 dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n", in pkt_set_state()
1117 pkt->id, pkt->sector, state_name[old_state], state_name[state]); in pkt_set_state()
1119 pkt->state = state; in pkt_set_state()
1124 * returns non-zero if any work was done.
1126 static int pkt_handle_queue(struct pktcdvd_device *pd) in pkt_handle_queue() argument
1128 struct device *ddev = disk_to_dev(pd->disk); in pkt_handle_queue()
1132 struct pkt_rb_node *node, *first_node; in pkt_handle_queue() local
1135 atomic_set(&pd->scan_queue, 0); in pkt_handle_queue()
1137 if (list_empty(&pd->cdrw.pkt_free_list)) { in pkt_handle_queue()
1145 spin_lock(&pd->lock); in pkt_handle_queue()
1146 first_node = pkt_rbtree_find(pd, pd->current_sector); in pkt_handle_queue()
1148 n = rb_first(&pd->bio_queue); in pkt_handle_queue()
1152 node = first_node; in pkt_handle_queue()
1153 while (node) { in pkt_handle_queue()
1154 bio = node->bio; in pkt_handle_queue()
1155 zone = get_zone(bio->bi_iter.bi_sector, pd); in pkt_handle_queue()
1156 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { in pkt_handle_queue()
1157 if (p->sector == zone) { in pkt_handle_queue()
1164 node = pkt_rbtree_next(node); in pkt_handle_queue()
1165 if (!node) { in pkt_handle_queue()
1166 n = rb_first(&pd->bio_queue); in pkt_handle_queue()
1168 node = rb_entry(n, struct pkt_rb_node, rb_node); in pkt_handle_queue()
1170 if (node == first_node) in pkt_handle_queue()
1171 node = NULL; in pkt_handle_queue()
1173 spin_unlock(&pd->lock); in pkt_handle_queue()
1179 pkt = pkt_get_packet_data(pd, zone); in pkt_handle_queue()
1181 pd->current_sector = zone + pd->settings.size; in pkt_handle_queue()
1182 pkt->sector = zone; in pkt_handle_queue()
1183 BUG_ON(pkt->frames != pd->settings.size >> 2); in pkt_handle_queue()
1184 pkt->write_size = 0; in pkt_handle_queue()
1190 spin_lock(&pd->lock); in pkt_handle_queue()
1192 while ((node = pkt_rbtree_find(pd, zone)) != NULL) { in pkt_handle_queue()
1193 sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd); in pkt_handle_queue()
1195 bio = node->bio; in pkt_handle_queue()
1199 pkt_rbtree_erase(pd, node); in pkt_handle_queue()
1200 spin_lock(&pkt->lock); in pkt_handle_queue()
1201 bio_list_add(&pkt->orig_bios, bio); in pkt_handle_queue()
1202 pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE; in pkt_handle_queue()
1203 spin_unlock(&pkt->lock); in pkt_handle_queue()
1208 if (pd->congested && in pkt_handle_queue()
1209 pd->bio_queue_size <= pd->write_congestion_off) { in pkt_handle_queue()
1210 pd->congested = false; in pkt_handle_queue()
1211 wake_up_var(&pd->congested); in pkt_handle_queue()
1213 spin_unlock(&pd->lock); in pkt_handle_queue()
1215 pkt->sleep_time = max(PACKET_WAIT_TIME, 1); in pkt_handle_queue()
1217 atomic_set(&pkt->run_sm, 1); in pkt_handle_queue()
1219 spin_lock(&pd->cdrw.active_list_lock); in pkt_handle_queue()
1220 list_add(&pkt->list, &pd->cdrw.pkt_active_list); in pkt_handle_queue()
1221 spin_unlock(&pd->cdrw.active_list_lock); in pkt_handle_queue()
1227 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
1232 * Stops when it reaches the end of either the @src list or @dst list - that is,
1233 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
1238 struct bvec_iter src_iter = src->bi_iter; in bio_list_copy_data()
1239 struct bvec_iter dst_iter = dst->bi_iter; in bio_list_copy_data()
1243 src = src->bi_next; in bio_list_copy_data()
1247 src_iter = src->bi_iter; in bio_list_copy_data()
1251 dst = dst->bi_next; in bio_list_copy_data()
1255 dst_iter = dst->bi_iter; in bio_list_copy_data()
1266 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_start_write() argument
1268 struct device *ddev = disk_to_dev(pd->disk); in pkt_start_write()
1271 bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs, in pkt_start_write()
1272 pkt->frames, REQ_OP_WRITE); in pkt_start_write()
1273 pkt->w_bio->bi_iter.bi_sector = pkt->sector; in pkt_start_write()
1274 pkt->w_bio->bi_end_io = pkt_end_io_packet_write; in pkt_start_write()
1275 pkt->w_bio->bi_private = pkt; in pkt_start_write()
1278 for (f = 0; f < pkt->frames; f++) { in pkt_start_write()
1279 struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; in pkt_start_write()
1282 if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset)) in pkt_start_write()
1285 dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt); in pkt_start_write()
1288 * Fill-in bvec with data from orig_bios. in pkt_start_write()
1290 spin_lock(&pkt->lock); in pkt_start_write()
1291 bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head); in pkt_start_write()
1294 spin_unlock(&pkt->lock); in pkt_start_write()
1296 dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector); in pkt_start_write()
1298 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) in pkt_start_write()
1299 pkt->cache_valid = 1; in pkt_start_write()
1301 pkt->cache_valid = 0; in pkt_start_write()
1304 atomic_set(&pkt->io_wait, 1); in pkt_start_write()
1305 pkt_queue_bio(pd, pkt->w_bio); in pkt_start_write()
1313 pkt->cache_valid = 0; in pkt_finish_packet()
1316 while ((bio = bio_list_pop(&pkt->orig_bios))) { in pkt_finish_packet()
1317 bio->bi_status = status; in pkt_finish_packet()
1322 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_run_state_machine() argument
1324 struct device *ddev = disk_to_dev(pd->disk); in pkt_run_state_machine()
1326 dev_dbg(ddev, "pkt %d\n", pkt->id); in pkt_run_state_machine()
1329 switch (pkt->state) { in pkt_run_state_machine()
1331 if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0)) in pkt_run_state_machine()
1334 pkt->sleep_time = 0; in pkt_run_state_machine()
1335 pkt_gather_data(pd, pkt); in pkt_run_state_machine()
1340 if (atomic_read(&pkt->io_wait) > 0) in pkt_run_state_machine()
1343 if (atomic_read(&pkt->io_errors) > 0) { in pkt_run_state_machine()
1346 pkt_start_write(pd, pkt); in pkt_run_state_machine()
1351 if (atomic_read(&pkt->io_wait) > 0) in pkt_run_state_machine()
1354 if (!pkt->w_bio->bi_status) { in pkt_run_state_machine()
1367 pkt_finish_packet(pkt, pkt->w_bio->bi_status); in pkt_run_state_machine()
1377 static void pkt_handle_packets(struct pktcdvd_device *pd) in pkt_handle_packets() argument
1379 struct device *ddev = disk_to_dev(pd->disk); in pkt_handle_packets()
1385 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in pkt_handle_packets()
1386 if (atomic_read(&pkt->run_sm) > 0) { in pkt_handle_packets()
1387 atomic_set(&pkt->run_sm, 0); in pkt_handle_packets()
1388 pkt_run_state_machine(pd, pkt); in pkt_handle_packets()
1395 spin_lock(&pd->cdrw.active_list_lock); in pkt_handle_packets()
1396 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) { in pkt_handle_packets()
1397 if (pkt->state == PACKET_FINISHED_STATE) { in pkt_handle_packets()
1398 list_del(&pkt->list); in pkt_handle_packets()
1399 pkt_put_packet_data(pd, pkt); in pkt_handle_packets()
1401 atomic_set(&pd->scan_queue, 1); in pkt_handle_packets()
1404 spin_unlock(&pd->cdrw.active_list_lock); in pkt_handle_packets()
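For orientation, the packet life cycle that pkt_run_state_machine() steps through (state names from pktcdvd.h; the recovery state simply fails the packet, as the driver has no recovery path):

    PACKET_WAITING_STATE    -> PACKET_READ_WAIT_STATE   gather missing frames
    PACKET_READ_WAIT_STATE  -> PACKET_WRITE_WAIT_STATE  reads done, start write
                            -> PACKET_RECOVERY_STATE    read errors
    PACKET_WRITE_WAIT_STATE -> PACKET_FINISHED_STATE    write succeeded
                            -> PACKET_RECOVERY_STATE    write failed
    PACKET_RECOVERY_STATE   -> PACKET_FINISHED_STATE    bios completed with error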
1413 struct pktcdvd_device *pd = foobar; in kcdrwd() local
1414 struct device *ddev = disk_to_dev(pd->disk); in kcdrwd()
1428 add_wait_queue(&pd->wqueue, &wait); in kcdrwd()
1433 if (atomic_read(&pd->scan_queue) > 0) in kcdrwd()
1437 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in kcdrwd()
1438 if (atomic_read(&pkt->run_sm) > 0) in kcdrwd()
1443 if (atomic_read(&pd->iosched.attention) != 0) in kcdrwd()
1447 pkt_count_states(pd, states); in kcdrwd()
1452 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in kcdrwd()
1453 if (pkt->sleep_time && pkt->sleep_time < min_sleep_time) in kcdrwd()
1454 min_sleep_time = pkt->sleep_time; in kcdrwd()
1464 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in kcdrwd()
1465 if (!pkt->sleep_time) in kcdrwd()
1467 pkt->sleep_time -= min_sleep_time - residue; in kcdrwd()
1468 if (pkt->sleep_time <= 0) { in kcdrwd()
1469 pkt->sleep_time = 0; in kcdrwd()
1470 atomic_inc(&pkt->run_sm); in kcdrwd()
1479 remove_wait_queue(&pd->wqueue, &wait); in kcdrwd()
1488 while (pkt_handle_queue(pd)) in kcdrwd()
1494 pkt_handle_packets(pd); in kcdrwd()
1499 pkt_iosched_process_queue(pd); in kcdrwd()
1505 static void pkt_print_settings(struct pktcdvd_device *pd) in pkt_print_settings() argument
1507 dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n", in pkt_print_settings()
1508 pd->settings.fp ? "Fixed" : "Variable", in pkt_print_settings()
1509 pd->settings.size >> 2, in pkt_print_settings()
1510 pd->settings.block_mode == 8 ? '1' : '2'); in pkt_print_settings()
1513 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int… in pkt_mode_sense() argument
1515 memset(cgc->cmd, 0, sizeof(cgc->cmd)); in pkt_mode_sense()
1517 cgc->cmd[0] = GPCMD_MODE_SENSE_10; in pkt_mode_sense()
1518 cgc->cmd[2] = page_code | (page_control << 6); in pkt_mode_sense()
1519 put_unaligned_be16(cgc->buflen, &cgc->cmd[7]); in pkt_mode_sense()
1520 cgc->data_direction = CGC_DATA_READ; in pkt_mode_sense()
1521 return pkt_generic_packet(pd, cgc); in pkt_mode_sense()
1524 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc) in pkt_mode_select() argument
1526 memset(cgc->cmd, 0, sizeof(cgc->cmd)); in pkt_mode_select()
1527 memset(cgc->buffer, 0, 2); in pkt_mode_select()
1528 cgc->cmd[0] = GPCMD_MODE_SELECT_10; in pkt_mode_select()
1529 cgc->cmd[1] = 0x10; /* PF */ in pkt_mode_select()
1530 put_unaligned_be16(cgc->buflen, &cgc->cmd[7]); in pkt_mode_select()
1531 cgc->data_direction = CGC_DATA_WRITE; in pkt_mode_select()
1532 return pkt_generic_packet(pd, cgc); in pkt_mode_select()
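Both helpers build the standard 10-byte MMC mode CDBs. Byte layout as filled in above:

    /* MODE SENSE(10) / MODE SELECT(10) CDB:
     *   [0]    opcode: GPCMD_MODE_SENSE_10 (0x5a) or GPCMD_MODE_SELECT_10 (0x55)
     *   [1]    0x10 = PF (page format) bit, MODE SELECT only
     *   [2]    (page_control << 6) | page_code, MODE SENSE only
     *   [7..8] allocation / parameter list length, big-endian (cgc->buflen) */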
1535 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di) in pkt_get_disc_info() argument
1546 ret = pkt_generic_packet(pd, &cgc); in pkt_get_disc_info()
1553 cgc.buflen = be16_to_cpu(di->disc_information_length) + in pkt_get_disc_info()
1554 sizeof(di->disc_information_length); in pkt_get_disc_info()
1560 return pkt_generic_packet(pd, &cgc); in pkt_get_disc_info()
1563 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information … in pkt_get_track_info() argument
1575 ret = pkt_generic_packet(pd, &cgc); in pkt_get_track_info()
1579 cgc.buflen = be16_to_cpu(ti->track_information_length) + in pkt_get_track_info()
1580 sizeof(ti->track_information_length); in pkt_get_track_info()
1586 return pkt_generic_packet(pd, &cgc); in pkt_get_track_info()
1589 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, in pkt_get_last_written() argument
1597 ret = pkt_get_disc_info(pd, &di); in pkt_get_last_written()
1602 ret = pkt_get_track_info(pd, last_track, 1, &ti); in pkt_get_last_written()
1608 last_track--; in pkt_get_last_written()
1609 ret = pkt_get_track_info(pd, last_track, 1, &ti); in pkt_get_last_written()
1622 *last_written -= (be32_to_cpu(ti.free_blocks) + 7); in pkt_get_last_written()
1628 * write mode select package based on pd->settings
1630 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) in pkt_set_write_settings() argument
1632 struct device *ddev = disk_to_dev(pd->disk); in pkt_set_write_settings()
1639 /* doesn't apply to DVD+RW or DVD-RAM */ in pkt_set_write_settings()
1640 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12)) in pkt_set_write_settings()
1646 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); in pkt_set_write_settings()
1648 pkt_dump_sense(pd, &cgc); in pkt_set_write_settings()
1653 pd->mode_offset = get_unaligned_be16(&buffer[6]); in pkt_set_write_settings()
1662 ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); in pkt_set_write_settings()
1664 pkt_dump_sense(pd, &cgc); in pkt_set_write_settings()
1671 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset]; in pkt_set_write_settings()
1673 wp->fp = pd->settings.fp; in pkt_set_write_settings()
1674 wp->track_mode = pd->settings.track_mode; in pkt_set_write_settings()
1675 wp->write_type = pd->settings.write_type; in pkt_set_write_settings()
1676 wp->data_block_type = pd->settings.block_mode; in pkt_set_write_settings()
1678 wp->multi_session = 0; in pkt_set_write_settings()
1681 wp->link_size = 7; in pkt_set_write_settings()
1682 wp->ls_v = 1; in pkt_set_write_settings()
1685 if (wp->data_block_type == PACKET_BLOCK_MODE1) { in pkt_set_write_settings()
1686 wp->session_format = 0; in pkt_set_write_settings()
1687 wp->subhdr2 = 0x20; in pkt_set_write_settings()
1688 } else if (wp->data_block_type == PACKET_BLOCK_MODE2) { in pkt_set_write_settings()
1689 wp->session_format = 0x20; in pkt_set_write_settings()
1690 wp->subhdr2 = 8; in pkt_set_write_settings()
1692 wp->mcn[0] = 0x80; in pkt_set_write_settings()
1693 memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1); in pkt_set_write_settings()
1699 dev_err(ddev, "write mode wrong %d\n", wp->data_block_type); in pkt_set_write_settings()
1702 wp->packet_size = cpu_to_be32(pd->settings.size >> 2); in pkt_set_write_settings()
1705 ret = pkt_mode_select(pd, &cgc); in pkt_set_write_settings()
1707 pkt_dump_sense(pd, &cgc); in pkt_set_write_settings()
1711 pkt_print_settings(pd); in pkt_set_write_settings()
1716 * 1 -- we can write to this track, 0 -- we can't
1718 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) in pkt_writable_track() argument
1720 struct device *ddev = disk_to_dev(pd->disk); in pkt_writable_track()
1722 switch (pd->mmc3_profile) { in pkt_writable_track()
1724 case 0x12: /* DVD-RAM */ in pkt_writable_track()
1725 /* The track is always writable on DVD+RW/DVD-RAM */ in pkt_writable_track()
1731 if (!ti->packet || !ti->fp) in pkt_writable_track()
1737 if (ti->rt == 0 && ti->blank == 0) in pkt_writable_track()
1740 if (ti->rt == 0 && ti->blank == 1) in pkt_writable_track()
1743 if (ti->rt == 1 && ti->blank == 0) in pkt_writable_track()
1746 dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); in pkt_writable_track()
1751 * 1 -- we can write to this disc, 0 -- we can't
1753 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) in pkt_writable_disc() argument
1755 struct device *ddev = disk_to_dev(pd->disk); in pkt_writable_disc()
1757 switch (pd->mmc3_profile) { in pkt_writable_disc()
1758 case 0x0a: /* CD-RW */ in pkt_writable_disc()
1762 case 0x13: /* DVD-RW */ in pkt_writable_disc()
1763 case 0x12: /* DVD-RAM */ in pkt_writable_disc()
1766 dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile); in pkt_writable_disc()
1774 if (di->disc_type == 0xff) { in pkt_writable_disc()
1775 dev_notice(ddev, "unknown disc - no track?\n"); in pkt_writable_disc()
1779 if (di->disc_type != 0x20 && di->disc_type != 0) { in pkt_writable_disc()
1780 dev_err(ddev, "wrong disc type (%x)\n", di->disc_type); in pkt_writable_disc()
1784 if (di->erasable == 0) { in pkt_writable_disc()
1789 if (di->border_status == PACKET_SESSION_RESERVED) { in pkt_writable_disc()
1797 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) in pkt_probe_settings() argument
1799 struct device *ddev = disk_to_dev(pd->disk); in pkt_probe_settings()
1809 ret = pkt_generic_packet(pd, &cgc); in pkt_probe_settings()
1810 pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]); in pkt_probe_settings()
1815 ret = pkt_get_disc_info(pd, &di); in pkt_probe_settings()
1821 if (!pkt_writable_disc(pd, &di)) in pkt_probe_settings()
1822 return -EROFS; in pkt_probe_settings()
1824 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR; in pkt_probe_settings()
1827 ret = pkt_get_track_info(pd, track, 1, &ti); in pkt_probe_settings()
1833 if (!pkt_writable_track(pd, &ti)) { in pkt_probe_settings()
1835 return -EROFS; in pkt_probe_settings()
1842 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; in pkt_probe_settings()
1843 if (pd->settings.size == 0) { in pkt_probe_settings()
1845 return -ENXIO; in pkt_probe_settings()
1847 if (pd->settings.size > PACKET_MAX_SECTORS) { in pkt_probe_settings()
1849 return -EROFS; in pkt_probe_settings()
1851 pd->settings.fp = ti.fp; in pkt_probe_settings()
1852 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); in pkt_probe_settings()
1855 pd->nwa = be32_to_cpu(ti.next_writable); in pkt_probe_settings()
1856 set_bit(PACKET_NWA_VALID, &pd->flags); in pkt_probe_settings()
1860 * in theory we could use lra on -RW media as well and just zero in pkt_probe_settings()
1862 * is just a no-go. we'll use that for -R, naturally. in pkt_probe_settings()
1865 pd->lra = be32_to_cpu(ti.last_rec_address); in pkt_probe_settings()
1866 set_bit(PACKET_LRA_VALID, &pd->flags); in pkt_probe_settings()
1868 pd->lra = 0xffffffff; in pkt_probe_settings()
1869 set_bit(PACKET_LRA_VALID, &pd->flags); in pkt_probe_settings()
1875 pd->settings.link_loss = 7; in pkt_probe_settings()
1876 pd->settings.write_type = 0; /* packet */ in pkt_probe_settings()
1877 pd->settings.track_mode = ti.track_mode; in pkt_probe_settings()
1884 pd->settings.block_mode = PACKET_BLOCK_MODE1; in pkt_probe_settings()
1887 pd->settings.block_mode = PACKET_BLOCK_MODE2; in pkt_probe_settings()
1891 return -EROFS; in pkt_probe_settings()
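Worked example for the offset computation above: ti.track_start is in 2 KiB frames, so << 2 converts it to 512-byte sectors, and masking with (settings.size - 1) keeps only the track start's misalignment within a packet; get_zone() then adds this offset so zones line up with the physical packet boundaries:

    /* track_start = 1234 frames -> 1234 << 2 = 4936 sectors
     * settings.size = 128       -> offset = 4936 & 127 = 72 */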
1899 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd) in pkt_write_caching() argument
1901 struct device *ddev = disk_to_dev(pd->disk); in pkt_write_caching()
1910 cgc.buflen = pd->mode_offset + 12; in pkt_write_caching()
1917 ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0); in pkt_write_caching()
1922 * use drive write caching -- we need deferred error handling to be in pkt_write_caching()
1926 buf[pd->mode_offset + 10] |= (set << 2); in pkt_write_caching()
1929 ret = pkt_mode_select(pd, &cgc); in pkt_write_caching()
1932 pkt_dump_sense(pd, &cgc); in pkt_write_caching()
1938 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) in pkt_lock_door() argument
1945 return pkt_generic_packet(pd, &cgc); in pkt_lock_door()
1951 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, in pkt_get_max_speed() argument
1960 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset]; in pkt_get_max_speed()
1964 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); in pkt_get_max_speed()
1966 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 + in pkt_get_max_speed()
1968 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); in pkt_get_max_speed()
1970 pkt_dump_sense(pd, &cgc); in pkt_get_max_speed()
1992 /* These tables from cdrecord - I don't have orange book */
1993 /* standard speed CD-RW (1-4x) */
1998 /* high speed CD-RW (-10x) */
2003 /* ultra high speed CD-RW */
2012 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, in pkt_media_speed() argument
2015 struct device *ddev = disk_to_dev(pd->disk); in pkt_media_speed()
2028 ret = pkt_generic_packet(pd, &cgc); in pkt_media_speed()
2030 pkt_dump_sense(pd, &cgc); in pkt_media_speed()
2043 ret = pkt_generic_packet(pd, &cgc); in pkt_media_speed()
2045 pkt_dump_sense(pd, &cgc); in pkt_media_speed()
2050 dev_notice(ddev, "disc type is not CD-RW\n"); in pkt_media_speed()
2058 st = (buf[6] >> 3) & 0x7; /* disc sub-type */ in pkt_media_speed()
2074 dev_notice(ddev, "unknown disc sub-type %d\n", st); in pkt_media_speed()
2081 dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st); in pkt_media_speed()
2086 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) in pkt_perform_opc() argument
2088 struct device *ddev = disk_to_dev(pd->disk); in pkt_perform_opc()
2100 ret = pkt_generic_packet(pd, &cgc); in pkt_perform_opc()
2102 pkt_dump_sense(pd, &cgc); in pkt_perform_opc()
2106 static int pkt_open_write(struct pktcdvd_device *pd) in pkt_open_write() argument
2108 struct device *ddev = disk_to_dev(pd->disk); in pkt_open_write()
2112 ret = pkt_probe_settings(pd); in pkt_open_write()
2118 ret = pkt_set_write_settings(pd); in pkt_open_write()
2121 return -EIO; in pkt_open_write()
2124 pkt_write_caching(pd); in pkt_open_write()
2126 ret = pkt_get_max_speed(pd, &write_speed); in pkt_open_write()
2129 switch (pd->mmc3_profile) { in pkt_open_write()
2130 case 0x13: /* DVD-RW */ in pkt_open_write()
2132 case 0x12: /* DVD-RAM */ in pkt_open_write()
2136 ret = pkt_media_speed(pd, &media_write_speed); in pkt_open_write()
2145 ret = pkt_set_speed(pd, write_speed, read_speed); in pkt_open_write()
2148 return -EIO; in pkt_open_write()
2150 pd->write_speed = write_speed; in pkt_open_write()
2151 pd->read_speed = read_speed; in pkt_open_write()
2153 ret = pkt_perform_opc(pd); in pkt_open_write()
2163 static int pkt_open_dev(struct pktcdvd_device *pd, bool write) in pkt_open_dev() argument
2165 struct device *ddev = disk_to_dev(pd->disk); in pkt_open_dev()
2172 * We need to re-open the cdrom device without O_NONBLOCK to be able in pkt_open_dev()
2176 bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev, in pkt_open_dev()
2177 BLK_OPEN_READ, pd, NULL); in pkt_open_dev()
2182 pd->f_open_bdev = bdev_file; in pkt_open_dev()
2184 ret = pkt_get_last_written(pd, &lba); in pkt_open_dev()
2190 set_capacity(pd->disk, lba << 2); in pkt_open_dev()
2191 set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2); in pkt_open_dev()
2193 q = bdev_get_queue(file_bdev(pd->bdev_file)); in pkt_open_dev()
2195 ret = pkt_open_write(pd); in pkt_open_dev()
2198 set_bit(PACKET_WRITABLE, &pd->flags); in pkt_open_dev()
2200 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); in pkt_open_dev()
2201 clear_bit(PACKET_WRITABLE, &pd->flags); in pkt_open_dev()
2204 ret = pkt_set_segment_merging(pd, q); in pkt_open_dev()
2209 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { in pkt_open_dev()
2211 ret = -ENOMEM; in pkt_open_dev()
2230 static void pkt_release_dev(struct pktcdvd_device *pd, int flush) in pkt_release_dev() argument
2232 struct device *ddev = disk_to_dev(pd->disk); in pkt_release_dev()
2234 if (flush && pkt_flush_cache(pd)) in pkt_release_dev()
2237 pkt_lock_door(pd, 0); in pkt_release_dev()
2239 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); in pkt_release_dev()
2240 fput(pd->f_open_bdev); in pkt_release_dev()
2241 pd->f_open_bdev = NULL; in pkt_release_dev()
2243 pkt_shrink_pktlist(pd); in pkt_release_dev()
2257 struct pktcdvd_device *pd = NULL; in pkt_open() local
2262 pd = pkt_find_dev_from_minor(disk->first_minor); in pkt_open()
2263 if (!pd) { in pkt_open()
2264 ret = -ENODEV; in pkt_open()
2267 BUG_ON(pd->refcnt < 0); in pkt_open()
2269 pd->refcnt++; in pkt_open()
2270 if (pd->refcnt > 1) { in pkt_open()
2272 !test_bit(PACKET_WRITABLE, &pd->flags)) { in pkt_open()
2273 ret = -EBUSY; in pkt_open()
2277 ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE); in pkt_open()
2286 pd->refcnt--; in pkt_open()
2295 struct pktcdvd_device *pd = disk->private_data; in pkt_release() local
2299 pd->refcnt--; in pkt_release()
2300 BUG_ON(pd->refcnt < 0); in pkt_release()
2301 if (pd->refcnt == 0) { in pkt_release()
2302 int flush = test_bit(PACKET_WRITABLE, &pd->flags); in pkt_release()
2303 pkt_release_dev(pd, flush); in pkt_release()
2312 struct packet_stacked_data *psd = bio->bi_private; in pkt_end_io_read_cloned()
2313 struct pktcdvd_device *pd = psd->pd; in pkt_end_io_read_cloned() local
2315 psd->bio->bi_status = bio->bi_status; in pkt_end_io_read_cloned()
2317 bio_endio(psd->bio); in pkt_end_io_read_cloned()
2319 pkt_bio_finished(pd); in pkt_end_io_read_cloned()
2322 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) in pkt_make_request_read() argument
2324 struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio, in pkt_make_request_read()
2328 psd->pd = pd; in pkt_make_request_read()
2329 psd->bio = bio; in pkt_make_request_read()
2330 cloned_bio->bi_private = psd; in pkt_make_request_read()
2331 cloned_bio->bi_end_io = pkt_end_io_read_cloned; in pkt_make_request_read()
2332 pd->stats.secs_r += bio_sectors(bio); in pkt_make_request_read()
2333 pkt_queue_bio(pd, cloned_bio); in pkt_make_request_read()
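Reads bypass packet assembly entirely: the incoming bio is cloned onto the underlying CD device, and completion is trampolined back through the packet_stacked_data allocated on the elided lines. Minimal shape of the pattern (pkt_bio_set, the driver's global bio_set, is also elided by this search):

    struct bio *clone = bio_alloc_clone(file_bdev(pd->bdev_file), bio,
                                        GFP_NOIO, &pkt_bio_set);

    clone->bi_private = psd;                   /* remembers the original bio */
    clone->bi_end_io  = pkt_end_io_read_cloned;
    pkt_queue_bio(pd, clone);                  /* hand off to the iosched */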
2338 struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data; in pkt_make_request_write() local
2342 struct pkt_rb_node *node; in pkt_make_request_write() local
2344 zone = get_zone(bio->bi_iter.bi_sector, pd); in pkt_make_request_write()
2350 spin_lock(&pd->cdrw.active_list_lock); in pkt_make_request_write()
2352 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in pkt_make_request_write()
2353 if (pkt->sector == zone) { in pkt_make_request_write()
2354 spin_lock(&pkt->lock); in pkt_make_request_write()
2355 if ((pkt->state == PACKET_WAITING_STATE) || in pkt_make_request_write()
2356 (pkt->state == PACKET_READ_WAIT_STATE)) { in pkt_make_request_write()
2357 bio_list_add(&pkt->orig_bios, bio); in pkt_make_request_write()
2358 pkt->write_size += in pkt_make_request_write()
2359 bio->bi_iter.bi_size / CD_FRAMESIZE; in pkt_make_request_write()
2360 if ((pkt->write_size >= pkt->frames) && in pkt_make_request_write()
2361 (pkt->state == PACKET_WAITING_STATE)) { in pkt_make_request_write()
2362 atomic_inc(&pkt->run_sm); in pkt_make_request_write()
2363 wake_up(&pd->wqueue); in pkt_make_request_write()
2365 spin_unlock(&pkt->lock); in pkt_make_request_write()
2366 spin_unlock(&pd->cdrw.active_list_lock); in pkt_make_request_write()
2371 spin_unlock(&pkt->lock); in pkt_make_request_write()
2374 spin_unlock(&pd->cdrw.active_list_lock); in pkt_make_request_write()
2381 spin_lock(&pd->lock); in pkt_make_request_write()
2382 if (pd->write_congestion_on > 0 in pkt_make_request_write()
2383 && pd->bio_queue_size >= pd->write_congestion_on) { in pkt_make_request_write()
2386 init_wait_var_entry(&wqe, &pd->congested, 0); in pkt_make_request_write()
2388 prepare_to_wait_event(__var_waitqueue(&pd->congested), in pkt_make_request_write()
2391 if (pd->bio_queue_size <= pd->write_congestion_off) in pkt_make_request_write()
2393 pd->congested = true; in pkt_make_request_write()
2394 spin_unlock(&pd->lock); in pkt_make_request_write()
2396 spin_lock(&pd->lock); in pkt_make_request_write()
2399 spin_unlock(&pd->lock); in pkt_make_request_write()
2404 node = mempool_alloc(&pd->rb_pool, GFP_NOIO); in pkt_make_request_write()
2405 node->bio = bio; in pkt_make_request_write()
2406 spin_lock(&pd->lock); in pkt_make_request_write()
2407 BUG_ON(pd->bio_queue_size < 0); in pkt_make_request_write()
2408 was_empty = (pd->bio_queue_size == 0); in pkt_make_request_write()
2409 pkt_rbtree_insert(pd, node); in pkt_make_request_write()
2410 spin_unlock(&pd->lock); in pkt_make_request_write()
2415 atomic_set(&pd->scan_queue, 1); in pkt_make_request_write()
2418 wake_up(&pd->wqueue); in pkt_make_request_write()
2419 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) { in pkt_make_request_write()
2424 wake_up(&pd->wqueue); in pkt_make_request_write()
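The congestion logic above throttles writers: once bio_queue_size crosses the "on" mark, the submitting task sleeps on pd->congested until pkt_handle_queue() drains the tree below the "off" mark and calls wake_up_var(). The driver open-codes the wait with init_wait_var_entry()/prepare_to_wait_event() because it must drop pd->lock around each schedule; a simplified sketch of the same shape:

    spin_lock(&pd->lock);
    while (pd->bio_queue_size > pd->write_congestion_off) {
        pd->congested = true;
        spin_unlock(&pd->lock);
        wait_var_event(&pd->congested, !pd->congested); /* sleeps; simplified */
        spin_lock(&pd->lock);
    }
    spin_unlock(&pd->lock);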
2430 struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data; in pkt_submit_bio() local
2431 struct device *ddev = disk_to_dev(pd->disk); in pkt_submit_bio()
2439 bio->bi_iter.bi_sector, bio_end_sector(bio)); in pkt_submit_bio()
2445 pkt_make_request_read(pd, bio); in pkt_submit_bio()
2449 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { in pkt_submit_bio()
2450 dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector); in pkt_submit_bio()
2454 if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) { in pkt_submit_bio()
2460 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); in pkt_submit_bio()
2461 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); in pkt_submit_bio()
2464 BUG_ON(last_zone != zone + pd->settings.size); in pkt_submit_bio()
2466 split = bio_split(bio, last_zone - in pkt_submit_bio()
2467 bio->bi_iter.bi_sector, in pkt_submit_bio()
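Worked example for the split above, with 128-sector zones (the BUG_ON confirms a bio can straddle at most one zone boundary):

    /* bi_sector = 1016, size = 16 sectors, zone size = 128:
     *   zone      = 1016 & ~127 = 896
     *   last_zone = 1031 & ~127 = 1024 = zone + 128  -> must split
     *   split at 1024 - 1016 = 8 sectors: [1016..1023] + [1024..1031],
     *   and the remainder loops around to be queued against the next zone. */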
2482 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) in pkt_new_dev() argument
2484 struct device *ddev = disk_to_dev(pd->disk); in pkt_new_dev()
2489 if (pd->pkt_dev == dev) { in pkt_new_dev()
2491 return -EBUSY; in pkt_new_dev()
2497 if (file_bdev(pd2->bdev_file)->bd_dev == dev) { in pkt_new_dev()
2499 file_bdev(pd2->bdev_file)); in pkt_new_dev()
2500 return -EBUSY; in pkt_new_dev()
2502 if (pd2->pkt_dev == dev) { in pkt_new_dev()
2504 return -EBUSY; in pkt_new_dev()
2512 sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue); in pkt_new_dev()
2515 return -EINVAL; in pkt_new_dev()
2517 put_device(&sdev->sdev_gendev); in pkt_new_dev()
2522 pd->bdev_file = bdev_file; in pkt_new_dev()
2524 atomic_set(&pd->cdrw.pending_bios, 0); in pkt_new_dev()
2525 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name); in pkt_new_dev()
2526 if (IS_ERR(pd->cdrw.thread)) { in pkt_new_dev()
2531 proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd); in pkt_new_dev()
2539 return -ENOMEM; in pkt_new_dev()
2545 struct pktcdvd_device *pd = bdev->bd_disk->private_data; in pkt_ioctl() local
2546 struct device *ddev = disk_to_dev(pd->disk); in pkt_ioctl()
2549 dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); in pkt_ioctl()
2558 if (pd->refcnt == 1) in pkt_ioctl()
2559 pkt_lock_door(pd, 0); in pkt_ioctl()
2562 * forward selected CDROM ioctls to CD-ROM, for UDF in pkt_ioctl()
2569 if (!bdev->bd_disk->fops->ioctl) in pkt_ioctl()
2570 ret = -ENOTTY; in pkt_ioctl()
2572 ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); in pkt_ioctl()
2576 ret = -ENOTTY; in pkt_ioctl()
2586 struct pktcdvd_device *pd = disk->private_data; in pkt_check_events() local
2589 if (!pd) in pkt_check_events()
2591 if (!pd->bdev_file) in pkt_check_events()
2593 attached_disk = file_bdev(pd->bdev_file)->bd_disk; in pkt_check_events()
2594 if (!attached_disk || !attached_disk->fops->check_events) in pkt_check_events()
2596 return attached_disk->fops->check_events(attached_disk, clearing); in pkt_check_events()
2601 return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name); in pkt_devnode()
2616 * Set up mapping from pktcdvd device to CD-ROM device.
2626 int ret = -ENOMEM; in pkt_setup_dev()
2627 struct pktcdvd_device *pd; in pkt_setup_dev() local
2637 ret = -EBUSY; in pkt_setup_dev()
2641 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); in pkt_setup_dev()
2642 if (!pd) in pkt_setup_dev()
2645 ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE, in pkt_setup_dev()
2650 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); in pkt_setup_dev()
2651 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); in pkt_setup_dev()
2652 spin_lock_init(&pd->cdrw.active_list_lock); in pkt_setup_dev()
2654 spin_lock_init(&pd->lock); in pkt_setup_dev()
2655 spin_lock_init(&pd->iosched.lock); in pkt_setup_dev()
2656 bio_list_init(&pd->iosched.read_queue); in pkt_setup_dev()
2657 bio_list_init(&pd->iosched.write_queue); in pkt_setup_dev()
2658 init_waitqueue_head(&pd->wqueue); in pkt_setup_dev()
2659 pd->bio_queue = RB_ROOT; in pkt_setup_dev()
2661 pd->write_congestion_on = write_congestion_on; in pkt_setup_dev()
2662 pd->write_congestion_off = write_congestion_off; in pkt_setup_dev()
2669 pd->disk = disk; in pkt_setup_dev()
2670 disk->major = pktdev_major; in pkt_setup_dev()
2671 disk->first_minor = idx; in pkt_setup_dev()
2672 disk->minors = 1; in pkt_setup_dev()
2673 disk->fops = &pktcdvd_ops; in pkt_setup_dev()
2674 disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART; in pkt_setup_dev()
2675 snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx); in pkt_setup_dev()
2676 disk->private_data = pd; in pkt_setup_dev()
2678 pd->pkt_dev = MKDEV(pktdev_major, idx); in pkt_setup_dev()
2679 ret = pkt_new_dev(pd, dev); in pkt_setup_dev()
2684 disk->events = file_bdev(pd->bdev_file)->bd_disk->events; in pkt_setup_dev()
2690 pkt_sysfs_dev_new(pd); in pkt_setup_dev()
2691 pkt_debugfs_dev_new(pd); in pkt_setup_dev()
2693 pkt_devs[idx] = pd; in pkt_setup_dev()
2695 *pkt_dev = pd->pkt_dev; in pkt_setup_dev()
2703 mempool_exit(&pd->rb_pool); in pkt_setup_dev()
2704 kfree(pd); in pkt_setup_dev()
2712 * Tear down mapping from pktcdvd device to CD-ROM device.
2716 struct pktcdvd_device *pd; in pkt_remove_dev() local
2724 pd = pkt_devs[idx]; in pkt_remove_dev()
2725 if (pd && (pd->pkt_dev == pkt_dev)) in pkt_remove_dev()
2730 ret = -ENXIO; in pkt_remove_dev()
2734 if (pd->refcnt > 0) { in pkt_remove_dev()
2735 ret = -EBUSY; in pkt_remove_dev()
2739 ddev = disk_to_dev(pd->disk); in pkt_remove_dev()
2741 if (!IS_ERR(pd->cdrw.thread)) in pkt_remove_dev()
2742 kthread_stop(pd->cdrw.thread); in pkt_remove_dev()
2746 pkt_debugfs_dev_remove(pd); in pkt_remove_dev()
2747 pkt_sysfs_dev_remove(pd); in pkt_remove_dev()
2749 fput(pd->bdev_file); in pkt_remove_dev()
2751 remove_proc_entry(pd->disk->disk_name, pkt_proc); in pkt_remove_dev()
2754 del_gendisk(pd->disk); in pkt_remove_dev()
2755 put_disk(pd->disk); in pkt_remove_dev()
2757 mempool_exit(&pd->rb_pool); in pkt_remove_dev()
2758 kfree(pd); in pkt_remove_dev()
2770 struct pktcdvd_device *pd; in pkt_get_status() local
2774 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index); in pkt_get_status()
2775 if (pd) { in pkt_get_status()
2776 ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev); in pkt_get_status()
2777 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev); in pkt_get_status()
2779 ctrl_cmd->dev = 0; in pkt_get_status()
2780 ctrl_cmd->pkt_dev = 0; in pkt_get_status()
2782 ctrl_cmd->num_devices = MAX_WRITERS; in pkt_get_status()
2795 return -ENOTTY; in pkt_ctl_ioctl()
2798 return -EFAULT; in pkt_ctl_ioctl()
2803 return -EPERM; in pkt_ctl_ioctl()
2809 return -EPERM; in pkt_ctl_ioctl()
2816 return -ENOTTY; in pkt_ctl_ioctl()
2820 return -EFAULT; in pkt_ctl_ioctl()