Lines matching the full identifier `md` in drivers/rapidio/devices/rio_mport_cdev.c:
123 struct mport_dev *md; member
187 * @md: master port character device object
199 struct mport_dev *md; member
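Taken together, the member references at lines 123 and 199 and the field accesses throughout this listing imply roughly the layout below. This is a reconstruction inferred from the matches, not the driver's verbatim definitions; types, field order, and any omitted members are assumptions. Later sketches in this listing reuse these reconstructed types.

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rio.h>
#include <linux/rio_mport_cdev.h>
#include <linux/spinlock.h>

/* Reconstructed from the field accesses in this listing. */
struct mport_dev {
	struct cdev cdev;
	struct device dev;
	struct rio_mport *mport;	/* associated master port */
	atomic_t active;		/* cleared in mport_cdev_remove() */
	struct list_head node;		/* entry in global mport_devs */
	struct mutex buf_mutex;		/* guards mappings */
	struct list_head mappings;	/* in/outbound windows, DMA buffers */
	struct mutex file_mutex;	/* guards file_list */
	struct list_head file_list;	/* open-file clients */
	spinlock_t db_lock;		/* guards doorbells */
	struct list_head doorbells;	/* doorbell filters */
	spinlock_t pw_lock;		/* guards portwrites */
	struct list_head portwrites;	/* port-write filters */
	struct dma_chan *dma_chan;	/* shared default DMA channel */
	struct kref dma_ref;		/* refcount on dma_chan */
	struct rio_mport_properties properties;
};

/* One entry on md->mappings; fields inferred from the lookups below. */
struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	u16 rioid;			/* destination ID (outbound) */
	u64 rio_addr;			/* RapidIO-side base address */
	size_t size;
	void *virt_addr;		/* CPU address of backing buffer */
	dma_addr_t phys_addr;		/* bus address of backing buffer */
};

/* Per-open-file state (line 199): a pointer back to the mport_dev
 * plus an optional private DMA channel. */
struct mport_cdev_priv {
	struct mport_dev *md;
	struct dma_chan *dmach;
	struct list_head list;		/* entry in md->file_list */
};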
263 struct rio_mport *mport = priv->md->mport; in rio_mport_maint_rd()
308 struct rio_mport *mport = priv->md->mport; in rio_mport_maint_wr()
361 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_outbound_mapping() argument
365 struct rio_mport *mport = md->mport; in rio_mport_create_outbound_mapping()
385 map->md = md; in rio_mport_create_outbound_mapping()
387 list_add_tail(&map->node, &md->mappings); in rio_mport_create_outbound_mapping()
395 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_get_outbound_mapping() argument
402 mutex_lock(&md->buf_mutex); in rio_mport_get_outbound_mapping()
403 list_for_each_entry(map, &md->mappings, node) { in rio_mport_get_outbound_mapping()
421 err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr, in rio_mport_get_outbound_mapping()
423 mutex_unlock(&md->buf_mutex); in rio_mport_get_outbound_mapping()
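rio_mport_get_outbound_mapping() (lines 395-423) scans md->mappings under buf_mutex and only creates a new window on a miss. A minimal sketch of that lookup-or-create pattern using the reconstructed types above; create_outbound() is a hypothetical stand-in for rio_mport_create_outbound_mapping():

static int get_or_create_obw(struct mport_dev *md, u16 rioid, u64 raddr,
			     u32 size, dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->rioid == rioid && map->rio_addr == raddr &&
		    map->size == size) {
			*paddr = map->phys_addr; /* reuse existing window */
			mutex_unlock(&md->buf_mutex);
			return 0;
		}
	}
	/* No match: create while still holding the lock, so a racing
	 * caller cannot set up a duplicate window (as at line 421). */
	err = create_outbound(md, rioid, raddr, size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}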
430 struct mport_dev *data = priv->md; in rio_mport_obw_map()
464 struct mport_dev *md = priv->md; in rio_mport_obw_free() local
468 if (!md->mport->ops->unmap_outb) in rio_mport_obw_free()
476 mutex_lock(&md->buf_mutex); in rio_mport_obw_free()
477 list_for_each_entry_safe(map, _map, &md->mappings, node) { in rio_mport_obw_free()
487 mutex_unlock(&md->buf_mutex); in rio_mport_obw_free()
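rio_mport_obw_free() (lines 464-487) walks the same list with the _safe iterator because it unlinks entries mid-walk; the plain iterator would dereference a just-freed node. A sketch, where match() and release_obw() are hypothetical helpers:

static void free_matching_obw(struct mport_dev *md, u64 rio_base)
{
	struct rio_mport_mapping *map, *_map;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (match(map, rio_base)) {
			list_del(&map->node);
			release_obw(map);	/* unmap window, kfree(map) */
		}
	}
	mutex_unlock(&md->buf_mutex);
}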
499 struct mport_dev *md = priv->md; in maint_hdid_set() local
505 md->mport->host_deviceid = hdid; in maint_hdid_set()
506 md->properties.hdid = hdid; in maint_hdid_set()
507 rio_local_set_device_id(md->mport, hdid); in maint_hdid_set()
521 struct mport_dev *md = priv->md; in maint_comptag_set() local
527 rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); in maint_comptag_set()
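maint_hdid_set() (lines 499-507) keeps three copies of the host device ID consistent: the kernel's rio_mport, the cached properties later copied to user space (line 2095), and the hardware's Base Device ID CSR via rio_local_set_device_id(). Condensed:

static void apply_hdid(struct mport_dev *md, u16 hdid)
{
	md->mport->host_deviceid = hdid;	  /* kernel view */
	md->properties.hdid = hdid;		  /* user-space view */
	rio_local_set_device_id(md->mport, hdid); /* hardware CSR */
}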
555 struct mport_dev *md = in mport_release_def_dma() local
558 rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id); in mport_release_def_dma()
559 rio_release_dma(md->dma_chan); in mport_release_def_dma()
560 md->dma_chan = NULL; in mport_release_def_dma()
587 mutex_lock(&req->map->md->buf_mutex); in dma_req_free()
589 mutex_unlock(&req->map->md->buf_mutex); in dma_req_free()
657 priv->dmach = rio_request_mport_dma(priv->md->mport); in get_dma_channel()
660 if (priv->md->dma_chan) { in get_dma_channel()
661 priv->dmach = priv->md->dma_chan; in get_dma_channel()
662 kref_get(&priv->md->dma_ref); in get_dma_channel()
668 } else if (!priv->md->dma_chan) { in get_dma_channel()
670 priv->md->dma_chan = priv->dmach; in get_dma_channel()
671 kref_init(&priv->md->dma_ref); in get_dma_channel()
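Lines 555-671 show one default DMA channel shared by all open files and refcounted with a kref: the first get_dma_channel() caller allocates it and kref_init()s the count, later callers kref_get(), and mport_release_def_dma() (line 555) runs when the last kref_put() drops the count to zero. A sketch of the pattern; the serializing lock the driver holds around this is elided:

/* kref release callback: runs once, when the last reference drops. */
static void release_def_dma(struct kref *ref)
{
	struct mport_dev *md = container_of(ref, struct mport_dev, dma_ref);

	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static struct dma_chan *get_def_dma(struct mport_dev *md)
{
	if (md->dma_chan) {
		kref_get(&md->dma_ref);		/* extra user */
		return md->dma_chan;
	}
	md->dma_chan = rio_request_mport_dma(md->mport);
	if (md->dma_chan)
		kref_init(&md->dma_ref);	/* count = 1 */
	return md->dma_chan;
}

/* Each successful get is paired with
 *	kref_put(&md->dma_ref, release_def_dma);
 * as in mport_cdev_release_dma() at line 2001. */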
816 struct mport_dev *md = priv->md; in rio_dma_transfer() local
904 mutex_lock(&md->buf_mutex); in rio_dma_transfer()
905 list_for_each_entry(map, &md->mappings, node) { in rio_dma_transfer()
913 mutex_unlock(&md->buf_mutex); in rio_dma_transfer()
979 priv->md->properties.transfer_mode) == 0) in rio_mport_transfer_ioctl()
1088 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_dma_mapping() argument
1097 map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size, in rio_mport_create_dma_mapping()
1107 map->md = md; in rio_mport_create_dma_mapping()
1109 mutex_lock(&md->buf_mutex); in rio_mport_create_dma_mapping()
1110 list_add_tail(&map->node, &md->mappings); in rio_mport_create_dma_mapping()
1111 mutex_unlock(&md->buf_mutex); in rio_mport_create_dma_mapping()
1120 struct mport_dev *md = priv->md; in rio_mport_alloc_dma() local
1128 ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); in rio_mport_alloc_dma()
1135 mutex_lock(&md->buf_mutex); in rio_mport_alloc_dma()
1137 mutex_unlock(&md->buf_mutex); in rio_mport_alloc_dma()
1147 struct mport_dev *md = priv->md; in rio_mport_free_dma() local
1156 mutex_lock(&md->buf_mutex); in rio_mport_free_dma()
1157 list_for_each_entry_safe(map, _map, &md->mappings, node) { in rio_mport_free_dma()
1165 mutex_unlock(&md->buf_mutex); in rio_mport_free_dma()
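rio_mport_create_dma_mapping() (lines 1088-1111) allocates a coherent buffer from the mport's parent device and publishes it on md->mappings so a later mmap() (line 2237) can find it. A sketch:

static int create_dma_buf(struct mport_dev *md, size_t size,
			  struct rio_mport_mapping **out)
{
	struct rio_mport_mapping *map;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (!map->virt_addr) {
		kfree(map);
		return -ENOMEM;
	}
	map->size = size;
	map->md = md;

	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);

	*out = map;
	return 0;
}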
1201 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_inbound_mapping() argument
1205 struct rio_mport *mport = md->mport; in rio_mport_create_inbound_mapping()
1234 map->md = md; in rio_mport_create_inbound_mapping()
1236 mutex_lock(&md->buf_mutex); in rio_mport_create_inbound_mapping()
1237 list_add_tail(&map->node, &md->mappings); in rio_mport_create_inbound_mapping()
1238 mutex_unlock(&md->buf_mutex); in rio_mport_create_inbound_mapping()
1251 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_get_inbound_mapping() argument
1261 mutex_lock(&md->buf_mutex); in rio_mport_get_inbound_mapping()
1262 list_for_each_entry(map, &md->mappings, node) { in rio_mport_get_inbound_mapping()
1276 mutex_unlock(&md->buf_mutex); in rio_mport_get_inbound_mapping()
1282 return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping); in rio_mport_get_inbound_mapping()
1288 struct mport_dev *md = priv->md; in rio_mport_map_inbound() local
1293 if (!md->mport->ops->map_inb) in rio_mport_map_inbound()
1298 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); in rio_mport_map_inbound()
1300 ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr, in rio_mport_map_inbound()
1311 mutex_lock(&md->buf_mutex); in rio_mport_map_inbound()
1313 mutex_unlock(&md->buf_mutex); in rio_mport_map_inbound()
1330 struct mport_dev *md = priv->md; in rio_mport_inbound_free() local
1334 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); in rio_mport_inbound_free()
1336 if (!md->mport->ops->unmap_inb) in rio_mport_inbound_free()
1342 mutex_lock(&md->buf_mutex); in rio_mport_inbound_free()
1343 list_for_each_entry_safe(map, _map, &md->mappings, node) { in rio_mport_inbound_free()
1352 mutex_unlock(&md->buf_mutex); in rio_mport_inbound_free()
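The inbound path (lines 1201-1352) mirrors the outbound one but backs the window with rio_map_inb_region(), which exposes a local buffer at a RapidIO address for remote endpoints. A hedged sketch built on create_dma_buf() above; the zero rflags value and the unwind details are assumptions:

static int create_inbound_mapping(struct mport_dev *md, u32 raddr, u32 size,
				  struct rio_mport_mapping **out)
{
	struct rio_mport_mapping *map;
	int ret;

	ret = create_dma_buf(md, size, &map);
	if (ret)
		return ret;

	ret = rio_map_inb_region(md->mport, map->phys_addr, raddr, size, 0);
	if (ret) {
		/* Unwind: unlink and free the backing buffer. */
		mutex_lock(&md->buf_mutex);
		list_del(&map->node);
		mutex_unlock(&md->buf_mutex);
		dma_free_coherent(md->mport->dev.parent, size,
				  map->virt_addr, map->phys_addr);
		kfree(map);
		return ret;
	}
	*out = map;
	return 0;
}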
1364 struct mport_dev *md = priv->md; in maint_port_idx_get() local
1365 u32 port_idx = md->mport->index; in maint_port_idx_get()
1392 dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n"); in rio_mport_add_event()
1435 struct mport_dev *md = priv->md; in rio_mport_add_db_filter() local
1447 ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high, in rio_mport_add_db_filter()
1451 dev_name(&md->dev), ret); in rio_mport_add_db_filter()
1457 rio_release_inb_dbell(md->mport, filter.low, filter.high); in rio_mport_add_db_filter()
1463 spin_lock_irqsave(&md->db_lock, flags); in rio_mport_add_db_filter()
1465 list_add_tail(&db_filter->data_node, &md->doorbells); in rio_mport_add_db_filter()
1466 spin_unlock_irqrestore(&md->db_lock, flags); in rio_mport_add_db_filter()
1492 spin_lock_irqsave(&priv->md->db_lock, flags); in rio_mport_remove_db_filter()
1502 spin_unlock_irqrestore(&priv->md->db_lock, flags); in rio_mport_remove_db_filter()
1505 rio_release_inb_dbell(priv->md->mport, filter.low, filter.high); in rio_mport_remove_db_filter()
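Doorbell filters (lines 1435-1505) go on md->doorbells under the db_lock spinlock with the irqsave variants, since the inbound-doorbell callback can run in interrupt context; note the unwind at line 1457 releasing the doorbell range when setup fails. A sketch, with struct db_filter assumed:

struct db_filter {
	struct list_head data_node;
	u16 low, high;
};

/* Callback signature required by rio_request_inb_dbell(). */
static void db_callback(struct rio_mport *mport, void *dev_id,
			u16 src, u16 dst, u16 info)
{
	/* ... locate matching filters under db_lock, queue an event ... */
}

static int add_db_filter(struct mport_dev *md, u16 low, u16 high)
{
	struct db_filter *f;
	unsigned long flags;
	int ret;

	ret = rio_request_inb_dbell(md->mport, md, low, high, db_callback);
	if (ret)
		return ret;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f) {
		rio_release_inb_dbell(md->mport, low, high); /* unwind */
		return -ENOMEM;
	}
	f->low = low;
	f->high = high;

	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&f->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);
	return 0;
}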
1522 struct mport_dev *md = context; in rio_mport_pw_handler() local
1532 spin_lock(&md->pw_lock); in rio_mport_pw_handler()
1533 list_for_each_entry(pw_filter, &md->portwrites, md_node) { in rio_mport_pw_handler()
1540 spin_unlock(&md->pw_lock); in rio_mport_pw_handler()
1554 struct mport_dev *md = priv->md; in rio_mport_add_pw_filter() local
1569 spin_lock_irqsave(&md->pw_lock, flags); in rio_mport_add_pw_filter()
1570 if (list_empty(&md->portwrites)) in rio_mport_add_pw_filter()
1573 list_add_tail(&pw_filter->md_node, &md->portwrites); in rio_mport_add_pw_filter()
1574 spin_unlock_irqrestore(&md->pw_lock, flags); in rio_mport_add_pw_filter()
1579 ret = rio_add_mport_pw_handler(md->mport, md, in rio_mport_add_pw_filter()
1582 dev_err(&md->dev, in rio_mport_add_pw_filter()
1587 rio_pw_enable(md->mport, 1); in rio_mport_add_pw_filter()
1611 struct mport_dev *md = priv->md; in rio_mport_remove_pw_filter() local
1621 spin_lock_irqsave(&md->pw_lock, flags); in rio_mport_remove_pw_filter()
1630 if (list_empty(&md->portwrites)) in rio_mport_remove_pw_filter()
1632 spin_unlock_irqrestore(&md->pw_lock, flags); in rio_mport_remove_pw_filter()
1635 rio_del_mport_pw_handler(md->mport, priv->md, in rio_mport_remove_pw_filter()
1637 rio_pw_enable(md->mport, 0); in rio_mport_remove_pw_filter()
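The port-write side (lines 1522-1637) registers the single mport-wide handler only when the first filter arrives (the list_empty() test at line 1570) and unhooks it after the last one leaves, toggling rio_pw_enable() to match. A sketch of the add side, error unwinding trimmed and struct pw_filter assumed:

struct pw_filter {
	struct list_head md_node;
	/* ... match criteria ... */
};

static int add_pw_filter(struct mport_dev *md, struct pw_filter *f)
{
	unsigned long flags;
	int first, ret = 0;

	spin_lock_irqsave(&md->pw_lock, flags);
	first = list_empty(&md->portwrites);	/* no handler registered yet */
	list_add_tail(&f->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (first) {
		/* One handler serves all filters; it re-scans the list
		 * under pw_lock on each port-write (line 1533). */
		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (!ret)
			rio_pw_enable(md->mport, 1);
	}
	return ret;
}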
1681 struct mport_dev *md = priv->md; in rio_mport_add_riodev() local
1709 mport = md->mport; in rio_mport_add_riodev()
1835 mport = priv->md->mport; in rio_mport_del_riodev()
1908 priv->md = chdev; in mport_cdev_open()
1952 struct mport_dev *md; in mport_cdev_release_dma() local
1966 md = priv->md; in mport_cdev_release_dma()
1995 if (priv->dmach != priv->md->dma_chan) { in mport_cdev_release_dma()
2001 kref_put(&md->dma_ref, mport_release_def_dma); in mport_cdev_release_dma()
2022 rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp); in mport_cdev_release()
2024 chdev = priv->md; in mport_cdev_release()
2074 struct mport_dev *md = data->md; in mport_cdev_ioctl() local
2076 if (atomic_read(&md->active) == 0) in mport_cdev_ioctl()
2095 md->properties.hdid = md->mport->host_deviceid; in mport_cdev_ioctl()
2096 if (copy_to_user((void __user *)arg, &(md->properties), in mport_cdev_ioctl()
2097 sizeof(md->properties))) in mport_cdev_ioctl()
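The ioctl entry point (lines 2074-2097) bails out once md->active has been cleared by mport_cdev_remove(), then refreshes the one mutable field before handing the cached properties to user space in a single copy_to_user(). A sketch of that leg:

static int get_properties(struct mport_dev *md, void __user *arg)
{
	if (atomic_read(&md->active) == 0)
		return -ENODEV;			/* device being removed */

	/* hdid can change at runtime (line 506); refresh before copy. */
	md->properties.hdid = md->mport->host_deviceid;
	if (copy_to_user(arg, &md->properties, sizeof(md->properties)))
		return -EFAULT;
	return 0;
}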
2153 struct rio_mport *mport = map->md->mport; in mport_release_mapping()
2189 mutex_lock(&map->md->buf_mutex); in mport_mm_close()
2191 mutex_unlock(&map->md->buf_mutex); in mport_mm_close()
2202 struct mport_dev *md; in mport_cdev_mmap() local
2212 md = priv->md; in mport_cdev_mmap()
2215 mutex_lock(&md->buf_mutex); in mport_cdev_mmap()
2216 list_for_each_entry(map, &md->mappings, node) { in mport_cdev_mmap()
2223 mutex_unlock(&md->buf_mutex); in mport_cdev_mmap()
2237 ret = dma_mmap_coherent(md->mport->dev.parent, vma, in mport_cdev_mmap()
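mport_cdev_mmap() (lines 2202-2237) resolves the mmap offset to a registered mapping under buf_mutex, then lets dma_mmap_coherent() insert the buffer's pages into the vma. A simplified sketch; matching by exact base offset and resetting vm_pgoff are assumptions about the lookup details:

static int sketch_mmap(struct mport_dev *md, struct vm_area_struct *vma)
{
	size_t len = vma->vm_end - vma->vm_start;
	struct rio_mport_mapping *map, *found = NULL;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (vma->vm_pgoff == (map->phys_addr >> PAGE_SHIFT) &&
		    len <= map->size) {
			found = map;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);
	if (!found)
		return -ENXIO;

	/* Offset already consumed by the lookup; map from buffer start. */
	vma->vm_pgoff = 0;
	return dma_mmap_coherent(md->mport->dev.parent, vma,
				 found->virt_addr, found->phys_addr, len);
}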
2306 struct rio_mport *mport = priv->md->mport; in mport_write()
2355 struct mport_dev *md; in mport_device_release() local
2358 md = container_of(dev, struct mport_dev, dev); in mport_device_release()
2359 kfree(md); in mport_device_release()
2369 struct mport_dev *md; in mport_cdev_add() local
2372 md = kzalloc(sizeof(*md), GFP_KERNEL); in mport_cdev_add()
2373 if (!md) { in mport_cdev_add()
2378 md->mport = mport; in mport_cdev_add()
2379 mutex_init(&md->buf_mutex); in mport_cdev_add()
2380 mutex_init(&md->file_mutex); in mport_cdev_add()
2381 INIT_LIST_HEAD(&md->file_list); in mport_cdev_add()
2383 device_initialize(&md->dev); in mport_cdev_add()
2384 md->dev.devt = MKDEV(MAJOR(dev_number), mport->id); in mport_cdev_add()
2385 md->dev.class = &dev_class; in mport_cdev_add()
2386 md->dev.parent = &mport->dev; in mport_cdev_add()
2387 md->dev.release = mport_device_release; in mport_cdev_add()
2388 dev_set_name(&md->dev, DEV_NAME "%d", mport->id); in mport_cdev_add()
2389 atomic_set(&md->active, 1); in mport_cdev_add()
2391 cdev_init(&md->cdev, &mport_fops); in mport_cdev_add()
2392 md->cdev.owner = THIS_MODULE; in mport_cdev_add()
2394 INIT_LIST_HEAD(&md->doorbells); in mport_cdev_add()
2395 spin_lock_init(&md->db_lock); in mport_cdev_add()
2396 INIT_LIST_HEAD(&md->portwrites); in mport_cdev_add()
2397 spin_lock_init(&md->pw_lock); in mport_cdev_add()
2398 INIT_LIST_HEAD(&md->mappings); in mport_cdev_add()
2400 md->properties.id = mport->id; in mport_cdev_add()
2401 md->properties.sys_size = mport->sys_size; in mport_cdev_add()
2402 md->properties.hdid = mport->host_deviceid; in mport_cdev_add()
2403 md->properties.index = mport->index; in mport_cdev_add()
2409 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; in mport_cdev_add()
2411 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; in mport_cdev_add()
2414 ret = cdev_device_add(&md->cdev, &md->dev); in mport_cdev_add()
2422 md->properties.flags = attr.flags; in mport_cdev_add()
2423 md->properties.link_speed = attr.link_speed; in mport_cdev_add()
2424 md->properties.link_width = attr.link_width; in mport_cdev_add()
2425 md->properties.dma_max_sge = attr.dma_max_sge; in mport_cdev_add()
2426 md->properties.dma_max_size = attr.dma_max_size; in mport_cdev_add()
2427 md->properties.dma_align = attr.dma_align; in mport_cdev_add()
2428 md->properties.cap_sys_size = 0; in mport_cdev_add()
2429 md->properties.cap_transfer_mode = 0; in mport_cdev_add()
2430 md->properties.cap_addr_size = 0; in mport_cdev_add()
2436 list_add_tail(&md->node, &mport_devs); in mport_cdev_add()
2442 return md; in mport_cdev_add()
2445 put_device(&md->dev); in mport_cdev_add()
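mport_cdev_add() (lines 2369-2445) follows the standard embedded-device pattern: after device_initialize(), the structure may only be freed through put_device(), which funnels into mport_device_release() (lines 2355-2359) and its kfree(). The error path at line 2445 relies on exactly that. A condensed skeleton, with mport_fops, dev_class, and DEV_NAME as in the listing:

static void sketch_release(struct device *dev)
{
	kfree(container_of(dev, struct mport_dev, dev));
}

static struct mport_dev *sketch_add(struct rio_mport *mport, dev_t base)
{
	struct mport_dev *md;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return NULL;

	md->mport = mport;
	device_initialize(&md->dev);	/* from here on: free via put_device() */
	md->dev.devt = MKDEV(MAJOR(base), mport->id);
	md->dev.class = &dev_class;
	md->dev.parent = &mport->dev;
	md->dev.release = sketch_release;
	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
	atomic_set(&md->active, 1);

	cdev_init(&md->cdev, &mport_fops);
	md->cdev.owner = THIS_MODULE;

	/* Registers the cdev and device together; open() can then reach
	 * md via container_of() on the inode's cdev. */
	if (cdev_device_add(&md->cdev, &md->dev)) {
		put_device(&md->dev);	/* frees md via sketch_release */
		return NULL;
	}
	return md;
}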
2453 static void mport_cdev_terminate_dma(struct mport_dev *md) in mport_cdev_terminate_dma() argument
2458 rmcd_debug(DMA, "%s", dev_name(&md->dev)); in mport_cdev_terminate_dma()
2460 mutex_lock(&md->file_mutex); in mport_cdev_terminate_dma()
2461 list_for_each_entry(client, &md->file_list, list) { in mport_cdev_terminate_dma()
2467 mutex_unlock(&md->file_mutex); in mport_cdev_terminate_dma()
2469 if (md->dma_chan) { in mport_cdev_terminate_dma()
2470 dmaengine_terminate_all(md->dma_chan); in mport_cdev_terminate_dma()
2471 rio_release_dma(md->dma_chan); in mport_cdev_terminate_dma()
2472 md->dma_chan = NULL; in mport_cdev_terminate_dma()
2482 static int mport_cdev_kill_fasync(struct mport_dev *md) in mport_cdev_kill_fasync() argument
2487 mutex_lock(&md->file_mutex); in mport_cdev_kill_fasync()
2488 list_for_each_entry(client, &md->file_list, list) { in mport_cdev_kill_fasync()
2493 mutex_unlock(&md->file_mutex); in mport_cdev_kill_fasync()
2501 static void mport_cdev_remove(struct mport_dev *md) in mport_cdev_remove() argument
2505 rmcd_debug(EXIT, "Remove %s cdev", md->mport->name); in mport_cdev_remove()
2506 atomic_set(&md->active, 0); in mport_cdev_remove()
2507 mport_cdev_terminate_dma(md); in mport_cdev_remove()
2508 rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler); in mport_cdev_remove()
2509 cdev_device_del(&md->cdev, &md->dev); in mport_cdev_remove()
2510 mport_cdev_kill_fasync(md); in mport_cdev_remove()
2520 mutex_lock(&md->buf_mutex); in mport_cdev_remove()
2521 list_for_each_entry_safe(map, _map, &md->mappings, node) { in mport_cdev_remove()
2524 mutex_unlock(&md->buf_mutex); in mport_cdev_remove()
2526 if (!list_empty(&md->mappings)) in mport_cdev_remove()
2528 md->mport->name); in mport_cdev_remove()
2530 rio_release_inb_dbell(md->mport, 0, 0x0fff); in mport_cdev_remove()
2532 put_device(&md->dev); in mport_cdev_remove()
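mport_cdev_remove() (lines 2501-2532) unwinds in reverse-dependency order: mark the device inactive so new ioctls fail fast, quiesce DMA, unhook the port-write handler, pull the cdev/device pair so no new opens arrive, signal async readers, drop remaining mappings, release the whole inbound doorbell range, and only then drop the final device reference. In outline, with release_mapping() as a hypothetical stand-in for the driver's per-mapping release (mport_release_mapping(), line 2153), whose exact refcounting is elided:

static void sketch_remove(struct mport_dev *md)
{
	struct rio_mport_mapping *map, *_map;

	atomic_set(&md->active, 0);		/* new ioctls return an error */
	mport_cdev_terminate_dma(md);		/* stop in-flight transfers */
	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
	cdev_device_del(&md->cdev, &md->dev);	/* no new opens */
	mport_cdev_kill_fasync(md);		/* wake async waiters */

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node)
		release_mapping(map);		/* unlink and free entry */
	mutex_unlock(&md->buf_mutex);

	rio_release_inb_dbell(md->mport, 0, 0x0fff); /* full dbell range */
	put_device(&md->dev);			/* may free md via release */
}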