Lines Matching full:dev

234 void buffer_mem_free(struct device *dev, struct buffer *buf);
262 device_get_buffer(struct device *dev, uint64_t iova) in device_get_buffer() argument
266 return (struct buffer *)rb_tree_search(&dev->buffers, &iova, in device_get_buffer()
271 device_mark_buffers(struct device *dev) in device_mark_buffers() argument
273 rb_tree_foreach_safe (struct buffer, buf, &dev->buffers, node) { in device_mark_buffers()
279 device_free_buffers(struct device *dev) in device_free_buffers() argument
281 rb_tree_foreach_safe (struct buffer, buf, &dev->buffers, node) { in device_free_buffers()
282 buffer_mem_free(dev, buf); in device_free_buffers()
283 rb_tree_remove(&dev->buffers, &buf->node); in device_free_buffers()
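
The tool keeps every buffer recorded in the capture in a Mesa rb_tree keyed by GPU address, so device_get_buffer() can resolve any iova that falls inside a tracked range and device_free_buffers() can walk and drop them all. A minimal sketch of the data structure and a range comparator, assuming a struct buffer with iova/size/map fields; the comparator's sign convention must agree with what util/rb_tree.h's rb_tree_search() expects:

#include <stdbool.h>
#include <stdint.h>
#include "util/rb_tree.h"

struct buffer {
   struct rb_node node;   /* linkage for rb_tree_insert()/rb_tree_search() */
   uint64_t iova;         /* GPU address recorded in the capture */
   uint64_t size;
   void *map;             /* CPU view of the replayed contents */
   bool used;             /* assumed flag cleared by device_mark_buffers() */
};

/* Range comparator: the key is a GPU address and a node matches when it falls
 * inside [iova, iova + size). 'node' is the first member of struct buffer, so
 * a plain cast is enough. Illustrative only. */
static int
rb_buffer_search_cmp(const struct rb_node *node, const void *key)
{
   const struct buffer *buf = (const struct buffer *)node;
   uint64_t iova = *(const uint64_t *)key;

   if (buf->iova + buf->size <= iova)
      return -1;
   if (buf->iova > iova)
      return 1;
   return 0;
}
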
289 device_print_shader_log(struct device *dev) in device_print_shader_log() argument
299 if (dev->shader_log_iova != 0) in device_print_shader_log()
301 struct buffer *buf = device_get_buffer(dev, dev->shader_log_iova); in device_print_shader_log()
303 struct shader_log *log = buf->map + (dev->shader_log_iova - buf->iova); in device_print_shader_log()
304 uint32_t count = (log->cur_iova - dev->shader_log_iova - in device_print_shader_log()
320 device_print_cp_log(struct device *dev) in device_print_cp_log() argument
333 if (dev->cp_log_iova == 0) in device_print_cp_log()
336 struct buffer *buf = device_get_buffer(dev, dev->cp_log_iova); in device_print_cp_log()
340 struct cp_log *log = buf->map + (dev->cp_log_iova - buf->iova); in device_print_cp_log()
366 device_dump_wrbuf(struct device *dev) in device_dump_wrbuf() argument
368 if (!u_vector_length(&dev->wrbufs)) in device_dump_wrbuf()
378 u_vector_foreach(wrbuf, &dev->wrbufs) { in device_dump_wrbuf()
387 struct buffer *buf = device_get_buffer(dev, wrbuf->iova); in device_dump_wrbuf()
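
device_print_shader_log(), device_print_cp_log() and device_dump_wrbuf() all read GPU-written data back by turning an iova into a CPU pointer through the buffer that backs it. A hypothetical helper capturing that pattern (this function does not exist in the tool):

/* Hypothetical helper mirroring the buf->map + (iova - buf->iova) pattern
 * used by the log printers and the wrbuf dumper. */
static void *
device_map_iova(struct device *dev, uint64_t iova)
{
   struct buffer *buf = device_get_buffer(dev, iova);
   if (!buf)
      return NULL;
   return (uint8_t *)buf->map + (iova - buf->iova);
}
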
421 struct device *dev = calloc(sizeof(struct device), 1); in device_create() local
423 dev->fd = drmOpenWithType("msm", NULL, DRM_NODE_RENDER); in device_create()
424 if (dev->fd < 0) { in device_create()
435 int ret = drmCommandWriteRead(dev->fd, DRM_MSM_GET_PARAM, &req, sizeof(req)); in device_create()
440 ret = drmCommandWriteRead(dev->fd, DRM_MSM_GET_PARAM, &req, sizeof(req)); in device_create()
443 dev->has_set_iova = true; in device_create()
450 drmCommandWriteRead(dev->fd, DRM_MSM_GEM_NEW, &req_new, sizeof(req_new)); in device_create()
451 dev->va_id = req_new.handle; in device_create()
458 drmCommandWriteRead(dev->fd, in device_create()
460 dev->va_iova = req_info.value; in device_create()
467 drmCommandWriteRead(dev->fd, DRM_MSM_GEM_INFO, &req_offset, sizeof(req_offset)); in device_create()
469 dev->va_map = mmap(0, FAKE_ADDRESS_SPACE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, in device_create()
470 dev->fd, req_offset.value); in device_create()
471 if (dev->va_map == MAP_FAILED) { in device_create()
475 va_start = dev->va_iova; in device_create()
478 printf("Allocated iova %" PRIx64 "\n", dev->va_iova); in device_create()
486 ret = drmCommandWriteRead(dev->fd, DRM_MSM_SUBMITQUEUE_NEW, &req_queue, in device_create()
492 dev->queue_id = req_queue.id; in device_create()
494 rb_tree_init(&dev->buffers); in device_create()
495 util_vma_heap_init(&dev->vma, va_start, ROUND_DOWN_TO(va_size, 4096)); in device_create()
496 u_vector_init(&dev->cmdstreams, 8, sizeof(struct cmdstream)); in device_create()
497 u_vector_init(&dev->wrbufs, 8, sizeof(struct wrbuf)); in device_create()
499 return dev; in device_create()
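
On the msm DRM backend, device_create() opens the render node, probes whether the kernel supports userspace-assigned iovas, and either records the VA range reported by the kernel or reserves the fake address space with one large BO before setting up the VMA heap, the cmdstream/wrbuf vectors and a submitqueue. A condensed sketch of the big-BO path, assuming the standard msm_drm.h UAPI and the tool's FAKE_ADDRESS_SPACE_SIZE constant (error handling trimmed):

#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include "drm-uapi/msm_drm.h"

static int
open_msm_and_reserve_va(uint64_t *va_iova, void **va_map, uint32_t *queue_id)
{
   int fd = drmOpenWithType("msm", NULL, DRM_NODE_RENDER);

   /* One large BO backs the whole fake address space. */
   struct drm_msm_gem_new req_new = { .size = FAKE_ADDRESS_SPACE_SIZE,
                                      .flags = MSM_BO_WC };
   drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &req_new, sizeof(req_new));

   /* Ask where the kernel placed it and where to mmap it. */
   struct drm_msm_gem_info req_iova = { .handle = req_new.handle,
                                        .info = MSM_INFO_GET_IOVA };
   drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &req_iova, sizeof(req_iova));
   *va_iova = req_iova.value;

   struct drm_msm_gem_info req_offset = { .handle = req_new.handle,
                                          .info = MSM_INFO_GET_OFFSET };
   drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &req_offset, sizeof(req_offset));
   *va_map = mmap(NULL, FAKE_ADDRESS_SPACE_SIZE, PROT_READ | PROT_WRITE,
                  MAP_SHARED, fd, req_offset.value);

   /* One submitqueue is enough for replaying. */
   struct drm_msm_submitqueue req_queue = { .prio = 0 };
   drmCommandWriteRead(fd, DRM_MSM_SUBMITQUEUE_NEW, &req_queue,
                       sizeof(req_queue));
   *queue_id = req_queue.id;
   return fd;
}
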
503 device_submit_cmdstreams(struct device *dev) in device_submit_cmdstreams() argument
505 if (!u_vector_length(&dev->cmdstreams)) { in device_submit_cmdstreams()
506 device_free_buffers(dev); in device_submit_cmdstreams()
510 struct drm_msm_gem_submit_cmd cmds[u_vector_length(&dev->cmdstreams)]; in device_submit_cmdstreams()
514 u_vector_foreach(cmd, &dev->cmdstreams) { in device_submit_cmdstreams()
515 struct buffer *cmdstream_buf = device_get_buffer(dev, cmd->iova); in device_submit_cmdstreams()
518 rb_tree_foreach (struct buffer, buf, &dev->buffers, node) { in device_submit_cmdstreams()
530 submit_cmd->submit_idx = dev->has_set_iova ? bo_idx : 0; in device_submit_cmdstreams()
531 if (dev->has_set_iova) { in device_submit_cmdstreams()
534 submit_cmd->submit_offset = cmd->iova - dev->va_iova; in device_submit_cmdstreams()
545 rb_tree_foreach (struct buffer, buf, &dev->buffers, node) { in device_submit_cmdstreams()
550 if (!dev->has_set_iova) { in device_submit_cmdstreams()
557 if (dev->has_set_iova) { in device_submit_cmdstreams()
559 rb_tree_foreach (struct buffer, buf, &dev->buffers, node) { in device_submit_cmdstreams()
569 bo_list[0].handle = dev->va_id; in device_submit_cmdstreams()
572 bo_list[0].presumed = dev->va_iova; in device_submit_cmdstreams()
577 .queueid = dev->queue_id, in device_submit_cmdstreams()
581 .nr_cmds = u_vector_length(&dev->cmdstreams), in device_submit_cmdstreams()
589 int ret = drmCommandWriteRead(dev->fd, DRM_MSM_GEM_SUBMIT, &submit_req, in device_submit_cmdstreams()
602 .queueid = dev->queue_id, in device_submit_cmdstreams()
607 drmCommandWrite(dev->fd, DRM_MSM_WAIT_FENCE, &wait_req, sizeof(wait_req)); in device_submit_cmdstreams()
612 u_vector_finish(&dev->cmdstreams); in device_submit_cmdstreams()
613 u_vector_init(&dev->cmdstreams, 8, sizeof(struct cmdstream)); in device_submit_cmdstreams()
615 device_print_shader_log(dev); in device_submit_cmdstreams()
616 device_print_cp_log(dev); in device_submit_cmdstreams()
618 device_dump_wrbuf(dev); in device_submit_cmdstreams()
619 u_vector_finish(&dev->wrbufs); in device_submit_cmdstreams()
620 u_vector_init(&dev->wrbufs, 8, sizeof(struct wrbuf)); in device_submit_cmdstreams()
622 device_free_buffers(dev); in device_submit_cmdstreams()
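
Submission on msm builds one drm_msm_gem_submit_cmd per recorded cmdstream plus a BO list; with userspace-assigned iovas every tracked buffer gets its own entry, otherwise only the single VA-backing BO is listed and submit_offset is expressed relative to its base. A trimmed sketch for a single IB, assuming the msm_drm.h submit structures:

#include <stdint.h>
#include <xf86drm.h>
#include "drm-uapi/msm_drm.h"

/* Submit one IB living inside one BO, then wait for it. Illustrative only:
 * the real tool sizes its arrays by u_vector_length(&dev->cmdstreams). */
static void
submit_one_ib(int fd, uint32_t queue_id, uint32_t bo_handle, uint64_t bo_iova,
              uint64_t ib_iova, uint32_t ib_size_bytes)
{
   struct drm_msm_gem_submit_bo bo = {
      .handle   = bo_handle,
      .flags    = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE,
      .presumed = bo_iova,                  /* replayed address, already pinned */
   };
   struct drm_msm_gem_submit_cmd cmd = {
      .type          = MSM_SUBMIT_CMD_BUF,
      .submit_idx    = 0,                   /* index into the BO list */
      .submit_offset = ib_iova - bo_iova,   /* IB start inside that BO */
      .size          = ib_size_bytes,
   };
   struct drm_msm_gem_submit submit_req = {
      .flags   = MSM_PIPE_3D0,
      .queueid = queue_id,
      .bos     = (uintptr_t)&bo,
      .nr_bos  = 1,
      .cmds    = (uintptr_t)&cmd,
      .nr_cmds = 1,
   };
   drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &submit_req, sizeof(submit_req));

   /* Block on the returned fence before reading logs and wrbufs back. */
   struct drm_msm_wait_fence wait_req = {
      .fence   = submit_req.fence,
      .queueid = queue_id,
      .timeout = { .tv_sec = 10 },
   };
   drmCommandWrite(fd, DRM_MSM_WAIT_FENCE, &wait_req, sizeof(wait_req));
}
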
626 buffer_mem_alloc(struct device *dev, struct buffer *buf) in buffer_mem_alloc() argument
628 bool success = util_vma_heap_alloc_addr(&dev->vma, buf->iova, buf->size); in buffer_mem_alloc()
632 if (!dev->has_set_iova) { in buffer_mem_alloc()
633 uint64_t offset = buf->iova - dev->va_iova; in buffer_mem_alloc()
635 buf->map = ((uint8_t*)dev->va_map) + offset; in buffer_mem_alloc()
643 drmCommandWriteRead(dev->fd, DRM_MSM_GEM_NEW, &req, sizeof(req)); in buffer_mem_alloc()
659 drmCommandWriteRead(dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req)); in buffer_mem_alloc()
673 drmCommandWriteRead(dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req)); in buffer_mem_alloc()
679 dev->fd, req.value); in buffer_mem_alloc()
689 buffer_mem_free(struct device *dev, struct buffer *buf) in buffer_mem_free() argument
691 if (dev->has_set_iova) { in buffer_mem_free()
700 int ret = drmCommandWriteRead(dev->fd, DRM_MSM_GEM_INFO, &req_iova, in buffer_mem_free()
710 drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req); in buffer_mem_free()
713 util_vma_heap_free(&dev->vma, buf->iova, buf->size); in buffer_mem_free()
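
buffer_mem_alloc() must place each buffer at the exact address recorded in the capture: the VMA heap spanning the device's GPU range is asked for that specific range, and the backing memory is either carved out of the single premapped VA BO or, when the kernel supports it, a fresh GEM BO pinned there with MSM_INFO_SET_IOVA. A small sketch of the reservation step on the premapped path, assuming the tool's struct fields:

#include <stdbool.h>
#include <stdint.h>
#include "util/vma.h"

/* Reserve the captured range and point the CPU mapping at the right slice of
 * the premapped VA BO (the !has_set_iova path). Sketch only. */
static bool
pin_buffer_at_captured_iova(struct device *dev, struct buffer *buf)
{
   if (!util_vma_heap_alloc_addr(&dev->vma, buf->iova, buf->size))
      return false;   /* the range overlaps an already-placed buffer */

   uint64_t offset = buf->iova - dev->va_iova;
   buf->map = (uint8_t *)dev->va_map + offset;
   return true;
}
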
732 struct device *dev = calloc(sizeof(struct device), 1); in device_create() local
734 static const char path[] = "/dev/kgsl-3d0"; in device_create()
736 dev->fd = open(path, O_RDWR | O_CLOEXEC); in device_create()
737 if (dev->fd < 0) { in device_create()
746 int ret = safe_ioctl(dev->fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &req); in device_create()
751 dev->va_id = req.id; in device_create()
752 dev->va_iova = req.gpuaddr; in device_create()
753 dev->va_map = mmap(0, FAKE_ADDRESS_SPACE_SIZE, PROT_READ | PROT_WRITE, in device_create()
754 MAP_SHARED, dev->fd, req.id << 12); in device_create()
756 rb_tree_init(&dev->buffers); in device_create()
757 util_vma_heap_init(&dev->vma, req.gpuaddr, ROUND_DOWN_TO(FAKE_ADDRESS_SPACE_SIZE, 4096)); in device_create()
758 u_vector_init(&dev->cmdstreams, 8, sizeof(struct cmdstream)); in device_create()
759 u_vector_init(&dev->wrbufs, 8, sizeof(struct wrbuf)); in device_create()
767 ret = safe_ioctl(dev->fd, IOCTL_KGSL_DRAWCTXT_CREATE, &drawctxt_req); in device_create()
772 printf("Allocated iova %" PRIx64 "\n", dev->va_iova); in device_create()
774 dev->context_id = drawctxt_req.drawctxt_id; in device_create()
776 return dev; in device_create()
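
The KGSL backend is simpler: one IOCTL_KGSL_GPUMEM_ALLOC_ID call reserves the whole fake address space in a single allocation, which is mmap'ed through the usual id << 12 offset convention, and one draw context is created for submissions. A rough sketch, assuming the msm_kgsl.h UAPI structures (allocation/context flags and error handling omitted):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "msm_kgsl.h"

static int
open_kgsl_and_reserve_va(uint64_t *va_iova, void **va_map, uint32_t *context_id)
{
   int fd = open("/dev/kgsl-3d0", O_RDWR | O_CLOEXEC);

   /* One big allocation backs every replayed buffer. */
   struct kgsl_gpumem_alloc_id alloc = { .size = FAKE_ADDRESS_SPACE_SIZE };
   ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &alloc);
   *va_iova = alloc.gpuaddr;

   /* KGSL exposes allocations through mmap offsets derived from their id. */
   *va_map = mmap(NULL, FAKE_ADDRESS_SPACE_SIZE, PROT_READ | PROT_WRITE,
                  MAP_SHARED, fd, (off_t)alloc.id << 12);

   /* A single draw context is enough for replaying. */
   struct kgsl_drawctxt_create drawctxt = { .flags = 0 };
   ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &drawctxt);
   *context_id = drawctxt.drawctxt_id;
   return fd;
}
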
780 device_submit_cmdstreams(struct device *dev) in device_submit_cmdstreams() argument
782 if (!u_vector_length(&dev->cmdstreams)) { in device_submit_cmdstreams()
783 device_free_buffers(dev); in device_submit_cmdstreams()
787 struct kgsl_command_object cmds[u_vector_length(&dev->cmdstreams)]; in device_submit_cmdstreams()
791 u_vector_foreach(cmd, &dev->cmdstreams) { in device_submit_cmdstreams()
796 submit_cmd->id = dev->va_id; in device_submit_cmdstreams()
803 .numcmds = u_vector_length(&dev->cmdstreams), in device_submit_cmdstreams()
805 .context_id = dev->context_id, in device_submit_cmdstreams()
808 int ret = safe_ioctl(dev->fd, IOCTL_KGSL_GPU_COMMAND, &submit_req); in device_submit_cmdstreams()
815 .context_id = dev->context_id, in device_submit_cmdstreams()
820 ret = safe_ioctl(dev->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait); in device_submit_cmdstreams()
826 u_vector_finish(&dev->cmdstreams); in device_submit_cmdstreams()
827 u_vector_init(&dev->cmdstreams, 8, sizeof(struct cmdstream)); in device_submit_cmdstreams()
829 device_print_shader_log(dev); in device_submit_cmdstreams()
830 device_print_cp_log(dev); in device_submit_cmdstreams()
832 device_dump_wrbuf(dev); in device_submit_cmdstreams()
833 u_vector_finish(&dev->wrbufs); in device_submit_cmdstreams()
834 u_vector_init(&dev->wrbufs, 8, sizeof(struct wrbuf)); in device_submit_cmdstreams()
836 device_free_buffers(dev); in device_submit_cmdstreams()
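
KGSL submission wraps each cmdstream in a kgsl_command_object that points into the single allocation (hence id = dev->va_id), fires them with IOCTL_KGSL_GPU_COMMAND, and then blocks on the returned timestamp. A condensed single-IB sketch, again assuming the msm_kgsl.h layouts; consult the header for the exact field set:

/* Submit one IB out of the big allocation and wait for its timestamp.
 * Illustrative; the real tool submits an array of kgsl_command_object. */
static void
kgsl_submit_one_ib(int fd, uint32_t context_id, uint32_t va_id,
                   uint64_t ib_iova, uint64_t ib_size)
{
   struct kgsl_command_object cmd = {
      .gpuaddr = ib_iova,
      .size    = ib_size,
      .flags   = KGSL_CMDLIST_IB,
      .id      = va_id,            /* the one allocation backing everything */
   };
   struct kgsl_gpu_command submit = {
      .cmdlist    = (uintptr_t)&cmd,
      .cmdsize    = sizeof(cmd),
      .numcmds    = 1,
      .context_id = context_id,
   };
   ioctl(fd, IOCTL_KGSL_GPU_COMMAND, &submit);

   struct kgsl_device_waittimestamp_ctxtid wait = {
      .context_id = context_id,
      .timestamp  = submit.timestamp,   /* written back by the kernel */
      .timeout    = 3000,               /* milliseconds */
   };
   ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait);
}
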
840 buffer_mem_alloc(struct device *dev, struct buffer *buf) in buffer_mem_alloc() argument
842 bool success = util_vma_heap_alloc_addr(&dev->vma, buf->iova, buf->size); in buffer_mem_alloc()
846 buf->map = ((uint8_t*)dev->va_map) + (buf->iova - dev->va_iova); in buffer_mem_alloc()
850 buffer_mem_free(struct device *dev, struct buffer *buf) in buffer_mem_free() argument
852 util_vma_heap_free(&dev->vma, buf->iova, buf->size); in buffer_mem_free()
941 struct device *dev = calloc(sizeof(struct device), 1); in device_create() local
943 static const char path[] = "/dev/dxg"; in device_create()
945 dev->fd = open(path, O_RDWR | O_CLOEXEC); in device_create()
946 if (dev->fd < 0) { in device_create()
947 errx(1, "Cannot open /dev/dxg fd"); in device_create()
955 int ret = safe_ioctl(dev->fd, LX_DXENUMADAPTERS3, &enum_adapters); in device_create()
969 ret = safe_ioctl(dev->fd, LX_DXOPENADAPTERFROMLUID, &open_adapter); in device_create()
979 ret = safe_ioctl(dev->fd, LX_DXCREATEDEVICE, &create_device); in device_create()
985 dev->device = device; in device_create()
1004 ret = safe_ioctl(dev->fd, LX_DXCREATECONTEXTVIRTUAL, &create_context); in device_create()
1009 dev->context = create_context.context; in device_create()
1016 ret = safe_ioctl(dev->fd, LX_DXCREATEPAGINGQUEUE, &create_paging_queue); in device_create()
1051 ret = safe_ioctl(dev->fd, LX_DXCREATEALLOCATION, &create_allocation); in device_create()
1069 ret = safe_ioctl(dev->fd, LX_DXMAPGPUVIRTUALADDRESS, &map_virtual_address); in device_create()
1081 ret = safe_ioctl(dev->fd, LX_DXMAKERESIDENT, &make_resident); in device_create()
1090 ret = safe_ioctl(dev->fd, LX_DXLOCK2, &lock); in device_create()
1095 dev->va_iova = map_virtual_address.virtual_address; in device_create()
1096 dev->va_map = lock.data; in device_create()
1098 rb_tree_init(&dev->buffers); in device_create()
1099 util_vma_heap_init(&dev->vma, dev->va_iova, ROUND_DOWN_TO(alloc_size, 4096)); in device_create()
1100 u_vector_init(&dev->cmdstreams, 8, sizeof(struct cmdstream)); in device_create()
1101 u_vector_init(&dev->wrbufs, 8, sizeof(struct wrbuf)); in device_create()
1103 printf("Allocated iova at 0x%" PRIx64 "\n", dev->va_iova); in device_create()
1106 dev->vma.alloc_high = true; in device_create()
1107 dev->fence_iova = util_vma_heap_alloc(&dev->vma, hole_size, 4096); in device_create()
1108 dev->fence_ib_iova = dev->fence_iova + 8; in device_create()
1109 dev->fence = (uint32_t *) ((uint8_t*)dev->va_map + (dev->fence_iova - dev->va_iova)); in device_create()
1110 dev->fence_ib = (uint32_t *) ((uint8_t*)dev->va_map + (dev->fence_ib_iova - dev->va_iova)); in device_create()
1111 dev->vma.alloc_high = false; in device_create()
1113 return dev; in device_create()
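
On the /dev/dxg (WSL) backend there is no kernel fence to wait on, so device_create() carves a small region out of the top of the VMA heap for a CPU-visible fence word plus a tiny fence IB. A sketch of that carve-out, assuming a 4 KiB reservation (the size used by the real tool differs):

#include <stdint.h>
#include "util/vma.h"

/* Reserve one page at the high end of the address range for the fence word
 * and the trailing fence IB, so it never collides with replayed buffers. */
static void
reserve_fence_page(struct device *dev)
{
   dev->vma.alloc_high = true;
   dev->fence_iova = util_vma_heap_alloc(&dev->vma, 4096, 4096);
   dev->vma.alloc_high = false;

   dev->fence_ib_iova = dev->fence_iova + 8;
   dev->fence    = (uint32_t *)((uint8_t *)dev->va_map +
                                (dev->fence_iova - dev->va_iova));
   dev->fence_ib = (uint32_t *)((uint8_t *)dev->va_map +
                                (dev->fence_ib_iova - dev->va_iova));
}
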
1117 device_submit_cmdstreams(struct device *dev) in device_submit_cmdstreams() argument
1119 if (!u_vector_length(&dev->cmdstreams)) { in device_submit_cmdstreams()
1120 device_free_buffers(dev); in device_submit_cmdstreams()
1124 uint32_t cmdstream_count = u_vector_length(&dev->cmdstreams) + 1; in device_submit_cmdstreams()
1146 u_vector_foreach(cmd, &dev->cmdstreams) { in device_submit_cmdstreams()
1153 priv_data->data0.cmdbuf.ibs[idx].iova = dev->fence_ib_iova; in device_submit_cmdstreams()
1155 *dev->fence = 0x00000000; in device_submit_cmdstreams()
1156 dev->fence_ib[0] = pm4_pkt7_hdr(0x3d, 3); // CP_MEM_WRITE in device_submit_cmdstreams()
1157 dev->fence_ib[1] = dev->fence_iova; in device_submit_cmdstreams()
1158 dev->fence_ib[2] = dev->fence_iova >> 32; in device_submit_cmdstreams()
1159 dev->fence_ib[3] = 0xababfcfc; in device_submit_cmdstreams()
1170 .broadcast_context[0] = dev->context, in device_submit_cmdstreams()
1175 int ret = safe_ioctl(dev->fd, LX_DXSUBMITCOMMAND, &submission); in device_submit_cmdstreams()
1182 u_vector_finish(&dev->cmdstreams); in device_submit_cmdstreams()
1183 u_vector_init(&dev->cmdstreams, 8, sizeof(struct cmdstream)); in device_submit_cmdstreams()
1188 if (*dev->fence != 0) in device_submit_cmdstreams()
1191 if (*dev->fence == 0) { in device_submit_cmdstreams()
1195 device_print_shader_log(dev); in device_submit_cmdstreams()
1196 device_print_cp_log(dev); in device_submit_cmdstreams()
1198 device_dump_wrbuf(dev); in device_submit_cmdstreams()
1199 u_vector_finish(&dev->wrbufs); in device_submit_cmdstreams()
1200 u_vector_init(&dev->wrbufs, 8, sizeof(struct wrbuf)); in device_submit_cmdstreams()
1202 device_free_buffers(dev); in device_submit_cmdstreams()
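
Each dxg submission appends one extra IB that CP_MEM_WRITEs a marker into the fence word; after LX_DXSUBMITCOMMAND the CPU simply polls that word to learn the GPU is done. A sketch of building and waiting on that fence (the poll interval and give-up bound here are assumptions, not the tool's values):

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Build the trailing fence IB: CP_MEM_WRITE of one dword to fence_iova. */
static void
emit_fence_ib(struct device *dev)
{
   *dev->fence = 0;                                    /* CPU clears the word */
   dev->fence_ib[0] = pm4_pkt7_hdr(0x3d, 3);           /* CP_MEM_WRITE, 3 dwords */
   dev->fence_ib[1] = (uint32_t)dev->fence_iova;       /* write address, low bits */
   dev->fence_ib[2] = (uint32_t)(dev->fence_iova >> 32);
   dev->fence_ib[3] = 0xababfcfc;                      /* marker value */
}

/* Poll until the GPU has written the marker, giving up after ~1 s. */
static bool
wait_fence(struct device *dev)
{
   for (unsigned i = 0; i < 10000; i++) {
      if (*(volatile uint32_t *)dev->fence != 0)
         return true;
      usleep(100);
   }
   return false;
}
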
1206 buffer_mem_alloc(struct device *dev, struct buffer *buf) in buffer_mem_alloc() argument
1208 bool success = util_vma_heap_alloc_addr(&dev->vma, buf->iova, buf->size); in buffer_mem_alloc()
1212 buf->map = ((uint8_t*)dev->va_map) + (buf->iova - dev->va_iova); in buffer_mem_alloc()
1216 buffer_mem_free(struct device *dev, struct buffer *buf) in buffer_mem_free() argument
1218 util_vma_heap_free(&dev->vma, buf->iova, buf->size); in buffer_mem_free()
1224 upload_buffer(struct device *dev, uint64_t iova, unsigned int size, in upload_buffer() argument
1227 struct buffer *buf = device_get_buffer(dev, iova); in upload_buffer()
1234 rb_tree_insert(&dev->buffers, &buf->node, rb_buffer_insert_cmp); in upload_buffer()
1236 buffer_mem_alloc(dev, buf); in upload_buffer()
1238 buffer_mem_free(dev, buf); in upload_buffer()
1240 buffer_mem_alloc(dev, buf); in upload_buffer()
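
upload_buffer() is how captured contents reach the replay address space: a missing buffer is created, inserted into the rb tree and placed at its captured iova, while a buffer whose size changed since the last decode is freed and re-placed before the new bytes are copied in. A simplified sketch reusing the tool's rb_buffer_insert_cmp and buffer_mem_alloc()/buffer_mem_free():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Make 'size' bytes of captured data visible at 'iova'. Sketch only. */
static void
upload_buffer_sketch(struct device *dev, uint64_t iova, uint64_t size,
                     const void *data)
{
   struct buffer *buf = device_get_buffer(dev, iova);

   if (!buf) {
      buf = calloc(1, sizeof(*buf));
      buf->iova = iova;
      buf->size = size;
      rb_tree_insert(&dev->buffers, &buf->node, rb_buffer_insert_cmp);
      buffer_mem_alloc(dev, buf);
   } else if (buf->size != size) {
      /* The capture re-uploaded the buffer with a different size. */
      buffer_mem_free(dev, buf);
      buf->size = size;
      buffer_mem_alloc(dev, buf);
   }

   memcpy((uint8_t *)buf->map + (iova - buf->iova), data, size);
}
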
1249 override_cmdstream(struct device *dev, struct cmdstream *cs, in override_cmdstream() argument
1262 uint64_t hole_size = util_vma_heap_get_max_free_continuous_size(&dev->vma); in override_cmdstream()
1263 uint64_t hole_iova = util_vma_heap_alloc(&dev->vma, hole_size, 1); in override_cmdstream()
1264 util_vma_heap_free(&dev->vma, hole_iova, hole_size); in override_cmdstream()
1300 upload_buffer(dev, gpuaddr.gpuaddr, gpuaddr.len, ps.buf); in override_cmdstream()
1315 parse_addr(ps.buf, ps.sz, &sizedwords, &dev->shader_log_iova); in override_cmdstream()
1320 parse_addr(ps.buf, ps.sz, &sizedwords, &dev->cp_log_iova); in override_cmdstream()
1324 struct wrbuf *wrbuf = u_vector_add(&dev->wrbufs); in override_cmdstream()
1334 struct buffer *buf = device_get_buffer(dev, wrbuf->iova); in override_cmdstream()
1384 struct device *dev = device_create(base_addr); in handle_file() local
1413 device_submit_cmdstreams(dev); in handle_file()
1426 upload_buffer(dev, gpuaddr.gpuaddr, gpuaddr.len, ps.buf); in handle_file()
1438 struct cmdstream *cs = u_vector_add(&dev->cmdstreams); in handle_file()
1441 if (override_cmdstream(dev, cs, cmdstreamgen) < 0) in handle_file()
1471 device_submit_cmdstreams(dev); in handle_file()
1473 close(dev->fd); in handle_file()