Lines Matching +full:non +full:- +full:urgent

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
4 * Copyright(c) 2015-2020 Intel Corporation.
149 struct hfi1_devdata *dd = container_of(inode->i_cdev, in hfi1_file_open()
153 if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1)) in hfi1_file_open()
154 return -EINVAL; in hfi1_file_open()
156 if (!refcount_inc_not_zero(&dd->user_refcount)) in hfi1_file_open()
157 return -ENXIO; in hfi1_file_open()
163 if (!fd || init_srcu_struct(&fd->pq_srcu)) in hfi1_file_open()
165 spin_lock_init(&fd->pq_rcu_lock); in hfi1_file_open()
166 spin_lock_init(&fd->tid_lock); in hfi1_file_open()
167 spin_lock_init(&fd->invalid_lock); in hfi1_file_open()
168 fd->rec_cpu_num = -1; /* no cpu affinity by default */ in hfi1_file_open()
169 fd->dd = dd; in hfi1_file_open()
170 fp->private_data = fd; in hfi1_file_open()
174 fp->private_data = NULL; in hfi1_file_open()
175 if (refcount_dec_and_test(&dd->user_refcount)) in hfi1_file_open()
176 complete(&dd->user_comp); in hfi1_file_open()
177 return -ENOMEM; in hfi1_file_open()
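/*
 * Editor's sketch (not part of the driver): the open-path refcounting that
 * hfi1_file_open() uses above. The open only proceeds if the per-device
 * user_refcount is still non-zero, and every error path that already took
 * the reference drops it again, completing user_comp on the last put so a
 * pending unload can finish. The demo_* names below are illustrative only.
 */
#include <linux/refcount.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_dev {
	refcount_t user_refcount;
	struct completion user_comp;
};

static int demo_open(struct demo_dev *dd, void **priv)
{
	if (!refcount_inc_not_zero(&dd->user_refcount))
		return -ENXIO;			/* device is being removed */

	*priv = kzalloc(64, GFP_KERNEL);	/* stand-in for the per-fd state */
	if (!*priv)
		goto nomem;

	return 0;

nomem:
	/* undo the reference taken above, as hfi1_file_open() does */
	if (refcount_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);
	return -ENOMEM;
}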
183 struct hfi1_filedata *fd = fp->private_data; in hfi1_file_ioctl()
184 struct hfi1_ctxtdata *uctxt = fd->uctxt; in hfi1_file_ioctl()
192 return -EINVAL; in hfi1_file_ioctl()
209 sc_return_credits(uctxt->sc); in hfi1_file_ioctl()
225 ret = manage_rcvq(uctxt, fd->subctxt, arg); in hfi1_file_ioctl()
230 return -EFAULT; in hfi1_file_ioctl()
231 uctxt->poll_type = (typeof(uctxt->poll_type))uval; in hfi1_file_ioctl()
235 ret = user_event_ack(uctxt, fd->subctxt, arg); in hfi1_file_ioctl()
249 return -EFAULT; in hfi1_file_ioctl()
253 return -EINVAL; in hfi1_file_ioctl()
261 struct hfi1_filedata *fd = kiocb->ki_filp->private_data; in hfi1_write_iter()
263 struct hfi1_user_sdma_comp_q *cq = fd->cq; in hfi1_write_iter()
265 unsigned long dim = from->nr_segs; in hfi1_write_iter()
269 return -EINVAL; in hfi1_write_iter()
271 return -EINVAL; in hfi1_write_iter()
272 idx = srcu_read_lock(&fd->pq_srcu); in hfi1_write_iter()
273 pq = srcu_dereference(fd->pq, &fd->pq_srcu); in hfi1_write_iter()
275 srcu_read_unlock(&fd->pq_srcu, idx); in hfi1_write_iter()
276 return -EIO; in hfi1_write_iter()
279 trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); in hfi1_write_iter()
281 if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) { in hfi1_write_iter()
282 srcu_read_unlock(&fd->pq_srcu, idx); in hfi1_write_iter()
283 return -ENOSPC; in hfi1_write_iter()
298 dim -= count; in hfi1_write_iter()
303 srcu_read_unlock(&fd->pq_srcu, idx); in hfi1_write_iter()
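/*
 * Editor's sketch (not from the driver): the SRCU read-side pattern that
 * hfi1_write_iter() uses above to access fd->pq. The packet queue pointer
 * is read under srcu_read_lock()/srcu_dereference() so the release path can
 * clear the pointer and wait out readers with synchronize_srcu().
 * demo_fd and demo_pq are illustrative names.
 */
#include <linux/srcu.h>
#include <linux/errno.h>

struct demo_pq {
	int n_reqs;
};

struct demo_fd {
	struct srcu_struct pq_srcu;
	struct demo_pq __rcu *pq;
};

static int demo_submit(struct demo_fd *fd)
{
	struct demo_pq *pq;
	int idx, ret = 0;

	idx = srcu_read_lock(&fd->pq_srcu);
	pq = srcu_dereference(fd->pq, &fd->pq_srcu);
	if (!pq)
		ret = -EIO;		/* queue already torn down */
	else
		pq->n_reqs++;		/* stand-in for queueing real work */
	srcu_read_unlock(&fd->pq_srcu, idx);
	return ret;
}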
315 vma->vm_end - vma->vm_start, vma->vm_flags); in mmap_cdbg()
320 struct hfi1_filedata *fd = fp->private_data; in hfi1_file_mmap()
321 struct hfi1_ctxtdata *uctxt = fd->uctxt; in hfi1_file_mmap()
324 u64 token = vma->vm_pgoff << PAGE_SHIFT, in hfi1_file_mmap()
334 !(vma->vm_flags & VM_SHARED)) { in hfi1_file_mmap()
335 ret = -EINVAL; in hfi1_file_mmap()
338 dd = uctxt->dd; in hfi1_file_mmap()
342 if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) { in hfi1_file_mmap()
343 ret = -EINVAL; in hfi1_file_mmap()
351 vma->vm_pgoff = 0; in hfi1_file_mmap()
352 flags = vma->vm_flags; in hfi1_file_mmap()
357 memaddr = ((dd->physaddr + TXE_PIO_SEND) + in hfi1_file_mmap()
359 (uctxt->sc->hw_context * BIT(16))) + in hfi1_file_mmap()
367 memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE); in hfi1_file_mmap()
370 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in hfi1_file_mmap()
376 ret = -EPERM; in hfi1_file_mmap()
384 cr_page_offset = ((u64)uctxt->sc->hw_free - in hfi1_file_mmap()
385 (u64)dd->cr_base[uctxt->numa_id].va) & in hfi1_file_mmap()
387 memvirt = dd->cr_base[uctxt->numa_id].va + cr_page_offset; in hfi1_file_mmap()
388 memdma = dd->cr_base[uctxt->numa_id].dma + cr_page_offset; in hfi1_file_mmap()
395 * memory been flagged as non-cached? in hfi1_file_mmap()
397 /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */ in hfi1_file_mmap()
402 memvirt = uctxt->rcvhdrq; in hfi1_file_mmap()
403 memdma = uctxt->rcvhdrq_dma; in hfi1_file_mmap()
411 * as multiple non-contiguous pages need to be mapped in hfi1_file_mmap()
414 memlen = uctxt->egrbufs.size; in hfi1_file_mmap()
415 if ((vma->vm_end - vma->vm_start) != memlen) { in hfi1_file_mmap()
417 (vma->vm_end - vma->vm_start), memlen); in hfi1_file_mmap()
418 ret = -EINVAL; in hfi1_file_mmap()
421 if (vma->vm_flags & VM_WRITE) { in hfi1_file_mmap()
422 ret = -EPERM; in hfi1_file_mmap()
433 vm_start_save = vma->vm_start; in hfi1_file_mmap()
434 vm_end_save = vma->vm_end; in hfi1_file_mmap()
435 vma->vm_end = vma->vm_start; in hfi1_file_mmap()
436 for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) { in hfi1_file_mmap()
437 memlen = uctxt->egrbufs.buffers[i].len; in hfi1_file_mmap()
438 memvirt = uctxt->egrbufs.buffers[i].addr; in hfi1_file_mmap()
439 memdma = uctxt->egrbufs.buffers[i].dma; in hfi1_file_mmap()
440 vma->vm_end += memlen; in hfi1_file_mmap()
443 ret = dma_mmap_coherent(&dd->pcidev->dev, vma, in hfi1_file_mmap()
446 vma->vm_start = vm_start_save; in hfi1_file_mmap()
447 vma->vm_end = vm_end_save; in hfi1_file_mmap()
450 vma->vm_start += memlen; in hfi1_file_mmap()
452 vma->vm_start = vm_start_save; in hfi1_file_mmap()
453 vma->vm_end = vm_end_save; in hfi1_file_mmap()
463 (dd->physaddr + RXE_PER_CONTEXT_USER) in hfi1_file_mmap()
464 + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE); in hfi1_file_mmap()
471 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in hfi1_file_mmap()
480 (dd->events + uctxt_offset(uctxt)) & PAGE_MASK; in hfi1_file_mmap()
491 ret = -EPERM; in hfi1_file_mmap()
494 memaddr = kvirt_to_phys((void *)dd->status); in hfi1_file_mmap()
504 ret = -EINVAL; in hfi1_file_mmap()
508 ret = -EPERM; in hfi1_file_mmap()
513 memdma = uctxt->rcvhdrqtailaddr_dma; in hfi1_file_mmap()
517 memaddr = (u64)uctxt->subctxt_uregbase; in hfi1_file_mmap()
523 memaddr = (u64)uctxt->subctxt_rcvhdr_base; in hfi1_file_mmap()
524 memlen = rcvhdrq_size(uctxt) * uctxt->subctxt_cnt; in hfi1_file_mmap()
529 memaddr = (u64)uctxt->subctxt_rcvegrbuf; in hfi1_file_mmap()
530 memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt; in hfi1_file_mmap()
536 struct hfi1_user_sdma_comp_q *cq = fd->cq; in hfi1_file_mmap()
539 ret = -EFAULT; in hfi1_file_mmap()
542 memaddr = (u64)cq->comps; in hfi1_file_mmap()
543 memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries); in hfi1_file_mmap()
549 ret = -EINVAL; in hfi1_file_mmap()
553 if ((vma->vm_end - vma->vm_start) != memlen) { in hfi1_file_mmap()
555 uctxt->ctxt, fd->subctxt, in hfi1_file_mmap()
556 (vma->vm_end - vma->vm_start), memlen); in hfi1_file_mmap()
557 ret = -EINVAL; in hfi1_file_mmap()
565 vma->vm_pgoff = PFN_DOWN(memaddr); in hfi1_file_mmap()
566 vma->vm_ops = &vm_ops; in hfi1_file_mmap()
569 ret = dma_mmap_coherent(&dd->pcidev->dev, vma, in hfi1_file_mmap()
572 ret = io_remap_pfn_range(vma, vma->vm_start, in hfi1_file_mmap()
575 vma->vm_page_prot); in hfi1_file_mmap()
577 ret = remap_pfn_range(vma, vma->vm_start, in hfi1_file_mmap()
580 vma->vm_page_prot); in hfi1_file_mmap()
582 ret = remap_pfn_range(vma, vma->vm_start, in hfi1_file_mmap()
585 vma->vm_page_prot); in hfi1_file_mmap()
 592 * Local (non-chip) user memory is not mapped right away; instead it is
 593 * mapped on demand as it is accessed by the user-level code.
599 page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT)); in vma_fault()
604 vmf->page = page; in vma_fault()
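/*
 * Editor's sketch: the lazy-mapping idea behind vma_fault() above. For
 * vmalloc()-backed regions the driver does not populate the VMA at mmap()
 * time; the ->fault handler resolves each page on first access with
 * vmalloc_to_page() and returns it with an extra reference. demo_fault()
 * is an illustrative name.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static vm_fault_t demo_fault(struct vm_fault *vmf)
{
	struct page *page;

	/* vm_pgoff was repurposed at mmap() time to carry the kernel address */
	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;
	return 0;
}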
614 uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt; in hfi1_poll()
617 else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT) in hfi1_poll()
619 else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV) in hfi1_poll()
629 struct hfi1_filedata *fdata = fp->private_data; in hfi1_file_close()
630 struct hfi1_ctxtdata *uctxt = fdata->uctxt; in hfi1_file_close()
631 struct hfi1_devdata *dd = container_of(inode->i_cdev, in hfi1_file_close()
636 fp->private_data = NULL; in hfi1_file_close()
641 hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); in hfi1_file_close()
648 hfi1_put_proc_affinity(fdata->rec_cpu_num); in hfi1_file_close()
654 * fdata->uctxt is used in the above cleanup. It is not ready to be in hfi1_file_close()
657 fdata->uctxt = NULL; in hfi1_file_close()
664 ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt; in hfi1_file_close()
667 spin_lock_irqsave(&dd->uctxt_lock, flags); in hfi1_file_close()
668 __clear_bit(fdata->subctxt, uctxt->in_use_ctxts); in hfi1_file_close()
669 if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) { in hfi1_file_close()
670 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in hfi1_file_close()
673 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in hfi1_file_close()
693 if (uctxt->sc) { in hfi1_file_close()
694 sc_disable(uctxt->sc); in hfi1_file_close()
695 set_pio_integrity(uctxt->sc); in hfi1_file_close()
701 uctxt->event_flags = 0; in hfi1_file_close()
706 if (refcount_dec_and_test(&dd->user_refcount)) in hfi1_file_close()
707 complete(&dd->user_comp); in hfi1_file_close()
709 cleanup_srcu_struct(&fdata->pq_srcu); in hfi1_file_close()
731 * complete_subctxt - complete sub-context info
734 * Sub-context info can only be set up after the base context
748 * sub-context info can only be set up after the base context in complete_subctxt()
752 fd->uctxt->wait, in complete_subctxt()
753 !test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags)); in complete_subctxt()
755 if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags)) in complete_subctxt()
756 ret = -ENOMEM; in complete_subctxt()
758 /* Finish the sub-context init */ in complete_subctxt()
760 fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id); in complete_subctxt()
761 ret = init_user_ctxt(fd, fd->uctxt); in complete_subctxt()
765 spin_lock_irqsave(&fd->dd->uctxt_lock, flags); in complete_subctxt()
766 __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); in complete_subctxt()
767 spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); in complete_subctxt()
768 hfi1_rcd_put(fd->uctxt); in complete_subctxt()
769 fd->uctxt = NULL; in complete_subctxt()
782 if (fd->uctxt) in assign_ctxt()
783 return -EINVAL; in assign_ctxt()
786 return -EINVAL; in assign_ctxt()
789 return -EFAULT; in assign_ctxt()
793 return -ENODEV; in assign_ctxt()
796 return -EINVAL; in assign_ctxt()
804 * Get a sub context if available (fd->uctxt will be set). in assign_ctxt()
805 * ret < 0 error, 0 no context, 1 sub-context found in assign_ctxt()
814 ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt); in assign_ctxt()
836 * match_ctxt - match context
848 struct hfi1_devdata *dd = fd->dd; in match_ctxt()
853 if (uctxt->sc && (uctxt->sc->type == SC_KERNEL)) in match_ctxt()
857 if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) || in match_ctxt()
858 uctxt->jkey != generate_jkey(current_uid()) || in match_ctxt()
859 uctxt->subctxt_id != uinfo->subctxt_id || in match_ctxt()
860 uctxt->subctxt_cnt != uinfo->subctxt_cnt) in match_ctxt()
864 if (uctxt->userversion != uinfo->userversion) in match_ctxt()
865 return -EINVAL; in match_ctxt()
868 spin_lock_irqsave(&dd->uctxt_lock, flags); in match_ctxt()
869 if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) { in match_ctxt()
871 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in match_ctxt()
875 subctxt = find_first_zero_bit(uctxt->in_use_ctxts, in match_ctxt()
877 if (subctxt >= uctxt->subctxt_cnt) { in match_ctxt()
878 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in match_ctxt()
879 return -EBUSY; in match_ctxt()
882 fd->subctxt = subctxt; in match_ctxt()
883 __set_bit(fd->subctxt, uctxt->in_use_ctxts); in match_ctxt()
884 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in match_ctxt()
886 fd->uctxt = uctxt; in match_ctxt()
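/*
 * Editor's sketch: the bitmap-slot pattern match_ctxt() uses above to claim
 * a free sub-context under the device lock: find the first clear bit, fail
 * with -EBUSY when the shared context is full, otherwise mark the slot in
 * use before dropping the lock. The demo_* names are illustrative only.
 */
#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

#define DEMO_MAX_SLOTS 8

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_BITMAP(demo_slots, DEMO_MAX_SLOTS);

static int demo_claim_slot(void)
{
	unsigned long flags;
	unsigned int slot;

	spin_lock_irqsave(&demo_lock, flags);
	slot = find_first_zero_bit(demo_slots, DEMO_MAX_SLOTS);
	if (slot >= DEMO_MAX_SLOTS) {
		spin_unlock_irqrestore(&demo_lock, flags);
		return -EBUSY;
	}
	__set_bit(slot, demo_slots);
	spin_unlock_irqrestore(&demo_lock, flags);

	return slot;
}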
 893 * find_sub_ctxt - find sub-context in find_sub_ctxt()
901 * 0 No sub-context found
910 struct hfi1_devdata *dd = fd->dd; in find_sub_ctxt()
914 if (!uinfo->subctxt_cnt) in find_sub_ctxt()
917 for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) { in find_sub_ctxt()
938 if (dd->flags & HFI1_FROZEN) { in allocate_ctxt()
946 return -EIO; in allocate_ctxt()
949 if (!dd->freectxts) in allocate_ctxt()
950 return -EBUSY; in allocate_ctxt()
956 fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node); in allocate_ctxt()
957 if (fd->rec_cpu_num != -1) in allocate_ctxt()
958 numa = cpu_to_node(fd->rec_cpu_num); in allocate_ctxt()
961 ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt); in allocate_ctxt()
967 uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num, in allocate_ctxt()
968 uctxt->numa_id); in allocate_ctxt()
973 uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node); in allocate_ctxt()
974 if (!uctxt->sc) { in allocate_ctxt()
975 ret = -ENOMEM; in allocate_ctxt()
978 hfi1_cdbg(PROC, "allocated send context %u(%u)", uctxt->sc->sw_index, in allocate_ctxt()
979 uctxt->sc->hw_context); in allocate_ctxt()
980 ret = sc_enable(uctxt->sc); in allocate_ctxt()
 985 * Set up sub-context information if the user level has requested in allocate_ctxt()
987 * This has to be done here so the rest of the sub-contexts find the in allocate_ctxt()
993 __set_bit(0, uctxt->in_use_ctxts); in allocate_ctxt()
994 if (uinfo->subctxt_cnt) in allocate_ctxt()
996 uctxt->userversion = uinfo->userversion; in allocate_ctxt()
997 uctxt->flags = hfi1_cap_mask; /* save current flag state */ in allocate_ctxt()
998 init_waitqueue_head(&uctxt->wait); in allocate_ctxt()
999 strscpy(uctxt->comm, current->comm, sizeof(uctxt->comm)); in allocate_ctxt()
1000 memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)); in allocate_ctxt()
1001 uctxt->jkey = generate_jkey(current_uid()); in allocate_ctxt()
1007 if (dd->freectxts-- == dd->num_user_contexts) in allocate_ctxt()
1022 hfi1_stats.sps_ctxts--; in deallocate_ctxt()
1023 if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts) in deallocate_ctxt()
1024 aspm_enable_all(uctxt->dd); in deallocate_ctxt()
1033 uctxt->subctxt_cnt = uinfo->subctxt_cnt; in init_subctxts()
1034 uctxt->subctxt_id = uinfo->subctxt_id; in init_subctxts()
1035 set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); in init_subctxts()
1041 u16 num_subctxts = uctxt->subctxt_cnt; in setup_subctxt()
1043 uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE); in setup_subctxt()
1044 if (!uctxt->subctxt_uregbase) in setup_subctxt()
1045 return -ENOMEM; in setup_subctxt()
1048 uctxt->subctxt_rcvhdr_base = vmalloc_user(rcvhdrq_size(uctxt) * in setup_subctxt()
1050 if (!uctxt->subctxt_rcvhdr_base) { in setup_subctxt()
1051 ret = -ENOMEM; in setup_subctxt()
1055 uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size * in setup_subctxt()
1057 if (!uctxt->subctxt_rcvegrbuf) { in setup_subctxt()
1058 ret = -ENOMEM; in setup_subctxt()
1065 vfree(uctxt->subctxt_rcvhdr_base); in setup_subctxt()
1066 uctxt->subctxt_rcvhdr_base = NULL; in setup_subctxt()
1068 vfree(uctxt->subctxt_uregbase); in setup_subctxt()
1069 uctxt->subctxt_uregbase = NULL; in setup_subctxt()
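/*
 * Editor's sketch: the unwind style setup_subctxt() uses above when it
 * allocates several vmalloc_user() regions; each failure path frees only
 * what was already allocated before reporting -ENOMEM. demo_setup() and
 * the demo_buf_* pointers are illustrative only.
 */
#include <linux/vmalloc.h>
#include <linux/errno.h>

static void *demo_buf_a, *demo_buf_b;

static int demo_setup(unsigned long len)
{
	demo_buf_a = vmalloc_user(len);
	if (!demo_buf_a)
		return -ENOMEM;

	demo_buf_b = vmalloc_user(len);
	if (!demo_buf_b)
		goto bail_a;

	return 0;

bail_a:
	vfree(demo_buf_a);
	demo_buf_a = NULL;
	return -ENOMEM;
}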
1079 uctxt->urgent = 0; in user_init()
1080 uctxt->urgent_poll = 0; in user_init()
1089 * explicitly set the in-memory tail copy to 0 beforehand, so we in user_init()
1097 hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey); in user_init()
1101 if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP)) in user_init()
1108 if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR)) in user_init()
1110 if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL)) in user_init()
1112 if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL)) in user_init()
1120 if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL)) in user_init()
1124 hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt); in user_init()
1130 struct hfi1_ctxtdata *uctxt = fd->uctxt; in get_ctxt_info()
1133 return -EINVAL; in get_ctxt_info()
1136 cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) & in get_ctxt_info()
1138 HFI1_CAP_UGET_MASK(uctxt->flags, MASK) | in get_ctxt_info()
1139 HFI1_CAP_KGET_MASK(uctxt->flags, K2U); in get_ctxt_info()
1141 if (!fd->use_mn) in get_ctxt_info()
1145 cinfo.unit = uctxt->dd->unit; in get_ctxt_info()
1146 cinfo.ctxt = uctxt->ctxt; in get_ctxt_info()
1147 cinfo.subctxt = fd->subctxt; in get_ctxt_info()
1148 cinfo.rcvtids = roundup(uctxt->egrbufs.alloced, in get_ctxt_info()
1149 uctxt->dd->rcv_entries.group_size) + in get_ctxt_info()
1150 uctxt->expected_count; in get_ctxt_info()
1151 cinfo.credits = uctxt->sc->credits; in get_ctxt_info()
1152 cinfo.numa_node = uctxt->numa_id; in get_ctxt_info()
1153 cinfo.rec_cpu = fd->rec_cpu_num; in get_ctxt_info()
1154 cinfo.send_ctxt = uctxt->sc->hw_context; in get_ctxt_info()
1156 cinfo.egrtids = uctxt->egrbufs.alloced; in get_ctxt_info()
1159 cinfo.sdma_ring_size = fd->cq->nentries; in get_ctxt_info()
1160 cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size; in get_ctxt_info()
1162 trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo); in get_ctxt_info()
1164 return -EFAULT; in get_ctxt_info()
1188 struct hfi1_devdata *dd = uctxt->dd; in setup_base_ctxt()
1191 hfi1_init_ctxt(uctxt->sc); in setup_base_ctxt()
1202 /* If sub-contexts are enabled, do the appropriate setup */ in setup_base_ctxt()
1203 if (uctxt->subctxt_cnt) in setup_base_ctxt()
1221 fd->uctxt = uctxt; in setup_base_ctxt()
1225 if (uctxt->subctxt_cnt) { in setup_base_ctxt()
1227 * On error, set the failed bit so sub-contexts will clean up in setup_base_ctxt()
1231 set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); in setup_base_ctxt()
1235 * using a sub-context that is waiting for this completion. in setup_base_ctxt()
1237 clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags); in setup_base_ctxt()
1238 wake_up(&uctxt->wait); in setup_base_ctxt()
1247 struct hfi1_ctxtdata *uctxt = fd->uctxt; in get_base_info()
1248 struct hfi1_devdata *dd = uctxt->dd; in get_base_info()
1251 trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt); in get_base_info()
1254 return -EINVAL; in get_base_info()
1257 binfo.hw_version = dd->revision; in get_base_info()
1260 binfo.jkey = uctxt->jkey; in get_base_info()
1267 offset = ((u64)uctxt->sc->hw_free - in get_base_info()
1268 (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE; in get_base_info()
1269 binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt, in get_base_info()
1270 fd->subctxt, offset); in get_base_info()
1271 binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt, in get_base_info()
1272 fd->subctxt, in get_base_info()
1273 uctxt->sc->base_addr); in get_base_info()
1275 uctxt->ctxt, in get_base_info()
1276 fd->subctxt, in get_base_info()
1277 uctxt->sc->base_addr); in get_base_info()
1278 binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt, in get_base_info()
1279 fd->subctxt, in get_base_info()
1280 uctxt->rcvhdrq); in get_base_info()
1281 binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt, in get_base_info()
1282 fd->subctxt, in get_base_info()
1283 uctxt->egrbufs.rcvtids[0].dma); in get_base_info()
1284 binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt, in get_base_info()
1285 fd->subctxt, 0); in get_base_info()
1290 binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt, in get_base_info()
1291 fd->subctxt, 0); in get_base_info()
1292 offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) * in get_base_info()
1293 sizeof(*dd->events)); in get_base_info()
1294 binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt, in get_base_info()
1295 fd->subctxt, in get_base_info()
1297 binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt, in get_base_info()
1298 fd->subctxt, in get_base_info()
1299 dd->status); in get_base_info()
1301 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt, in get_base_info()
1302 fd->subctxt, 0); in get_base_info()
1303 if (uctxt->subctxt_cnt) { in get_base_info()
1305 uctxt->ctxt, in get_base_info()
1306 fd->subctxt, 0); in get_base_info()
1308 uctxt->ctxt, in get_base_info()
1309 fd->subctxt, 0); in get_base_info()
1311 uctxt->ctxt, in get_base_info()
1312 fd->subctxt, 0); in get_base_info()
1316 return -EFAULT; in get_base_info()
1322 * user_exp_rcv_setup - Set up the given tid rcv list
1338 return -EINVAL; in user_exp_rcv_setup()
1341 return -EFAULT; in user_exp_rcv_setup()
1352 ret = -EFAULT; in user_exp_rcv_setup()
1357 ret = -EFAULT; in user_exp_rcv_setup()
1367 * user_exp_rcv_clear - Clear the given tid rcv list
1384 return -EINVAL; in user_exp_rcv_clear()
1387 return -EFAULT; in user_exp_rcv_clear()
1394 return -EFAULT; in user_exp_rcv_clear()
1401 * user_exp_rcv_invalid - Invalidate the given tid rcv list
1417 return -EINVAL; in user_exp_rcv_invalid()
1419 if (!fd->invalid_tids) in user_exp_rcv_invalid()
1420 return -EINVAL; in user_exp_rcv_invalid()
1423 return -EFAULT; in user_exp_rcv_invalid()
1432 ret = -EFAULT; in user_exp_rcv_invalid()
1440 struct hfi1_filedata *fd = fp->private_data; in poll_urgent()
1441 struct hfi1_ctxtdata *uctxt = fd->uctxt; in poll_urgent()
1442 struct hfi1_devdata *dd = uctxt->dd; in poll_urgent()
1445 poll_wait(fp, &uctxt->wait, pt); in poll_urgent()
1447 spin_lock_irq(&dd->uctxt_lock); in poll_urgent()
1448 if (uctxt->urgent != uctxt->urgent_poll) { in poll_urgent()
1450 uctxt->urgent_poll = uctxt->urgent; in poll_urgent()
1453 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags); in poll_urgent()
1455 spin_unlock_irq(&dd->uctxt_lock); in poll_urgent()
1463 struct hfi1_filedata *fd = fp->private_data; in poll_next()
1464 struct hfi1_ctxtdata *uctxt = fd->uctxt; in poll_next()
1465 struct hfi1_devdata *dd = uctxt->dd; in poll_next()
1468 poll_wait(fp, &uctxt->wait, pt); in poll_next()
1470 spin_lock_irq(&dd->uctxt_lock); in poll_next()
1472 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags); in poll_next()
1478 spin_unlock_irq(&dd->uctxt_lock); in poll_next()
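/*
 * Editor's sketch: the ->poll shape shared by poll_urgent() and poll_next()
 * above. poll_wait() registers the context wait queue without sleeping;
 * readiness is then reported from current state, and when nothing is ready
 * a flag is left armed so the receive interrupt knows to wake the queue.
 * demo_poll(), demo_wq and demo_ready are illustrative only.
 */
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/fs.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_ready;
static bool demo_waiting;

static __poll_t demo_poll(struct file *fp, struct poll_table_struct *pt)
{
	__poll_t pollflag = 0;

	poll_wait(fp, &demo_wq, pt);
	if (demo_ready)
		pollflag = EPOLLIN | EPOLLRDNORM;
	else
		demo_waiting = true;	/* wake-up path checks this */
	return pollflag;
}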
1491 struct hfi1_devdata *dd = ppd->dd; in hfi1_set_uevent_bits()
1494 if (!dd->events) in hfi1_set_uevent_bits()
1495 return -EINVAL; in hfi1_set_uevent_bits()
1497 for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts; in hfi1_set_uevent_bits()
1507 evs = dd->events + uctxt_offset(uctxt); in hfi1_set_uevent_bits()
1509 for (i = 1; i < uctxt->subctxt_cnt; i++) in hfi1_set_uevent_bits()
1519 * manage_rcvq - manage a context's receive queue
1521 * @subctxt: the sub-context
1525 * overflow conditions. start_stop==1 re-enables, to be used to
1526 * re-init the software copy of the head register
1531 struct hfi1_devdata *dd = uctxt->dd; in manage_rcvq()
1539 return -EFAULT; in manage_rcvq()
1544 * On enable, force in-memory copy of the tail register to in manage_rcvq()
1546 * whether or not the chip has yet updated the in-memory in manage_rcvq()
1572 struct hfi1_devdata *dd = uctxt->dd; in user_event_ack()
1576 if (!dd->events) in user_event_ack()
1580 return -EFAULT; in user_event_ack()
1582 evs = dd->events + uctxt_offset(uctxt) + subctxt; in user_event_ack()
1595 struct hfi1_pportdata *ppd = uctxt->ppd; in set_ctxt_pkey()
1596 struct hfi1_devdata *dd = uctxt->dd; in set_ctxt_pkey()
1600 return -EPERM; in set_ctxt_pkey()
1603 return -EFAULT; in set_ctxt_pkey()
1606 return -EINVAL; in set_ctxt_pkey()
1608 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) in set_ctxt_pkey()
1609 if (pkey == ppd->pkeys[i]) in set_ctxt_pkey()
1612 return -ENOENT; in set_ctxt_pkey()
1616 * ctxt_reset - Reset the user context
1625 if (!uctxt || !uctxt->dd || !uctxt->sc) in ctxt_reset()
1626 return -EINVAL; in ctxt_reset()
1631 * re-initialized. If user level breaks that guarantee, it will in ctxt_reset()
1634 dd = uctxt->dd; in ctxt_reset()
1635 sc = uctxt->sc; in ctxt_reset()
1642 sc->halt_wait, (sc->flags & SCF_HALTED), in ctxt_reset()
1644 if (!(sc->flags & SCF_HALTED)) in ctxt_reset()
1645 return -ENOLCK; in ctxt_reset()
1651 if (sc->flags & SCF_FROZEN) { in ctxt_reset()
1653 dd->event_queue, in ctxt_reset()
1654 !(READ_ONCE(dd->flags) & HFI1_FROZEN), in ctxt_reset()
1656 if (dd->flags & HFI1_FROZEN) in ctxt_reset()
1657 return -ENOLCK; in ctxt_reset()
1659 if (dd->flags & HFI1_FORCED_FREEZE) in ctxt_reset()
1664 return -ENODEV; in ctxt_reset()
1681 hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device); in user_remove()
1689 snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit); in user_add()
1690 ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops, in user_add()
1691 &dd->user_cdev, &dd->user_device, in user_add()
1692 true, &dd->verbs_dev.rdi.ibdev.dev.kobj); in user_add()
1700 * Create per-unit files in /dev
1708 * Remove per-unit files in /dev