Lines Matching +full:can +full:- +full:fd
1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
7 * Manage the dynamic fd arrays in the process files_struct.
30 * __file_ref_put - Slowpath of file_ref_put()
38 * possible. This signals the caller that it can safely schedule the
53 * This can fail if a concurrent get() operation has in __file_ref_put()
59 if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD)) in __file_ref_put()
63 * The caller can safely schedule the object for in __file_ref_put()
76 atomic_long_set(&ref->refcnt, FILE_REF_DEAD); in __file_ref_put()
86 atomic_long_set(&ref->refcnt, FILE_REF_SATURATED); in __file_ref_put()
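The DEAD/SATURATED markers above protect the refcount at both ends: a release cmpxchg to FILE_REF_DEAD claims the object for freeing, while a counter that creeps toward overflow is pinned to FILE_REF_SATURATED and simply never freed. A minimal userspace sketch of the saturation half, using C11 atomics; the kernel's actual file_ref encoding (bias, exact constants, failure handling) differs in detail.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

#define REF_SATURATED (LONG_MAX / 2)	/* illustrative, not the kernel value */

struct sat_ref { _Atomic long cnt; };	/* number of live references */

static void sat_ref_get(struct sat_ref *r)
{
	long old = atomic_fetch_add_explicit(&r->cnt, 1, memory_order_acquire);
	if (old >= REF_SATURATED)	/* too close to overflow: pin it forever */
		atomic_store_explicit(&r->cnt, REF_SATURATED, memory_order_relaxed);
}

/* Returns true when the caller dropped the last reference and may free. */
static bool sat_ref_put(struct sat_ref *r)
{
	long old = atomic_fetch_sub_explicit(&r->cnt, 1, memory_order_release);
	if (old >= REF_SATURATED) {	/* saturated objects are never freed */
		atomic_store_explicit(&r->cnt, REF_SATURATED, memory_order_relaxed);
		return false;
	}
	return old == 1;
}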
93 /* our min() is unusable in constant expressions ;-/ */
96 __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
100 kvfree(fdt->fd); in __free_fdtable()
101 kvfree(fdt->open_fds); in __free_fdtable()
113 #define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
115 * Copy 'count' fd bits from the old table to the new table and clear the extra
124 bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds, in copy_fd_bitmaps()
126 bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec, in copy_fd_bitmaps()
128 bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits, in copy_fd_bitmaps()
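What the three bitmap_copy_and_extend() calls above amount to, written out with plain word operations: copy the low count bits of the old bitmap into the new one and clear everything beyond them up to the new size. Illustrative sketch only, not the kernel helper.

#include <string.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void copy_and_extend(unsigned long *dst, const unsigned long *src,
			    unsigned int count, unsigned int size)
{
	unsigned int words = BITS_TO_LONGS(count);

	memcpy(dst, src, words * sizeof(unsigned long));
	if (count % BITS_PER_LONG)	/* clear the unused tail of the last copied word */
		dst[words - 1] &= (1UL << (count % BITS_PER_LONG)) - 1;
	memset(dst + words, 0, (BITS_TO_LONGS(size) - words) * sizeof(unsigned long));
}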
140 BUG_ON(nfdt->max_fds < ofdt->max_fds); in copy_fdtable()
142 cpy = ofdt->max_fds * sizeof(struct file *); in copy_fdtable()
143 set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *); in copy_fdtable()
144 memcpy(nfdt->fd, ofdt->fd, cpy); in copy_fdtable()
145 memset((char *)nfdt->fd + cpy, 0, set); in copy_fdtable()
167 * the fdarray into comfortable page-tuned chunks: starting at 1024B in alloc_fdtable()
169 * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab in alloc_fdtable()
173 * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is in alloc_fdtable()
174 * 256 slots (i.e. 1Kb fd array). in alloc_fdtable()
175 * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there in alloc_fdtable()
183 * Note that this can drive nr *below* what we had passed if sysctl_nr_open in alloc_fdtable()
186 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise in alloc_fdtable()
192 return ERR_PTR(-EMFILE); in alloc_fdtable()
198 fdt->max_fds = nr; in alloc_fdtable()
202 fdt->fd = data; in alloc_fdtable()
209 fdt->open_fds = data; in alloc_fdtable()
211 fdt->close_on_exec = data; in alloc_fdtable()
213 fdt->full_fds_bits = data; in alloc_fdtable()
218 kvfree(fdt->fd); in alloc_fdtable()
222 return ERR_PTR(-ENOMEM); in alloc_fdtable()
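A userspace sketch of the sizing policy described in the comments above: grow the request to the next power-of-two chunk (with a floor of roughly 1KB worth of file pointers), keep the result a multiple of BITS_PER_LONG, and clamp it to the configured maximum; the caller fails with EMFILE when the clamp drops below what was asked for. The names and exact rounding here are illustrative, not the kernel's alloc_fdtable().

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned int fdtable_size(unsigned int slots_wanted, unsigned int nr_open)
{
	/* 1KB of file pointers: 128 slots on 64-bit, 256 on 32-bit */
	unsigned int nr = 1024 / sizeof(void *);

	while (nr < slots_wanted)		/* page-tuned doubling */
		nr *= 2;
	if (nr > nr_open)			/* respect the sysctl-style cap, */
		nr = nr_open & ~(BITS_PER_LONG - 1);	/* rounded down to a whole word */
	return nr;	/* caller: if (nr < slots_wanted) return ERR_PTR(-EMFILE) */
}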
227 * This function will allocate a new fdtable and both fd array and fdset, of
230 * The files->file_lock should be held on entry, and will be held on exit.
233 __releases(files->file_lock) in expand_fdtable()
234 __acquires(files->file_lock) in expand_fdtable()
238 spin_unlock(&files->file_lock); in expand_fdtable()
244 if (atomic_read(&files->count) > 1) in expand_fdtable()
247 spin_lock(&files->file_lock); in expand_fdtable()
251 BUG_ON(nr < cur_fdt->max_fds); in expand_fdtable()
253 rcu_assign_pointer(files->fdt, new_fdt); in expand_fdtable()
254 if (cur_fdt != &files->fdtab) in expand_fdtable()
255 call_rcu(&cur_fdt->rcu, free_fdtable_rcu); in expand_fdtable()
266 * The files->file_lock should be held on entry, and will be held on exit.
269 __releases(files->file_lock) in expand_files()
270 __acquires(files->file_lock) in expand_files()
279 if (nr < fdt->max_fds) in expand_files()
282 if (unlikely(files->resize_in_progress)) { in expand_files()
283 spin_unlock(&files->file_lock); in expand_files()
284 wait_event(files->resize_wait, !files->resize_in_progress); in expand_files()
285 spin_lock(&files->file_lock); in expand_files()
289 /* Can we expand? */ in expand_files()
291 return -EMFILE; in expand_files()
294 files->resize_in_progress = true; in expand_files()
296 files->resize_in_progress = false; in expand_files()
298 wake_up_all(&files->resize_wait); in expand_files()
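The resize_in_progress / resize_wait handshake visible above, modelled with a pthread mutex and condition variable as a stand-in for the kernel's spinlock plus wait_event()/wake_up_all(). A sketch with invented names, not the kernel code.

#include <pthread.h>
#include <stdbool.h>

struct table {
	pthread_mutex_t lock;
	pthread_cond_t  resize_wait;
	bool            resize_in_progress;
	unsigned int    max_slots;
};

/* Called with t->lock held; may drop and retake it, like expand_files(). */
static int expand_slots(struct table *t, unsigned int nr)
{
	while (t->resize_in_progress)		/* another thread is resizing: */
		pthread_cond_wait(&t->resize_wait, &t->lock);	/* sleep with the lock dropped */
	if (nr < t->max_slots)			/* the table grew enough while we slept */
		return 0;

	t->resize_in_progress = true;
	pthread_mutex_unlock(&t->lock);		/* allocate without holding the lock */
	/* ... allocate and copy into a larger table here ... */
	pthread_mutex_lock(&t->lock);
	t->max_slots = nr;			/* publish the new capacity */
	t->resize_in_progress = false;
	pthread_cond_broadcast(&t->resize_wait);
	return 1;
}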
302 static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt, in __set_close_on_exec() argument
306 __set_bit(fd, fdt->close_on_exec); in __set_close_on_exec()
308 if (test_bit(fd, fdt->close_on_exec)) in __set_close_on_exec()
309 __clear_bit(fd, fdt->close_on_exec); in __set_close_on_exec()
313 static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set) in __set_open_fd() argument
315 __set_bit(fd, fdt->open_fds); in __set_open_fd()
316 __set_close_on_exec(fd, fdt, set); in __set_open_fd()
317 fd /= BITS_PER_LONG; in __set_open_fd()
318 if (!~fdt->open_fds[fd]) in __set_open_fd()
319 __set_bit(fd, fdt->full_fds_bits); in __set_open_fd()
322 static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) in __clear_open_fd() argument
324 __clear_bit(fd, fdt->open_fds); in __clear_open_fd()
325 fd /= BITS_PER_LONG; in __clear_open_fd()
326 if (test_bit(fd, fdt->full_fds_bits)) in __clear_open_fd()
327 __clear_bit(fd, fdt->full_fds_bits); in __clear_open_fd()
330 static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt) in fd_is_open() argument
332 return test_bit(fd, fdt->open_fds); in fd_is_open()
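The bookkeeping done by __set_open_fd()/__clear_open_fd() above keeps two bitmap levels in sync: one bit per descriptor in open_fds, and one bit per completely-full word of open_fds in full_fds_bits, so a later search can skip whole words. Plain-C sketch with invented names:

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void mark_fd_used(unsigned long *open_fds, unsigned long *full_fds_bits,
			 unsigned int fd)
{
	unsigned int word = fd / BITS_PER_LONG;

	open_fds[word] |= 1UL << (fd % BITS_PER_LONG);
	if (!~open_fds[word])		/* every bit in this word is now set */
		full_fds_bits[word / BITS_PER_LONG] |= 1UL << (word % BITS_PER_LONG);
}

static void mark_fd_free(unsigned long *open_fds, unsigned long *full_fds_bits,
			 unsigned int fd)
{
	unsigned int word = fd / BITS_PER_LONG;

	open_fds[word] &= ~(1UL << (fd % BITS_PER_LONG));
	full_fds_bits[word / BITS_PER_LONG] &= ~(1UL << (word % BITS_PER_LONG));
}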
339 * punch_hole is optional - when close_range() is asked to unshare
346 unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds); in sane_fdtable_size()
348 if (last == fdt->max_fds) in sane_fdtable_size()
350 if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) { in sane_fdtable_size()
351 last = find_last_bit(fdt->open_fds, punch_hole->from); in sane_fdtable_size()
352 if (last == punch_hole->from) in sane_fdtable_size()
372 return ERR_PTR(-ENOMEM); in dup_fd()
374 atomic_set(&newf->count, 1); in dup_fd()
376 spin_lock_init(&newf->file_lock); in dup_fd()
377 newf->resize_in_progress = false; in dup_fd()
378 init_waitqueue_head(&newf->resize_wait); in dup_fd()
379 newf->next_fd = 0; in dup_fd()
380 new_fdt = &newf->fdtab; in dup_fd()
381 new_fdt->max_fds = NR_OPEN_DEFAULT; in dup_fd()
382 new_fdt->close_on_exec = newf->close_on_exec_init; in dup_fd()
383 new_fdt->open_fds = newf->open_fds_init; in dup_fd()
384 new_fdt->full_fds_bits = newf->full_fds_bits_init; in dup_fd()
385 new_fdt->fd = &newf->fd_array[0]; in dup_fd()
387 spin_lock(&oldf->file_lock); in dup_fd()
392 * Check whether we need to allocate a larger fd array and fd set. in dup_fd()
394 while (unlikely(open_files > new_fdt->max_fds)) { in dup_fd()
395 spin_unlock(&oldf->file_lock); in dup_fd()
397 if (new_fdt != &newf->fdtab) in dup_fd()
407 * Reacquire the oldf lock and a pointer to its fd table; in dup_fd()
408 * by now it may have a new, bigger fd table. We need in dup_fd()

411 spin_lock(&oldf->file_lock); in dup_fd()
418 old_fds = old_fdt->fd; in dup_fd()
419 new_fds = new_fdt->fd; in dup_fd()
422 * We may be racing against fd allocation from other threads using this in dup_fd()
423 * files_struct, despite holding ->file_lock. in dup_fd()
427 * the file can show up as we are walking the array below. in dup_fd()
433 * ref the file if we see it and mark the fd slot as unused otherwise. in dup_fd()
435 for (i = open_files; i != 0; i--) { in dup_fd()
440 __clear_open_fd(open_files - i, new_fdt); in dup_fd()
444 spin_unlock(&oldf->file_lock); in dup_fd()
447 memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *)); in dup_fd()
449 rcu_assign_pointer(newf->fdt, new_fdt); in dup_fd()
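Seen from userspace, the copy that dup_fd() performs is what gives fork() its descriptor-table semantics: the child starts with a snapshot, and descriptors it opens afterwards never appear in the parent. A small demonstration (error handling omitted):

#include <fcntl.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	if (fork() == 0) {			/* child works on its own copy */
		int fd = open("/dev/null", O_RDONLY);
		printf("child  opened fd %d\n", fd);
		_exit(0);
	}
	wait(NULL);
	int fd = open("/dev/null", O_RDONLY);	/* parent: that slot is still free */
	printf("parent opened fd %d\n", fd);	/* typically prints the same number */
	close(fd);
	return 0;
}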
457 * It is safe to dereference the fd table without RCU or in close_files()
458 * ->file_lock because this is the last reference to the in close_files()
461 struct fdtable *fdt = rcu_dereference_raw(files->fdt); in close_files()
467 if (i >= fdt->max_fds) in close_files()
469 set = fdt->open_fds[j++]; in close_files()
472 struct file *file = fdt->fd[i]; in close_files()
488 if (atomic_dec_and_test(&files->count)) { in put_files_struct()
492 if (fdt != &files->fdtab) in put_files_struct()
500 struct files_struct * files = tsk->files; in exit_files()
504 tsk->files = NULL; in exit_files()
515 .fd = &init_files.fd_array[0],
526 unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */ in find_next_fd()
534 bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG, in find_next_fd()
535 start & (BITS_PER_LONG - 1)); in find_next_fd()
539 bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG; in find_next_fd()
544 return find_next_zero_bit(fdt->open_fds, maxfd, start); in find_next_fd()
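find_next_fd() above is where the second-level bitmap pays off: full_fds_bits is consulted first, so whole words of descriptors that are completely occupied are skipped without scanning open_fds. An unrolled plain-C sketch of the same idea (the kernel uses the find_next_zero_bit() helpers instead):

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned int find_free_fd(const unsigned long *open_fds,
				 const unsigned long *full_fds_bits,
				 unsigned int max_fds, unsigned int start)
{
	for (unsigned int word = start / BITS_PER_LONG;
	     word < max_fds / BITS_PER_LONG; word++) {
		if (full_fds_bits[word / BITS_PER_LONG] & (1UL << (word % BITS_PER_LONG)))
			continue;	/* every fd in this word is taken: skip it */

		unsigned int bit = (word == start / BITS_PER_LONG) ? start % BITS_PER_LONG : 0;
		for (; bit < BITS_PER_LONG; bit++)
			if (!(open_fds[word] & (1UL << bit)))
				return word * BITS_PER_LONG + bit;
	}
	return max_fds;			/* nothing free below max_fds */
}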
552 struct files_struct *files = current->files; in alloc_fd()
553 unsigned int fd; in alloc_fd() local
557 spin_lock(&files->file_lock); in alloc_fd()
560 fd = start; in alloc_fd()
561 if (fd < files->next_fd) in alloc_fd()
562 fd = files->next_fd; in alloc_fd()
564 if (likely(fd < fdt->max_fds)) in alloc_fd()
565 fd = find_next_fd(fdt, fd); in alloc_fd()
569 * will limit the total number of files that can be opened. in alloc_fd()
571 error = -EMFILE; in alloc_fd()
572 if (unlikely(fd >= end)) in alloc_fd()
575 if (unlikely(fd >= fdt->max_fds)) { in alloc_fd()
576 error = expand_files(files, fd); in alloc_fd()
583 if (start <= files->next_fd) in alloc_fd()
584 files->next_fd = fd + 1; in alloc_fd()
586 __set_open_fd(fd, fdt, flags & O_CLOEXEC); in alloc_fd()
587 error = fd; in alloc_fd()
590 spin_unlock(&files->file_lock); in alloc_fd()
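The search in alloc_fd() above, starting from files->next_fd and taking the first clear bit, is what implements the POSIX rule that a new descriptor is the lowest-numbered one available, which userspace can observe directly:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int a = open("/dev/null", O_RDONLY);	/* typically 3 */
	int b = open("/dev/null", O_RDONLY);	/* typically 4 */

	close(a);
	int c = open("/dev/null", O_RDONLY);	/* lowest free slot: reuses a's number */
	printf("a=%d b=%d c=%d\n", a, b, c);	/* expect c == a */
	close(b);
	close(c);
	return 0;
}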
605 static void __put_unused_fd(struct files_struct *files, unsigned int fd) in __put_unused_fd() argument
608 __clear_open_fd(fd, fdt); in __put_unused_fd()
609 if (fd < files->next_fd) in __put_unused_fd()
610 files->next_fd = fd; in __put_unused_fd()
613 void put_unused_fd(unsigned int fd) in put_unused_fd() argument
615 struct files_struct *files = current->files; in put_unused_fd()
616 spin_lock(&files->file_lock); in put_unused_fd()
617 __put_unused_fd(files, fd); in put_unused_fd()
618 spin_unlock(&files->file_lock); in put_unused_fd()
624 * Install a file pointer in the fd array.
632 * It should never happen - if we allow dup2() to do it, _really_ bad things
639 void fd_install(unsigned int fd, struct file *file) in fd_install() argument
641 struct files_struct *files = current->files; in fd_install()
644 if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING))) in fd_install()
649 if (unlikely(files->resize_in_progress)) { in fd_install()
651 spin_lock(&files->file_lock); in fd_install()
653 WARN_ON(fdt->fd[fd] != NULL); in fd_install()
654 rcu_assign_pointer(fdt->fd[fd], file); in fd_install()
655 spin_unlock(&files->file_lock); in fd_install()
660 fdt = rcu_dereference_sched(files->fdt); in fd_install()
661 BUG_ON(fdt->fd[fd] != NULL); in fd_install()
662 rcu_assign_pointer(fdt->fd[fd], file); in fd_install()
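At its core the rcu_assign_pointer() in fd_install() above is a release store: the struct file is fully initialized before the pointer becomes visible, so a lockless reader sees either NULL or a complete object. A C11 analogue of that publish/lookup pair; a sketch only, since the kernel's RCU machinery also keeps the table itself alive for readers.

#include <stdatomic.h>

struct filestub { int f_flags; };

/* Publisher: everything written to *f happens-before the slot update. */
static void install_slot(struct filestub *_Atomic *slot, struct filestub *f)
{
	atomic_store_explicit(slot, f, memory_order_release);
}

/* Reader: pairs with the release store, sees a fully built object or NULL. */
static struct filestub *lookup_slot(struct filestub *_Atomic *slot)
{
	return atomic_load_explicit(slot, memory_order_acquire);
}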
669 * file_close_fd_locked - return file associated with fd
671 * @fd: file descriptor to retrieve file for
677 * Returns: The file associated with @fd (NULL if @fd is not open)
679 struct file *file_close_fd_locked(struct files_struct *files, unsigned fd) in file_close_fd_locked() argument
684 lockdep_assert_held(&files->file_lock); in file_close_fd_locked()
686 if (fd >= fdt->max_fds) in file_close_fd_locked()
689 fd = array_index_nospec(fd, fdt->max_fds); in file_close_fd_locked()
690 file = rcu_dereference_raw(fdt->fd[fd]); in file_close_fd_locked()
692 rcu_assign_pointer(fdt->fd[fd], NULL); in file_close_fd_locked()
693 __put_unused_fd(files, fd); in file_close_fd_locked()
698 int close_fd(unsigned fd) in close_fd() argument
700 struct files_struct *files = current->files; in close_fd()
703 spin_lock(&files->file_lock); in close_fd()
704 file = file_close_fd_locked(files, fd); in close_fd()
705 spin_unlock(&files->file_lock); in close_fd()
707 return -EBADF; in close_fd()
714 * last_fd - return last valid index into fd table
723 return fdt->max_fds - 1; in last_fd()
727 unsigned int fd, unsigned int max_fd) in __range_cloexec() argument
732 spin_lock(&cur_fds->file_lock); in __range_cloexec()
735 if (fd <= max_fd) in __range_cloexec()
736 bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1); in __range_cloexec()
737 spin_unlock(&cur_fds->file_lock); in __range_cloexec()
740 static inline void __range_close(struct files_struct *files, unsigned int fd, in __range_close() argument
746 spin_lock(&files->file_lock); in __range_close()
750 for (; fd <= max_fd; fd++) { in __range_close()
751 file = file_close_fd_locked(files, fd); in __range_close()
753 spin_unlock(&files->file_lock); in __range_close()
756 spin_lock(&files->file_lock); in __range_close()
758 spin_unlock(&files->file_lock); in __range_close()
760 spin_lock(&files->file_lock); in __range_close()
763 spin_unlock(&files->file_lock); in __range_close()
767 * sys_close_range() - Close all file descriptors in a given range.
769 * @fd: starting file descriptor to close
774 * from @fd up to and including @max_fd are closed.
777 SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd, in SYSCALL_DEFINE3() argument
781 struct files_struct *cur_fds = me->files, *fds = NULL; in SYSCALL_DEFINE3()
784 return -EINVAL; in SYSCALL_DEFINE3()
786 if (fd > max_fd) in SYSCALL_DEFINE3()
787 return -EINVAL; in SYSCALL_DEFINE3()
789 if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) { in SYSCALL_DEFINE3()
790 struct fd_range range = {fd, max_fd}, *punch_hole = ⦥ in SYSCALL_DEFINE3()
811 __range_cloexec(cur_fds, fd, max_fd); in SYSCALL_DEFINE3()
813 __range_close(cur_fds, fd, max_fd); in SYSCALL_DEFINE3()
821 me->files = cur_fds; in SYSCALL_DEFINE3()
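From userspace the syscall implemented above is close_range(2) (Linux 5.9+; glibc 2.34 added a wrapper): it closes every descriptor from fd through max_fd in one call, with CLOSE_RANGE_CLOEXEC it only marks the range close-on-exec, and CLOSE_RANGE_UNSHARE detaches a shared descriptor table first. Typical use before exec'ing an untrusted child:

#define _GNU_SOURCE
#include <unistd.h>
#include <linux/close_range.h>		/* CLOSE_RANGE_* flags */

static void drop_inherited_fds(void)
{
	/* Close everything above stderr. */
	close_range(3, ~0U, 0);

	/* Alternative: keep them usable here, but not across execve(). */
	/* close_range(3, ~0U, CLOSE_RANGE_CLOEXEC); */
}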
830 * file_close_fd - return file associated with fd
831 * @fd: file descriptor to retrieve file for
835 * Returns: The file associated with @fd (NULL if @fd is not open)
837 struct file *file_close_fd(unsigned int fd) in file_close_fd() argument
839 struct files_struct *files = current->files; in file_close_fd()
842 spin_lock(&files->file_lock); in file_close_fd()
843 file = file_close_fd_locked(files, fd); in file_close_fd()
844 spin_unlock(&files->file_lock); in file_close_fd()
855 spin_lock(&files->file_lock); in do_close_on_exec()
858 unsigned fd = i * BITS_PER_LONG; in do_close_on_exec() local
860 if (fd >= fdt->max_fds) in do_close_on_exec()
862 set = fdt->close_on_exec[i]; in do_close_on_exec()
865 fdt->close_on_exec[i] = 0; in do_close_on_exec()
866 for ( ; set ; fd++, set >>= 1) { in do_close_on_exec()
870 file = fdt->fd[fd]; in do_close_on_exec()
873 rcu_assign_pointer(fdt->fd[fd], NULL); in do_close_on_exec()
874 __put_unused_fd(files, fd); in do_close_on_exec()
875 spin_unlock(&files->file_lock); in do_close_on_exec()
878 spin_lock(&files->file_lock); in do_close_on_exec()
882 spin_unlock(&files->file_lock); in do_close_on_exec()
895 if (unlikely(!file_ref_get(&file->f_ref))) in __get_file_rcu()
896 return ERR_PTR(-EAGAIN); in __get_file_rcu()
924 return ERR_PTR(-EAGAIN); in __get_file_rcu()
928 * get_file_rcu - try to get a reference to a file under rcu
952 * get_file_active - try to get a reference to a file
977 unsigned int fd, fmode_t mask) in __fget_files_rcu() argument
981 struct fdtable *fdt = rcu_dereference_raw(files->fdt); in __fget_files_rcu()
985 /* Mask is a 0 for invalid fd's, ~0 for valid ones */ in __fget_files_rcu()
986 nospec_mask = array_index_mask_nospec(fd, fdt->max_fds); in __fget_files_rcu()
989 * fdentry points to the 'fd' offset, or fdt->fd[0]. in __fget_files_rcu()
990 * Loading from fdt->fd[0] is always safe, because the in __fget_files_rcu()
993 fdentry = fdt->fd + (fd & nospec_mask); in __fget_files_rcu()
1012 if (unlikely(!file_ref_get(&file->f_ref))) in __fget_files_rcu()
1016 * Such a race can take two forms: in __fget_files_rcu()
1023 * Note that we don't need to re-check the 'fdt->fd' in __fget_files_rcu()
1025 * hand-in-hand with 'fdt'. in __fget_files_rcu()
1030 unlikely(rcu_dereference_raw(files->fdt) != fdt)) { in __fget_files_rcu()
1039 if (unlikely(file->f_mode & mask)) { in __fget_files_rcu()
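The pattern __fget_files_rcu() follows above reduces to "load the slot, try to take a reference, re-check the slot": if the descriptor was reassigned in between, the reference is dropped and the lookup retries. A C11 sketch with hypothetical try-get/put helpers; the kernel additionally relies on RCU to keep the memory valid while it looks, and on the nospec index masking shown above.

#include <stdatomic.h>
#include <stdbool.h>

struct filestub { _Atomic long refs; };

static bool file_try_get(struct filestub *f)		/* hypothetical helper */
{
	long old = atomic_load_explicit(&f->refs, memory_order_relaxed);
	while (old > 0)
		if (atomic_compare_exchange_weak_explicit(&f->refs, &old, old + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	return false;			/* object already on its way to being freed */
}

static void file_put(struct filestub *f)		/* hypothetical helper */
{
	atomic_fetch_sub_explicit(&f->refs, 1, memory_order_release);
}

static struct filestub *lookup_get(struct filestub *_Atomic *slot)
{
	for (;;) {
		struct filestub *f = atomic_load_explicit(slot, memory_order_acquire);

		if (!f)
			return NULL;		/* fd not open */
		if (!file_try_get(f))
			continue;		/* lost the race with the last put: retry */
		if (atomic_load_explicit(slot, memory_order_acquire) == f)
			return f;		/* slot unchanged: the reference is good */
		file_put(f);			/* fd was reassigned meanwhile: retry */
	}
}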
1052 static struct file *__fget_files(struct files_struct *files, unsigned int fd, in __fget_files() argument
1058 file = __fget_files_rcu(files, fd, mask); in __fget_files()
1064 static inline struct file *__fget(unsigned int fd, fmode_t mask) in __fget() argument
1066 return __fget_files(current->files, fd, mask); in __fget()
1069 struct file *fget(unsigned int fd) in fget() argument
1071 return __fget(fd, FMODE_PATH); in fget()
1075 struct file *fget_raw(unsigned int fd) in fget_raw() argument
1077 return __fget(fd, 0); in fget_raw()
1081 struct file *fget_task(struct task_struct *task, unsigned int fd) in fget_task() argument
1086 if (task->files) in fget_task()
1087 file = __fget_files(task->files, fd, 0); in fget_task()
1097 unsigned int fd = *ret_fd; in fget_task_next() local
1101 files = task->files; in fget_task_next()
1104 for (; fd < files_fdtable(files)->max_fds; fd++) { in fget_task_next()
1105 file = __fget_files_rcu(files, fd, 0); in fget_task_next()
1112 *ret_fd = fd; in fget_task_next()
1118 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
1120 * You can use this instead of fget if you satisfy all of the following
1133 * (As an exception to rule 2, you can call filp_close between fget_light and
1140 static inline struct fd __fget_light(unsigned int fd, fmode_t mask) in __fget_light() argument
1142 struct files_struct *files = current->files; in __fget_light()
1148 * entry combined with the new refcount - otherwise we could in __fget_light()
1154 if (likely(atomic_read_acquire(&files->count) == 1)) { in __fget_light()
1155 file = files_lookup_fd_raw(files, fd); in __fget_light()
1156 if (!file || unlikely(file->f_mode & mask)) in __fget_light()
1160 file = __fget_files(files, fd, mask); in __fget_light()
1166 struct fd fdget(unsigned int fd) in fdget() argument
1168 return __fget_light(fd, FMODE_PATH); in fdget()
1172 struct fd fdget_raw(unsigned int fd) in fdget_raw() argument
1174 return __fget_light(fd, 0); in fdget_raw()
1179 * file is marked for FMODE_ATOMIC_POS, and it can be
1183 * can make a file accessible even if it otherwise would
1189 return (file->f_mode & FMODE_ATOMIC_POS) && in file_needs_f_pos_lock()
1190 (file_count(file) > 1 || file->f_op->iterate_shared); in file_needs_f_pos_lock()
1193 struct fd fdget_pos(unsigned int fd) in fdget_pos() argument
1195 struct fd f = fdget(fd); in fdget_pos()
1200 mutex_lock(&file->f_pos_lock); in fdget_pos()
1207 mutex_unlock(&f->f_pos_lock); in __f_unlock_pos()
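The f_pos locking above matters because descriptors that share one struct file (via dup(), fork(), or fd passing) share a single file position; fdget_pos() serializes updates to it when the file can be reached more than one way. The sharing itself is easy to see from userspace:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int a = open("/etc/passwd", O_RDONLY);
	int b = dup(a);			/* same struct file, same f_pos */
	char c1, c2;

	read(a, &c1, 1);		/* advances the shared offset to 1 */
	read(b, &c2, 1);		/* continues at offset 1, not at 0 */
	printf("%c %c\n", c1, c2);	/* two consecutive bytes, not the same one */
	close(a);
	close(b);
	return 0;
}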
1216 void set_close_on_exec(unsigned int fd, int flag) in set_close_on_exec() argument
1218 struct files_struct *files = current->files; in set_close_on_exec()
1219 spin_lock(&files->file_lock); in set_close_on_exec()
1220 __set_close_on_exec(fd, files_fdtable(files), flag); in set_close_on_exec()
1221 spin_unlock(&files->file_lock); in set_close_on_exec()
1224 bool get_close_on_exec(unsigned int fd) in get_close_on_exec() argument
1228 res = close_on_exec(fd, current->files); in get_close_on_exec()
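set_close_on_exec()/get_close_on_exec() above back the userspace F_SETFD/F_GETFD interface; O_CLOEXEC at open time sets the same bit without a separate fcntl() round trip:

#include <fcntl.h>

static int set_cloexec(int fd, int on)
{
	int flags = fcntl(fd, F_GETFD);

	if (flags < 0)
		return -1;
	flags = on ? (flags | FD_CLOEXEC) : (flags & ~FD_CLOEXEC);
	return fcntl(fd, F_SETFD, flags);
}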
1234 struct file *file, unsigned fd, unsigned flags) in do_dup2() argument
1235 __releases(&files->file_lock) in do_dup2()
1244 * POSIX is silent on the issue, we return -EBUSY. in do_dup2()
1247 fd = array_index_nospec(fd, fdt->max_fds); in do_dup2()
1248 tofree = rcu_dereference_raw(fdt->fd[fd]); in do_dup2()
1249 if (!tofree && fd_is_open(fd, fdt)) in do_dup2()
1252 rcu_assign_pointer(fdt->fd[fd], file); in do_dup2()
1253 __set_open_fd(fd, fdt, flags & O_CLOEXEC); in do_dup2()
1254 spin_unlock(&files->file_lock); in do_dup2()
1259 return fd; in do_dup2()
1262 spin_unlock(&files->file_lock); in do_dup2()
1263 return -EBUSY; in do_dup2()
1266 int replace_fd(unsigned fd, struct file *file, unsigned flags) in replace_fd() argument
1269 struct files_struct *files = current->files; in replace_fd()
1272 return close_fd(fd); in replace_fd()
1274 if (fd >= rlimit(RLIMIT_NOFILE)) in replace_fd()
1275 return -EBADF; in replace_fd()
1277 spin_lock(&files->file_lock); in replace_fd()
1278 err = expand_files(files, fd); in replace_fd()
1281 return do_dup2(files, file, fd, flags); in replace_fd()
1284 spin_unlock(&files->file_lock); in replace_fd()
1289 * receive_fd() - Install received file into file descriptor table
1291 * @ufd: __user pointer to write new fd number to
1292 * @o_flags: the O_* flags to apply to the new fd entry
1295 * checks and count updates. Optionally writes the fd number to userspace, if
1296 * @ufd is non-NULL.
1301 * Returns the newly installed fd or -ve on error. in receive_fd()
1346 int err = -EBADF; in ksys_dup3()
1348 struct files_struct *files = current->files; in ksys_dup3()
1351 return -EINVAL; in ksys_dup3()
1354 return -EINVAL; in ksys_dup3()
1357 return -EBADF; in ksys_dup3()
1359 spin_lock(&files->file_lock); in ksys_dup3()
1365 if (err == -EMFILE) in ksys_dup3()
1372 err = -EBADF; in ksys_dup3()
1374 spin_unlock(&files->file_lock); in ksys_dup3()
1386 struct files_struct *files = current->files; in SYSCALL_DEFINE2()
1393 retval = -EBADF; in SYSCALL_DEFINE2()
1404 int ret = -EBADF; in SYSCALL_DEFINE1()
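The userspace counterparts of do_dup2()/ksys_dup3() above are dup2(2) and dup3(2); dup3() can atomically mark the new descriptor close-on-exec, closing the window a separate fcntl() would leave, and (unlike dup2()) rejects oldfd == newfd with EINVAL:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int stash_cloexec_copy(int fd, int high_fd)
{
	/* Duplicate fd onto a chosen slot and mark the copy close-on-exec
	 * in one atomic step, so it cannot leak across an intervening execve(). */
	return dup3(fd, high_fd, O_CLOEXEC);
}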
1422 return -EINVAL; in f_dupfd()
1439 spin_lock(&files->file_lock); in iterate_fd()
1440 for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { in iterate_fd()
1442 file = rcu_dereference_check_fdtable(files, fdt->fd[n]); in iterate_fd()
1449 spin_unlock(&files->file_lock); in iterate_fd()