// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

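/*
 * A note on terminology used throughout this file: NTFS tracks an
 * "initialized size" (ni->i_valid) separately from the file size. Reads
 * past i_valid must return zeros, so writes that jump ahead of it first
 * zero the gap (see ntfs_extend_initialized_size() below).
 */
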
static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
        struct fstrim_range __user *user_range;
        struct fstrim_range range;
        struct block_device *dev;
        int err;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        dev = sbi->sb->s_bdev;
        if (!bdev_max_discard_sectors(dev))
                return -EOPNOTSUPP;

        user_range = (struct fstrim_range __user *)arg;
        if (copy_from_user(&range, user_range, sizeof(range)))
                return -EFAULT;

        range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

        err = ntfs_trim_fs(sbi, &range);
        if (err < 0)
                return err;

        if (copy_to_user(user_range, &range, sizeof(range)))
                return -EFAULT;

        return 0;
}
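
/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * how FITRIM reaches the handler above (the mount point is hypothetical).
 *
 *	struct fstrim_range range = {
 *		.start = 0, .len = ULLONG_MAX, .minlen = 0,
 *	};
 *	int fd = open("/mnt/ntfs", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
 */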

/*
 * ntfs_fileattr_get - inode_operations::fileattr_get
 */
int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
        struct inode *inode = d_inode(dentry);
        struct ntfs_inode *ni = ntfs_i(inode);
        u32 flags = 0;

        if (inode->i_flags & S_IMMUTABLE)
                flags |= FS_IMMUTABLE_FL;

        if (inode->i_flags & S_APPEND)
                flags |= FS_APPEND_FL;

        if (is_compressed(ni))
                flags |= FS_COMPR_FL;

        if (is_encrypted(ni))
                flags |= FS_ENCRYPT_FL;

        fileattr_fill_flags(fa, flags);

        return 0;
}
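
/*
 * For reference: the VFS routes the FS_IOC_GETFLAGS ioctl (used by
 * lsattr(1)) through ->fileattr_get, so the flags filled in above are
 * what lsattr reports for a file on ntfs3.
 */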

/*
 * ntfs_fileattr_set - inode_operations::fileattr_set
 */
int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
                      struct fileattr *fa)
{
        struct inode *inode = d_inode(dentry);
        struct ntfs_inode *ni = ntfs_i(inode);
        u32 flags = fa->flags;
        unsigned int new_fl = 0;

        if (fileattr_has_fsx(fa))
                return -EOPNOTSUPP;

        if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_COMPR_FL))
                return -EOPNOTSUPP;

        if (flags & FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;

        if (flags & FS_APPEND_FL)
                new_fl |= S_APPEND;

        /* Changing compression is allowed only for empty files and for directories. */
        if (!is_dedup(ni) && !is_encrypted(ni) &&
            (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
                int err = 0;
                struct address_space *mapping = inode->i_mapping;

                /* Write out all data and wait. */
                filemap_invalidate_lock(mapping);
                err = filemap_write_and_wait(mapping);

                if (err >= 0) {
                        /* Change the compression state. */
                        bool compr = flags & FS_COMPR_FL;

                        err = ni_set_compress(inode, compr);

                        /* For files, change a_ops too. */
                        if (!err)
                                mapping->a_ops = compr ? &ntfs_aops_cmpr :
                                                         &ntfs_aops;
                }

                filemap_invalidate_unlock(mapping);

                if (err)
                        return err;
        }

        inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);

        inode_set_ctime_current(inode);
        mark_inode_dirty(inode);

        return 0;
}
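
/*
 * Illustrative only: `chattr +c file` issues FS_IOC_SETFLAGS with
 * FS_COMPR_FL set, which the VFS forwards here as fa->flags; on an empty
 * file this toggles NTFS native compression and swaps in ntfs_aops_cmpr.
 */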

/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 */
long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
        struct inode *inode = file_inode(filp);
        struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

        switch (cmd) {
        case FITRIM:
                return ntfs_ioctl_fitrim(sbi, arg);
        }
        return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
        return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
                 struct kstat *stat, u32 request_mask, u32 flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct ntfs_inode *ni = ntfs_i(inode);

        stat->result_mask |= STATX_BTIME;
        stat->btime = ni->i_crtime;
        stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

        if (inode->i_flags & S_IMMUTABLE)
                stat->attributes |= STATX_ATTR_IMMUTABLE;

        if (inode->i_flags & S_APPEND)
                stat->attributes |= STATX_ATTR_APPEND;

        if (is_compressed(ni))
                stat->attributes |= STATX_ATTR_COMPRESSED;

        if (is_encrypted(ni))
                stat->attributes |= STATX_ATTR_ENCRYPTED;

        stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
                                 STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;

        generic_fillattr(idmap, request_mask, inode, stat);

        return 0;
}
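
/*
 * Illustrative only: since ntfs_getattr() always fills STATX_BTIME,
 * userspace can read the NTFS creation time with, e.g.:
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "file", 0, STATX_BTIME, &stx) == 0 &&
 *	    (stx.stx_mask & STATX_BTIME))
 *		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
 */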

static int ntfs_extend_initialized_size(struct file *file,
                                        struct ntfs_inode *ni,
                                        const loff_t valid,
                                        const loff_t new_valid)
{
        struct inode *inode = &ni->vfs_inode;
        struct address_space *mapping = inode->i_mapping;
        struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
        loff_t pos = valid;
        int err;

        if (valid >= new_valid)
                return 0;

        if (is_resident(ni)) {
                ni->i_valid = new_valid;
                return 0;
        }

        WARN_ON(is_compressed(ni));
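
        /*
         * Compressed files are not expected here: ntfs_extend() skips the
         * initialized-size extension for them, and ntfs_compress_write()
         * zeroes the [valid, pos) gap frame by frame instead.
         */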

        for (;;) {
                u32 zerofrom, len;
                struct folio *folio;
                u8 bits;
                CLST vcn, lcn, clen;

                if (is_sparsed(ni)) {
                        bits = sbi->cluster_bits;
                        vcn = pos >> bits;

                        err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
                                                  false);
                        if (err)
                                goto out;

                        if (lcn == SPARSE_LCN) {
                                pos = ((loff_t)clen + vcn) << bits;
                                ni->i_valid = pos;
                                goto next;
                        }
                }

                zerofrom = pos & (PAGE_SIZE - 1);
                len = PAGE_SIZE - zerofrom;

                if (pos + len > new_valid)
                        len = new_valid - pos;

                err = ntfs_write_begin(file, mapping, pos, len, &folio, NULL);
                if (err)
                        goto out;

                folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom);

                err = ntfs_write_end(file, mapping, pos, len, len, folio, NULL);
                if (err < 0)
                        goto out;
                pos += len;

next:
                if (pos >= new_valid)
                        break;

                balance_dirty_pages_ratelimited(mapping);
                cond_resched();
        }

        return 0;

out:
        ni->i_valid = valid;
        ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
                        new_valid);
        return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
        int err = 0;
        struct address_space *mapping = inode->i_mapping;
        u32 blocksize = i_blocksize(inode);
        pgoff_t idx = vbo >> PAGE_SHIFT;
        u32 from = vbo & (PAGE_SIZE - 1);
        pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
        loff_t page_off;
        struct buffer_head *head, *bh;
        u32 bh_next, bh_off, to;
        sector_t iblock;
        struct folio *folio;
        bool dirty = false;

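        /*
         * Walk the range page by page; 'from' is non-zero only for the
         * first page, and 'to' is clamped on the last one. Blocks that
         * are unmapped (holes) need no zeroing and are skipped below.
         */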
        for (; idx < idx_end; idx += 1, from = 0) {
                page_off = (loff_t)idx << PAGE_SHIFT;
                to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
                                                       PAGE_SIZE;
                iblock = page_off >> inode->i_blkbits;

                folio = __filemap_get_folio(
                        mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                        mapping_gfp_constraint(mapping, ~__GFP_FS));
                if (IS_ERR(folio))
                        return PTR_ERR(folio);

                head = folio_buffers(folio);
                if (!head)
                        head = create_empty_buffers(folio, blocksize, 0);

                bh = head;
                bh_off = 0;
                do {
                        bh_next = bh_off + blocksize;

                        if (bh_next <= from || bh_off >= to)
                                continue;

                        if (!buffer_mapped(bh)) {
                                ntfs_get_block(inode, iblock, bh, 0);
                                /* Unmapped? It's a hole - nothing to do. */
                                if (!buffer_mapped(bh))
                                        continue;
                        }

                        /* Ok, it's mapped. Make sure it's up-to-date. */
                        if (folio_test_uptodate(folio))
                                set_buffer_uptodate(bh);
                        else if (bh_read(bh, 0) < 0) {
                                err = -EIO;
                                folio_unlock(folio);
                                folio_put(folio);
                                goto out;
                        }

                        mark_buffer_dirty(bh);
                } while (bh_off = bh_next, iblock += 1,
                         head != (bh = bh->b_this_page));

                folio_zero_segment(folio, from, to);
                dirty = true;

                folio_unlock(folio);
                folio_put(folio);
                cond_resched();
        }
out:
        if (dirty)
                mark_inode_dirty(inode);
        return err;
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        struct ntfs_inode *ni = ntfs_i(inode);
        u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
        bool rw = vma->vm_flags & VM_WRITE;
        int err;

        if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
                return -EIO;

        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "mmap encrypted not supported");
                return -EOPNOTSUPP;
        }

        if (is_dedup(ni)) {
                ntfs_inode_warn(inode, "mmap deduplicated not supported");
                return -EOPNOTSUPP;
        }

        if (is_compressed(ni) && rw) {
                ntfs_inode_warn(inode, "mmap(write) compressed not supported");
                return -EOPNOTSUPP;
        }

        if (rw) {
                u64 to = min_t(loff_t, i_size_read(inode),
                               from + vma->vm_end - vma->vm_start);

                if (is_sparsed(ni)) {
                        /* Allocate clusters for rw map. */
                        struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
                        CLST lcn, len;
                        CLST vcn = from >> sbi->cluster_bits;
                        CLST end = bytes_to_cluster(sbi, to);
                        bool new;

                        for (; vcn < end; vcn += len) {
                                err = attr_data_get_block(ni, vcn, 1, &lcn,
                                                          &len, &new, true);
                                if (err)
                                        goto out;
                        }
                }

                if (ni->i_valid < to) {
                        inode_lock(inode);
                        err = ntfs_extend_initialized_size(file, ni,
                                                           ni->i_valid, to);
                        inode_unlock(inode);
                        if (err)
                                goto out;
                }
        }

        err = generic_file_mmap(file, vma);
out:
        return err;
}

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
                       struct file *file)
{
        struct ntfs_inode *ni = ntfs_i(inode);
        struct address_space *mapping = inode->i_mapping;
        loff_t end = pos + count;
        bool extend_init = file && pos > ni->i_valid;
        int err;

        if (end <= inode->i_size && !extend_init)
                return 0;

        /* Mark rw ntfs as dirty. It will be cleared at umount. */
        ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

        if (end > inode->i_size) {
                err = ntfs_set_size(inode, end);
                if (err)
                        goto out;
        }

        if (extend_init && !is_compressed(ni)) {
                err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
                if (err)
                        goto out;
        } else {
                err = 0;
        }

        if (file && is_sparsed(ni)) {
                /*
                 * This code optimizes large writes to sparse files.
                 * TODO: merge this fragment with the fallocate fragment.
                 */
                struct ntfs_sb_info *sbi = ni->mi.sbi;
                CLST vcn = pos >> sbi->cluster_bits;
                CLST cend = bytes_to_cluster(sbi, end);
                CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
                CLST lcn, clen;
                bool new;

                if (cend_v > cend)
                        cend_v = cend;

                /*
                 * Allocate and zero new clusters.
                 * Zeroing these clusters may take a long time.
                 */
                for (; vcn < cend_v; vcn += clen) {
                        err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
                                                  &clen, &new, true);
                        if (err)
                                goto out;
                }

                /* Allocate but do not zero new clusters. */
                for (; vcn < cend; vcn += clen) {
                        err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
                                                  &clen, &new, false);
                        if (err)
                                goto out;
                }
        }

        inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        mark_inode_dirty(inode);

        if (IS_SYNC(inode)) {
                int err2;

                err = filemap_fdatawrite_range(mapping, pos, end - 1);
                err2 = sync_mapping_buffers(mapping);
                if (!err)
                        err = err2;
                err2 = write_inode_now(inode, 1);
                if (!err)
                        err = err2;
                if (!err)
                        err = filemap_fdatawait_range(mapping, pos, end - 1);
        }

out:
        return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
        struct super_block *sb = inode->i_sb;
        struct ntfs_inode *ni = ntfs_i(inode);
        int err, dirty = 0;
        u64 new_valid;

        if (!S_ISREG(inode->i_mode))
                return 0;

        if (is_compressed(ni)) {
                if (ni->i_valid > new_size)
                        ni->i_valid = new_size;
        } else {
                err = block_truncate_page(inode->i_mapping, new_size,
                                          ntfs_get_block);
                if (err)
                        return err;
        }

        new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

        truncate_setsize(inode, new_size);

        ni_lock(ni);

        down_write(&ni->file.run_lock);
        err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
                            &new_valid, ni->mi.sbi->options->prealloc, NULL);
        up_write(&ni->file.run_lock);

        if (new_valid < ni->i_valid)
                ni->i_valid = new_valid;

        ni_unlock(ni);

        ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
        inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
        if (!IS_DIRSYNC(inode)) {
                dirty = 1;
        } else {
                err = ntfs_sync_inode(inode);
                if (err)
                        return err;
        }

        if (dirty)
                mark_inode_dirty(inode);

        /*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

        return 0;
}

/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements ntfs3's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
        struct inode *inode = file_inode(file);
        struct address_space *mapping = inode->i_mapping;
        struct super_block *sb = inode->i_sb;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        struct ntfs_inode *ni = ntfs_i(inode);
        loff_t end = vbo + len;
        loff_t vbo_down = round_down(vbo, max_t(unsigned long,
                                                sbi->cluster_size, PAGE_SIZE));
        bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
        loff_t i_size, new_size;
        bool map_locked;
        int err;

        /* No support for directories. */
        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;

        /*
         * vfs_fallocate checks all possible combinations of mode.
         * Do additional checks here before ntfs_set_state(dirty).
         */
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                if (!is_supported_holes)
                        return -EOPNOTSUPP;
        } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
        } else if (mode & FALLOC_FL_INSERT_RANGE) {
                if (!is_supported_holes)
                        return -EOPNOTSUPP;
        } else if (mode &
                   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
                     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
                ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
                                mode);
                return -EOPNOTSUPP;
        }

        ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

        inode_lock(inode);
        i_size = inode->i_size;
        new_size = max(end, i_size);
        map_locked = false;

        if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
                /* Should never be here, see ntfs_file_open(). */
                err = -EOPNOTSUPP;
                goto out;
        }

        if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
                    FALLOC_FL_INSERT_RANGE)) {
                inode_dio_wait(inode);
                filemap_invalidate_lock(mapping);
                map_locked = true;
        }

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                u32 frame_size;
                loff_t mask, vbo_a, end_a, tmp;

                err = filemap_write_and_wait_range(mapping, vbo_down,
                                                   LLONG_MAX);
                if (err)
                        goto out;

                truncate_pagecache(inode, vbo_down);

                ni_lock(ni);
                err = attr_punch_hole(ni, vbo, len, &frame_size);
                ni_unlock(ni);
                if (!err)
                        goto ok;

                if (err != E_NTFS_NOTALIGNED)
                        goto out;

                /* Process an unaligned punch. */
                err = 0;
                mask = frame_size - 1;
                vbo_a = (vbo + mask) & ~mask;
                end_a = end & ~mask;

                tmp = min(vbo_a, end);
                if (tmp > vbo) {
                        err = ntfs_zero_range(inode, vbo, tmp);
                        if (err)
                                goto out;
                }

                if (vbo < end_a && end_a < end) {
                        err = ntfs_zero_range(inode, end_a, end);
                        if (err)
                                goto out;
                }

                /* Aligned punch_hole. */
                if (end_a > vbo_a) {
                        ni_lock(ni);
                        err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
                        ni_unlock(ni);
                        if (err)
                                goto out;
                }
        } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
                /*
                 * Write out the tail of the last page before the removed
                 * range, since it will get removed from the page cache below.
                 */
                err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
                if (err)
                        goto out;

                /*
                 * Write out the data that will be shifted, to preserve it
                 * when discarding the page cache below.
                 */
                err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
                if (err)
                        goto out;

                truncate_pagecache(inode, vbo_down);

                ni_lock(ni);
                err = attr_collapse_range(ni, vbo, len);
                ni_unlock(ni);
                if (err)
                        goto out;
        } else if (mode & FALLOC_FL_INSERT_RANGE) {
                /* Check the new size. */
                err = inode_newsize_ok(inode, new_size);
                if (err)
                        goto out;

                /* Write out all dirty pages. */
                err = filemap_write_and_wait_range(mapping, vbo_down,
                                                   LLONG_MAX);
                if (err)
                        goto out;
                truncate_pagecache(inode, vbo_down);

                ni_lock(ni);
                err = attr_insert_range(ni, vbo, len);
                ni_unlock(ni);
                if (err)
                        goto out;
        } else {
                /* Check the new size. */
                u8 cluster_bits = sbi->cluster_bits;

                /* Make sure the file is non-resident. */
                if (is_resident(ni)) {
                        ni_lock(ni);
                        err = attr_force_nonresident(ni);
                        ni_unlock(ni);
                        if (err)
                                goto out;
                }

                /* generic/213: expected -ENOSPC instead of -EFBIG. */
                if (!is_supported_holes) {
                        loff_t to_alloc = new_size - inode_get_bytes(inode);

                        if (to_alloc > 0 &&
                            (to_alloc >> cluster_bits) >
                                    wnd_zeroes(&sbi->used.bitmap)) {
                                err = -ENOSPC;
                                goto out;
                        }
                }

                err = inode_newsize_ok(inode, new_size);
                if (err)
                        goto out;

                if (new_size > i_size) {
                        /* Allocate clusters, do not change the 'valid' size. */
                        err = ntfs_set_size(inode, new_size);
                        if (err)
                                goto out;
                }

                if (is_supported_holes) {
                        CLST vcn = vbo >> cluster_bits;
                        CLST cend = bytes_to_cluster(sbi, end);
                        CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
                        CLST lcn, clen;
                        bool new;

                        if (cend_v > cend)
                                cend_v = cend;

                        /*
                         * Allocate and zero new clusters.
                         * Zeroing these clusters may take a long time.
                         */
                        for (; vcn < cend_v; vcn += clen) {
                                err = attr_data_get_block(ni, vcn, cend_v - vcn,
                                                          &lcn, &clen, &new,
                                                          true);
                                if (err)
                                        goto out;
                        }

                        /* Allocate but do not zero new clusters. */
                        for (; vcn < cend; vcn += clen) {
                                err = attr_data_get_block(ni, vcn, cend - vcn,
                                                          &lcn, &clen, &new,
                                                          false);
                                if (err)
                                        goto out;
                        }
                }

                if (mode & FALLOC_FL_KEEP_SIZE) {
                        ni_lock(ni);
                        /* True - keep preallocated. */
                        err = attr_set_size(ni, ATTR_DATA, NULL, 0,
                                            &ni->file.run, i_size, &ni->i_valid,
                                            true, NULL);
                        ni_unlock(ni);
                        if (err)
                                goto out;
                } else if (new_size > i_size) {
                        i_size_write(inode, new_size);
                }
        }

ok:
        err = file_modified(file);
        if (err)
                goto out;

out:
        if (map_locked)
                filemap_invalidate_unlock(mapping);

        if (!err) {
                inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
                mark_inode_dirty(inode);
        }

        inode_unlock(inode);
        return err;
}
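
/*
 * Illustrative only: a userspace hole punch such as
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 65536);
 *
 * lands in ntfs_fallocate() above with mode containing FALLOC_FL_PUNCH_HOLE
 * (the VFS requires KEEP_SIZE alongside it) and vbo/len as given.
 */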

/*
 * ntfs_setattr - inode_operations::setattr
 */
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                 struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct ntfs_inode *ni = ntfs_i(inode);
        u32 ia_valid = attr->ia_valid;
        umode_t mode = inode->i_mode;
        int err;

        if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
                return -EIO;

        err = setattr_prepare(idmap, dentry, attr);
        if (err)
                goto out;

        if (ia_valid & ATTR_SIZE) {
                loff_t newsize, oldsize;

                if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
                        /* Should never be here, see ntfs_file_open(). */
                        err = -EOPNOTSUPP;
                        goto out;
                }
                inode_dio_wait(inode);
                oldsize = i_size_read(inode);
                newsize = attr->ia_size;

                if (newsize <= oldsize)
                        err = ntfs_truncate(inode, newsize);
                else
                        err = ntfs_extend(inode, newsize, 0, NULL);

                if (err)
                        goto out;

                ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
                i_size_write(inode, newsize);
        }

        setattr_copy(idmap, inode, attr);

        if (mode != inode->i_mode) {
                err = ntfs_acl_chmod(idmap, dentry);
                if (err)
                        goto out;

                /* Linux 'w' -> Windows 'ro'. */
                if (0222 & inode->i_mode)
                        ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
                else
                        ni->std_fa |= FILE_ATTRIBUTE_READONLY;
        }

        if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
                ntfs_save_wsl_perm(inode, NULL);
        mark_inode_dirty(inode);
out:
        return err;
}

/*
 * check_read_restriction:
 * common code for ntfs_file_read_iter and ntfs_file_splice_read
 */
static int check_read_restriction(struct inode *inode)
{
        struct ntfs_inode *ni = ntfs_i(inode);

        if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
                return -EIO;

        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "encrypted i/o not supported");
                return -EOPNOTSUPP;
        }

#ifndef CONFIG_NTFS3_LZX_XPRESS
        if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
                ntfs_inode_warn(
                        inode,
                        "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
                return -EOPNOTSUPP;
        }
#endif

        if (is_dedup(ni)) {
                ntfs_inode_warn(inode, "read deduplicated not supported");
                return -EOPNOTSUPP;
        }

        return 0;
}

/*
 * ntfs_file_read_iter - file_operations::read_iter
 */
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ntfs_inode *ni = ntfs_i(inode);
        ssize_t err;

        err = check_read_restriction(inode);
        if (err)
                return err;

        if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
                ntfs_inode_warn(inode, "direct i/o + compressed not supported");
                return -EOPNOTSUPP;
        }

        return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_file_splice_read - file_operations::splice_read
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
                                     struct pipe_inode_info *pipe, size_t len,
                                     unsigned int flags)
{
        struct inode *inode = file_inode(in);
        ssize_t err;

        err = check_read_restriction(inode);
        if (err)
                return err;

        return filemap_splice_read(in, ppos, pipe, len, flags);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
                                struct page **pages, u32 pages_per_frame,
                                bool *frame_uptodate)
{
        gfp_t gfp_mask = mapping_gfp_mask(mapping);
        u32 npages;

        *frame_uptodate = true;

        for (npages = 0; npages < pages_per_frame; npages++, index++) {
                struct folio *folio;

                folio = __filemap_get_folio(mapping, index,
                                            FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                            gfp_mask);
                if (IS_ERR(folio)) {
                        while (npages--) {
                                folio = page_folio(pages[npages]);
                                folio_unlock(folio);
                                folio_put(folio);
                        }

                        return -ENOMEM;
                }

                if (!folio_test_uptodate(folio))
                        *frame_uptodate = false;

                pages[npages] = &folio->page;
        }

        return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
        int err;
        struct file *file = iocb->ki_filp;
        size_t count = iov_iter_count(from);
        loff_t pos = iocb->ki_pos;
        struct inode *inode = file_inode(file);
        loff_t i_size = i_size_read(inode);
        struct address_space *mapping = inode->i_mapping;
        struct ntfs_inode *ni = ntfs_i(inode);
        u64 valid = ni->i_valid;
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        struct page *page, **pages = NULL;
        size_t written = 0;
        u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
        u32 frame_size = 1u << frame_bits;
        u32 pages_per_frame = frame_size >> PAGE_SHIFT;
        u32 ip, off;
        CLST frame;
        u64 frame_vbo;
        pgoff_t index;
        bool frame_uptodate;
        struct folio *folio;

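        /*
         * LZNT compression operates on frames of 2^NTFS_LZNT_CUNIT
         * (typically 16) clusters, so frame_size above is a multiple of
         * the cluster size; all reads, zeroing and writes below are done
         * one whole frame at a time.
         */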
        if (frame_size < PAGE_SIZE) {
                /*
                 * frame_size == 8K if cluster 512
                 * frame_size == 64K if cluster 4096
                 */
                ntfs_inode_warn(inode, "page size is bigger than frame size");
                return -EOPNOTSUPP;
        }

        pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
        if (!pages)
                return -ENOMEM;

        err = file_remove_privs(file);
        if (err)
                goto out;

        err = file_update_time(file);
        if (err)
                goto out;

        /* Zero range [valid : pos). */
        while (valid < pos) {
                CLST lcn, clen;

                frame = valid >> frame_bits;
                frame_vbo = valid & ~(frame_size - 1);
                off = valid & (frame_size - 1);

                err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
                                          &clen, NULL, false);
                if (err)
                        goto out;

                if (lcn == SPARSE_LCN) {
                        ni->i_valid = valid =
                                frame_vbo + ((u64)clen << sbi->cluster_bits);
                        continue;
                }

                /* Load full frame. */
                err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
                                           pages, pages_per_frame,
                                           &frame_uptodate);
                if (err)
                        goto out;

                if (!frame_uptodate && off) {
                        err = ni_read_frame(ni, frame_vbo, pages,
                                            pages_per_frame);
                        if (err) {
                                for (ip = 0; ip < pages_per_frame; ip++) {
                                        page = pages[ip];
                                        folio = page_folio(page);
                                        folio_unlock(folio);
                                        folio_put(folio);
                                }
                                goto out;
                        }
                }

                ip = off >> PAGE_SHIFT;
                off = offset_in_page(valid);
                for (; ip < pages_per_frame; ip++, off = 0) {
                        page = pages[ip];
                        folio = page_folio(page);
                        zero_user_segment(page, off, PAGE_SIZE);
                        flush_dcache_page(page);
                        folio_mark_uptodate(folio);
                }

                ni_lock(ni);
                err = ni_write_frame(ni, pages, pages_per_frame);
                ni_unlock(ni);

                for (ip = 0; ip < pages_per_frame; ip++) {
                        page = pages[ip];
                        folio = page_folio(page);
                        folio_mark_uptodate(folio);
                        folio_unlock(folio);
                        folio_put(folio);
                }

                if (err)
                        goto out;

                ni->i_valid = valid = frame_vbo + frame_size;
        }

        /* Copy user data [pos : pos + count). */
        while (count) {
                size_t copied, bytes;

                off = pos & (frame_size - 1);
                bytes = frame_size - off;
                if (bytes > count)
                        bytes = count;

                frame_vbo = pos & ~(frame_size - 1);
                index = frame_vbo >> PAGE_SHIFT;

                if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
                        err = -EFAULT;
                        goto out;
                }

                /* Load full frame. */
                err = ntfs_get_frame_pages(mapping, index, pages,
                                           pages_per_frame, &frame_uptodate);
                if (err)
                        goto out;

                if (!frame_uptodate) {
                        loff_t to = pos + bytes;

                        if (off || (to < i_size && (to & (frame_size - 1)))) {
                                err = ni_read_frame(ni, frame_vbo, pages,
                                                    pages_per_frame);
                                if (err) {
                                        for (ip = 0; ip < pages_per_frame;
                                             ip++) {
                                                page = pages[ip];
                                                folio = page_folio(page);
                                                folio_unlock(folio);
                                                folio_put(folio);
                                        }
                                        goto out;
                                }
                        }
                }

                WARN_ON(!bytes);
                copied = 0;
                ip = off >> PAGE_SHIFT;
                off = offset_in_page(pos);

                /* Copy user data to pages. */
                for (;;) {
                        size_t cp, tail = PAGE_SIZE - off;

                        page = pages[ip];
                        cp = copy_page_from_iter_atomic(page, off,
                                                        min(tail, bytes), from);
                        flush_dcache_page(page);

                        copied += cp;
                        bytes -= cp;
                        if (!bytes || !cp)
                                break;

                        if (cp < tail) {
                                off += cp;
                        } else {
                                ip++;
                                off = 0;
                        }
                }

                ni_lock(ni);
                err = ni_write_frame(ni, pages, pages_per_frame);
                ni_unlock(ni);

                for (ip = 0; ip < pages_per_frame; ip++) {
                        page = pages[ip];
                        ClearPageDirty(page);
                        folio = page_folio(page);
                        folio_mark_uptodate(folio);
                        folio_unlock(folio);
                        folio_put(folio);
                }

                if (err)
                        goto out;

                /*
                 * We can loop for a long time in here. Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();

                pos += copied;
                written += copied;

                count = iov_iter_count(from);
        }

out:
        kfree(pages);

        if (err < 0)
                return err;

        iocb->ki_pos += written;
        if (iocb->ki_pos > ni->i_valid)
                ni->i_valid = iocb->ki_pos;
        if (iocb->ki_pos > i_size)
                i_size_write(inode, iocb->ki_pos);

        return written;
}

/*
 * check_write_restriction:
 * common code for ntfs_file_write_iter and ntfs_file_splice_write
 */
static int check_write_restriction(struct inode *inode)
{
        struct ntfs_inode *ni = ntfs_i(inode);

        if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
                return -EIO;

        if (is_encrypted(ni)) {
                ntfs_inode_warn(inode, "encrypted i/o not supported");
                return -EOPNOTSUPP;
        }

        if (is_dedup(ni)) {
                ntfs_inode_warn(inode, "write into deduplicated not supported");
                return -EOPNOTSUPP;
        }

        return 0;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ntfs_inode *ni = ntfs_i(inode);
        ssize_t ret;
        int err;

        err = check_write_restriction(inode);
        if (err)
                return err;

        if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
                ntfs_inode_warn(inode, "direct i/o + compressed not supported");
                return -EOPNOTSUPP;
        }

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        err = file_modified(iocb->ki_filp);
        if (err) {
                ret = err;
                goto out;
        }

        if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
                /* Should never be here, see ntfs_file_open(). */
                ret = -EOPNOTSUPP;
                goto out;
        }

        ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
        if (ret)
                goto out;

        ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
                                  __generic_file_write_iter(iocb, from);

out:
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
        struct ntfs_inode *ni = ntfs_i(inode);

        if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
                return -EIO;

        if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
                     (file->f_flags & O_DIRECT))) {
                return -EOPNOTSUPP;
        }

        /* Decompress "external compressed" file if opened for rw. */
        if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
            (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
                int err = ni_decompress_file(ni);

                if (err)
                        return err;
#else
                ntfs_inode_warn(
                        inode,
                        "activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
                return -EOPNOTSUPP;
#endif
        }

        return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
        struct ntfs_inode *ni = ntfs_i(inode);
        struct ntfs_sb_info *sbi = ni->mi.sbi;
        int err = 0;

        /* If we are the last writer on the inode, drop the block reservation. */
        if (sbi->options->prealloc &&
            ((file->f_mode & FMODE_WRITE) &&
             atomic_read(&inode->i_writecount) == 1)
            /*
             * The MFT is the only file with inode->i_fop =
             * &ntfs_file_operations for which init_rwsem(&ni->file.run_lock)
             * is not called explicitly, so add an additional check for it here.
             */
            && inode->i_ino != MFT_REC_MFT) {
                ni_lock(ni);
                down_write(&ni->file.run_lock);

                err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
                                    i_size_read(inode), &ni->i_valid, false,
                                    NULL);

                up_write(&ni->file.run_lock);
                ni_unlock(ni);
        }
        return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len)
{
        int err;
        struct ntfs_inode *ni = ntfs_i(inode);

        err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
        if (err)
                return err;

        ni_lock(ni);

        err = ni_fiemap(ni, fieinfo, start, len);

        ni_unlock(ni);

        return err;
}

/*
 * ntfs_file_splice_write - file_operations::splice_write
 */
static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe,
                                      struct file *file, loff_t *ppos,
                                      size_t len, unsigned int flags)
{
        ssize_t err;
        struct inode *inode = file_inode(file);

        err = check_write_restriction(inode);
        if (err)
                return err;

        return iter_file_splice_write(pipe, file, ppos, len, flags);
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
        .getattr        = ntfs_getattr,
        .setattr        = ntfs_setattr,
        .listxattr      = ntfs_listxattr,
        .get_acl        = ntfs_get_acl,
        .set_acl        = ntfs_set_acl,
        .fiemap         = ntfs_fiemap,
        .fileattr_get   = ntfs_fileattr_get,
        .fileattr_set   = ntfs_fileattr_set,
};

const struct file_operations ntfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = ntfs_file_read_iter,
        .write_iter     = ntfs_file_write_iter,
        .unlocked_ioctl = ntfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ntfs_compat_ioctl,
#endif
        .splice_read    = ntfs_file_splice_read,
        .splice_write   = ntfs_file_splice_write,
        .mmap           = ntfs_file_mmap,
        .open           = ntfs_file_open,
        .fsync          = generic_file_fsync,
        .fallocate      = ntfs_fallocate,
        .release        = ntfs_file_release,
};

#if IS_ENABLED(CONFIG_NTFS_FS)
const struct file_operations ntfs_legacy_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = ntfs_file_read_iter,
        .splice_read    = ntfs_file_splice_read,
        .open           = ntfs_file_open,
        .release        = ntfs_file_release,
};
#endif
// clang-format on