1 | /* | |
2 | * Copyright (c) 2000-2010 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #include <sys/systm.h> | |
30 | #include <sys/param.h> | |
31 | #include <sys/kernel.h> | |
32 | #include <sys/file_internal.h> | |
33 | #include <sys/dirent.h> | |
34 | #include <sys/stat.h> | |
35 | #include <sys/buf.h> | |
36 | #include <sys/mount.h> | |
37 | #include <sys/vnode_if.h> | |
38 | #include <sys/vnode_internal.h> | |
39 | #include <sys/malloc.h> | |
40 | #include <sys/ubc.h> | |
41 | #include <sys/ubc_internal.h> | |
42 | #include <sys/paths.h> | |
43 | #include <sys/quota.h> | |
44 | #include <sys/time.h> | |
45 | #include <sys/disk.h> | |
46 | #include <sys/kauth.h> | |
47 | #include <sys/uio_internal.h> | |
48 | #include <sys/fsctl.h> | |
49 | #include <sys/cprotect.h> | |
50 | ||
51 | #include <string.h> | |
52 | ||
53 | #include <miscfs/specfs/specdev.h> | |
54 | #include <miscfs/fifofs/fifo.h> | |
55 | #include <vfs/vfs_support.h> | |
56 | #include <machine/spl.h> | |
57 | ||
58 | #include <sys/kdebug.h> | |
59 | #include <sys/sysctl.h> | |
60 | ||
61 | #include "hfs.h" | |
62 | #include "hfs_catalog.h" | |
63 | #include "hfs_cnode.h" | |
64 | #include "hfs_dbg.h" | |
65 | #include "hfs_mount.h" | |
66 | #include "hfs_quota.h" | |
67 | #include "hfs_endian.h" | |
68 | ||
69 | #include "hfscommon/headers/BTreesInternal.h" | |
70 | #include "hfscommon/headers/FileMgrInternal.h" | |
71 | ||
72 | #define KNDETACH_VNLOCKED 0x00000001 | |
73 | ||
74 | /* Global vfs data structures for hfs */ | |
75 | ||
76 | /* Always do F_FULLFSYNC? 1=yes, 0=no (the default is 'no' for various reasons) */ | |
77 | int always_do_fullfsync = 0; | |
78 | SYSCTL_DECL(_vfs_generic); | |
79 | SYSCTL_INT (_vfs_generic, OID_AUTO, always_do_fullfsync, CTLFLAG_RW | CTLFLAG_LOCKED, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called"); | |
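/*
 * Illustrative note (not part of the original source): since the OID above is
 * registered under _vfs_generic with CTLFLAG_RW, this knob should be reachable
 * from userspace as vfs.generic.always_do_fullfsync. A minimal sketch of turning
 * it on, assuming the standard sysctlbyname(3) interface:
 *
 *     #include <stdio.h>
 *     #include <sys/sysctl.h>
 *
 *     int on = 1;
 *     if (sysctlbyname("vfs.generic.always_do_fullfsync", NULL, NULL, &on, sizeof(on)) != 0)
 *         perror("sysctlbyname");
 */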
80 | ||
81 | int hfs_makenode(struct vnode *dvp, struct vnode **vpp, | |
82 | struct componentname *cnp, struct vnode_attr *vap, | |
83 | vfs_context_t ctx); | |
84 | int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p); | |
85 | int hfs_metasync_all(struct hfsmount *hfsmp); | |
86 | ||
87 | int hfs_removedir(struct vnode *, struct vnode *, struct componentname *, | |
88 | int, int); | |
89 | int hfs_removefile(struct vnode *, struct vnode *, struct componentname *, | |
90 | int, int, int, struct vnode *, int); | |
91 | ||
92 | int hfs_movedata (struct vnode *, struct vnode*); | |
93 | static int hfs_move_fork (struct filefork *srcfork, struct cnode *src, | |
94 | struct filefork *dstfork, struct cnode *dst); | |
95 | ||
96 | ||
97 | #if FIFO | |
98 | static int hfsfifo_read(struct vnop_read_args *); | |
99 | static int hfsfifo_write(struct vnop_write_args *); | |
100 | static int hfsfifo_close(struct vnop_close_args *); | |
101 | ||
102 | extern int (**fifo_vnodeop_p)(void *); | |
103 | #endif /* FIFO */ | |
104 | ||
105 | int hfs_vnop_close(struct vnop_close_args*); | |
106 | int hfs_vnop_create(struct vnop_create_args*); | |
107 | int hfs_vnop_exchange(struct vnop_exchange_args*); | |
108 | int hfs_vnop_fsync(struct vnop_fsync_args*); | |
109 | int hfs_vnop_mkdir(struct vnop_mkdir_args*); | |
110 | int hfs_vnop_mknod(struct vnop_mknod_args*); | |
111 | int hfs_vnop_getattr(struct vnop_getattr_args*); | |
112 | int hfs_vnop_open(struct vnop_open_args*); | |
113 | int hfs_vnop_readdir(struct vnop_readdir_args*); | |
114 | int hfs_vnop_remove(struct vnop_remove_args*); | |
115 | int hfs_vnop_rename(struct vnop_rename_args*); | |
116 | int hfs_vnop_rmdir(struct vnop_rmdir_args*); | |
117 | int hfs_vnop_symlink(struct vnop_symlink_args*); | |
118 | int hfs_vnop_setattr(struct vnop_setattr_args*); | |
119 | int hfs_vnop_readlink(struct vnop_readlink_args *); | |
120 | int hfs_vnop_pathconf(struct vnop_pathconf_args *); | |
121 | int hfs_vnop_whiteout(struct vnop_whiteout_args *); | |
122 | int hfs_vnop_mmap(struct vnop_mmap_args *ap); | |
123 | int hfsspec_read(struct vnop_read_args *); | |
124 | int hfsspec_write(struct vnop_write_args *); | |
125 | int hfsspec_close(struct vnop_close_args *); | |
126 | ||
127 | /* Options for hfs_removedir and hfs_removefile */ | |
128 | #define HFSRM_SKIP_RESERVE 0x01 | |
129 | ||
130 | ||
131 | ||
132 | ||
133 | /***************************************************************************** | |
134 | * | |
135 | * Common Operations on vnodes | |
136 | * | |
137 | *****************************************************************************/ | |
138 | ||
139 | /* | |
140 | * Create a regular file. | |
141 | */ | |
142 | int | |
143 | hfs_vnop_create(struct vnop_create_args *ap) | |
144 | { | |
145 | int error; | |
146 | ||
147 | again: | |
148 | error = hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context); | |
149 | ||
150 | /* | |
151 | * We speculatively skipped the original lookup of the leaf | |
152 | * for CREATE. Since it exists, go get it as long as they | |
153 | * didn't want an exclusive create. | |
154 | */ | |
155 | if ((error == EEXIST) && !(ap->a_vap->va_vaflags & VA_EXCLUSIVE)) { | |
156 | struct vnop_lookup_args args; | |
157 | ||
158 | args.a_desc = &vnop_lookup_desc; | |
159 | args.a_dvp = ap->a_dvp; | |
160 | args.a_vpp = ap->a_vpp; | |
161 | args.a_cnp = ap->a_cnp; | |
162 | args.a_context = ap->a_context; | |
163 | args.a_cnp->cn_nameiop = LOOKUP; | |
164 | error = hfs_vnop_lookup(&args); | |
165 | /* | |
166 | * We can also race with remove for this file. | |
167 | */ | |
168 | if (error == ENOENT) { | |
169 | goto again; | |
170 | } | |
171 | ||
172 | /* Make sure it was a file. */ | |
173 | if ((error == 0) && !vnode_isreg(*args.a_vpp)) { | |
174 | vnode_put(*args.a_vpp); | |
175 | *args.a_vpp = NULLVP; | |
176 | error = EEXIST; | |
177 | } | |
178 | args.a_cnp->cn_nameiop = CREATE; | |
179 | } | |
180 | return (error); | |
181 | } | |
182 | ||
183 | /* | |
184 | * Make device special file. | |
185 | */ | |
186 | int | |
187 | hfs_vnop_mknod(struct vnop_mknod_args *ap) | |
188 | { | |
189 | struct vnode_attr *vap = ap->a_vap; | |
190 | struct vnode *dvp = ap->a_dvp; | |
191 | struct vnode **vpp = ap->a_vpp; | |
192 | struct cnode *cp; | |
193 | int error; | |
194 | ||
195 | if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) { | |
196 | return (ENOTSUP); | |
197 | } | |
198 | ||
199 | /* Create the vnode */ | |
200 | error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context); | |
201 | if (error) | |
202 | return (error); | |
203 | ||
204 | cp = VTOC(*vpp); | |
205 | cp->c_touch_acctime = TRUE; | |
206 | cp->c_touch_chgtime = TRUE; | |
207 | cp->c_touch_modtime = TRUE; | |
208 | ||
209 | if ((vap->va_rdev != VNOVAL) && | |
210 | (vap->va_type == VBLK || vap->va_type == VCHR)) | |
211 | cp->c_rdev = vap->va_rdev; | |
212 | ||
213 | return (0); | |
214 | } | |
215 | ||
216 | #if HFS_COMPRESSION | |
217 | /* | |
218 | * hfs_ref_data_vp(): returns the data fork vnode for a given cnode. | |
219 | * In the (hopefully rare) case where the data fork vnode is not | |
220 | * present, it will use hfs_vget() to create a new vnode for the | |
221 | * data fork. | |
222 | * | |
223 | * NOTE: If successful and a vnode is returned, the caller is responsible | |
224 | * for releasing the returned vnode with vnode_rele(). | |
225 | */ | |
226 | static int | |
227 | hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock) | |
228 | { | |
229 | int vref = 0; | |
230 | ||
231 | if (!data_vp || !cp) /* sanity check incoming parameters */ | |
232 | return EINVAL; | |
233 | ||
234 | /* maybe we should take the hfs cnode lock here, and if so, use the skiplock parameter to tell us not to */ | |
235 | ||
236 | if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK); | |
237 | struct vnode *c_vp = cp->c_vp; | |
238 | if (c_vp) { | |
239 | /* we already have a data vnode */ | |
240 | *data_vp = c_vp; | |
241 | vref = vnode_ref(*data_vp); | |
242 | if (!skiplock) hfs_unlock(cp); | |
243 | if (vref == 0) { | |
244 | return 0; | |
245 | } | |
246 | return EINVAL; | |
247 | } | |
248 | /* no data fork vnode in the cnode, so ask hfs for one. */ | |
249 | ||
250 | if (!cp->c_rsrc_vp) { | |
251 | /* if we don't have either a c_vp or c_rsrc_vp, we can't really do anything useful */ | |
252 | *data_vp = NULL; | |
253 | if (!skiplock) hfs_unlock(cp); | |
254 | return EINVAL; | |
255 | } | |
256 | ||
257 | if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1, 0) && | |
258 | 0 != *data_vp) { | |
259 | vref = vnode_ref(*data_vp); | |
260 | vnode_put(*data_vp); | |
261 | if (!skiplock) hfs_unlock(cp); | |
262 | if (vref == 0) { | |
263 | return 0; | |
264 | } | |
265 | return EINVAL; | |
266 | } | |
267 | /* there was an error getting the vnode */ | |
268 | *data_vp = NULL; | |
269 | if (!skiplock) hfs_unlock(cp); | |
270 | return EINVAL; | |
271 | } | |
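
/*
 * Caller-side sketch (illustrative, not part of the original source): per the
 * contract described above, a successful call must be paired with vnode_rele().
 * A hypothetical caller with a struct cnode *cp in scope, which does not already
 * hold the cnode lock, might look like:
 *
 *     struct vnode *data_vp = NULL;
 *     if (hfs_ref_data_vp(cp, &data_vp, 0) == 0 && data_vp != NULL) {
 *         // ... use the data fork vnode ...
 *         vnode_rele(data_vp);   // drop the usecount taken by vnode_ref()
 *     }
 */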
272 | ||
273 | /* | |
274 | * hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode, | |
275 | * allocating it if necessary; returns NULL if there was an allocation error | |
276 | */ | |
277 | static decmpfs_cnode * | |
278 | hfs_lazy_init_decmpfs_cnode(struct cnode *cp) | |
279 | { | |
280 | if (!cp->c_decmp) { | |
281 | decmpfs_cnode *dp = NULL; | |
282 | MALLOC_ZONE(dp, decmpfs_cnode *, sizeof(decmpfs_cnode), M_DECMPFS_CNODE, M_WAITOK); | |
283 | if (!dp) { | |
284 | /* error allocating a decmpfs cnode */ | |
285 | return NULL; | |
286 | } | |
287 | decmpfs_cnode_init(dp); | |
288 | if (!OSCompareAndSwapPtr(NULL, dp, (void * volatile *)&cp->c_decmp)) { | |
289 | /* another thread got here first, so free the decmpfs_cnode we allocated */ | |
290 | decmpfs_cnode_destroy(dp); | |
291 | FREE_ZONE(dp, sizeof(*dp), M_DECMPFS_CNODE); | |
292 | } | |
293 | } | |
294 | ||
295 | return cp->c_decmp; | |
296 | } | |
297 | ||
298 | /* | |
299 | * hfs_file_is_compressed(): returns 1 if the file is compressed, and 0 if not. | |
300 | * If the file's compressed flag is set, this makes sure that the decmpfs_cnode field | |
301 | * is allocated by calling hfs_lazy_init_decmpfs_cnode(), then makes sure its state is | |
302 | * populated, filling it in via the decmpfs_file_is_compressed() function if necessary. | |
303 | */ | |
304 | int | |
305 | hfs_file_is_compressed(struct cnode *cp, int skiplock) | |
306 | { | |
307 | int ret = 0; | |
308 | ||
309 | /* fast check to see if file is compressed. If flag is clear, just answer no */ | |
310 | if (!(cp->c_flags & UF_COMPRESSED)) { | |
311 | return 0; | |
312 | } | |
313 | ||
314 | decmpfs_cnode *dp = hfs_lazy_init_decmpfs_cnode(cp); | |
315 | if (!dp) { | |
316 | /* error allocating a decmpfs cnode, treat the file as uncompressed */ | |
317 | return 0; | |
318 | } | |
319 | ||
320 | /* flag was set, see if the decmpfs_cnode state is valid (zero == invalid) */ | |
321 | uint32_t decmpfs_state = decmpfs_cnode_get_vnode_state(dp); | |
322 | switch(decmpfs_state) { | |
323 | case FILE_IS_COMPRESSED: | |
324 | case FILE_IS_CONVERTING: /* treat decompressing files as if they are compressed */ | |
325 | return 1; | |
326 | case FILE_IS_NOT_COMPRESSED: | |
327 | return 0; | |
328 | /* otherwise the state is not cached yet */ | |
329 | } | |
330 | ||
331 | /* decmpfs hasn't seen this file yet, so call decmpfs_file_is_compressed() to init the decmpfs_cnode struct */ | |
332 | struct vnode *data_vp = NULL; | |
333 | if (0 == hfs_ref_data_vp(cp, &data_vp, skiplock)) { | |
334 | if (data_vp) { | |
335 | ret = decmpfs_file_is_compressed(data_vp, VTOCMP(data_vp)); // fill in decmpfs_cnode | |
336 | vnode_rele(data_vp); | |
337 | } | |
338 | } | |
339 | return ret; | |
340 | } | |
341 | ||
342 | /* hfs_uncompressed_size_of_compressed_file() - get the uncompressed size of the file. | |
343 | * If the caller has passed a valid vnode (with a ref count > 0), then hfsmp and fid are not required. | |
344 | * If the caller doesn't have a vnode, pass NULL in vp, and pass a valid hfsmp and fid. | |
345 | * The file's size is returned in *size (required). | |
346 | * If the indicated file is a directory (or something that doesn't have a data fork), then this call | |
347 | * will return an error and the caller should fall back to treating the item as an uncompressed file. | |
348 | */ | |
349 | int | |
350 | hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock) | |
351 | { | |
352 | int ret = 0; | |
353 | int putaway = 0; /* flag to remember if we used hfs_vget() */ | |
354 | ||
355 | if (!size) { | |
356 | return EINVAL; /* no place to put the file size */ | |
357 | } | |
358 | ||
359 | if (NULL == vp) { | |
360 | if (!hfsmp || !fid) { /* make sure we have the required parameters */ | |
361 | return EINVAL; | |
362 | } | |
363 | if (0 != hfs_vget(hfsmp, fid, &vp, skiplock, 0)) { /* vnode is null, use hfs_vget() to get it */ | |
364 | vp = NULL; | |
365 | } else { | |
366 | putaway = 1; /* note that hfs_vget() was used to acquire the vnode */ | |
367 | } | |
368 | } | |
369 | /* this double check for compression (hfs_file_is_compressed) | |
370 | * ensures the cached size is present in case decmpfs hasn't | |
371 | * encountered this node yet. | |
372 | */ | |
373 | if (vp) { | |
374 | if (hfs_file_is_compressed(VTOC(vp), skiplock) ) { | |
375 | *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */ | |
376 | } else { | |
377 | if (VTOCMP(vp) && VTOCMP(vp)->cmp_type >= CMP_MAX) { | |
378 | if (VTOCMP(vp)->cmp_type != DATALESS_CMPFS_TYPE) { | |
379 | // if we don't recognize this type, just use the real data fork size | |
380 | if (VTOC(vp)->c_datafork) { | |
381 | *size = VTOC(vp)->c_datafork->ff_size; | |
382 | ret = 0; | |
383 | } else { | |
384 | ret = EINVAL; | |
385 | } | |
386 | } else { | |
387 | *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */ | |
388 | ret = 0; | |
389 | } | |
390 | } else { | |
391 | ret = EINVAL; | |
392 | } | |
393 | } | |
394 | } | |
395 | ||
396 | if (putaway) { /* did we use hfs_vget() to get this vnode? */ | |
397 | vnode_put(vp); /* if so, release it and set it to null */ | |
398 | vp = NULL; | |
399 | } | |
400 | return ret; | |
401 | } | |
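
/*
 * Usage sketch (illustrative, not part of the original source): the routine supports
 * the two calling modes described in the block comment above. Assuming a hypothetical
 * caller with locals vp, hfsmp and fid in scope:
 *
 *     off_t size = 0;
 *     int err;
 *
 *     // Mode 1: the caller already holds a vnode with a ref count > 0
 *     err = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &size, 0);
 *
 *     // Mode 2: no vnode in hand; pass a valid hfsmp and file ID instead
 *     err = hfs_uncompressed_size_of_compressed_file(hfsmp, NULL, fid, &size, 0);
 */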
402 | ||
403 | int | |
404 | hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock) | |
405 | { | |
406 | if (ctx == decmpfs_ctx) | |
407 | return 0; | |
408 | if (!hfs_file_is_compressed(cp, skiplock)) | |
409 | return 0; | |
410 | return decmpfs_hides_rsrc(ctx, cp->c_decmp); | |
411 | } | |
412 | ||
413 | int | |
414 | hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock) | |
415 | { | |
416 | if (ctx == decmpfs_ctx) | |
417 | return 0; | |
418 | if (!hfs_file_is_compressed(cp, skiplock)) | |
419 | return 0; | |
420 | return decmpfs_hides_xattr(ctx, cp->c_decmp, name); | |
421 | } | |
422 | #endif /* HFS_COMPRESSION */ | |
423 | ||
424 | /* | |
425 | * Open a file/directory. | |
426 | */ | |
427 | int | |
428 | hfs_vnop_open(struct vnop_open_args *ap) | |
429 | { | |
430 | struct vnode *vp = ap->a_vp; | |
431 | struct filefork *fp; | |
432 | struct timeval tv; | |
433 | int error; | |
434 | static int past_bootup = 0; | |
435 | struct cnode *cp = VTOC(vp); | |
436 | struct hfsmount *hfsmp = VTOHFS(vp); | |
437 | ||
438 | #if HFS_COMPRESSION | |
439 | if (ap->a_mode & FWRITE) { | |
440 | /* open for write */ | |
441 | if ( hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */ | |
442 | /* opening a compressed file for write, so convert it to decompressed */ | |
443 | struct vnode *data_vp = NULL; | |
444 | error = hfs_ref_data_vp(cp, &data_vp, 1); /* 1 == don't take the cnode lock */ | |
445 | if (0 == error) { | |
446 | if (data_vp) { | |
447 | error = decmpfs_decompress_file(data_vp, VTOCMP(data_vp), -1, 1, 0); | |
448 | vnode_rele(data_vp); | |
449 | } else { | |
450 | error = EINVAL; | |
451 | } | |
452 | } | |
453 | if (error != 0) | |
454 | return error; | |
455 | } | |
456 | } else { | |
457 | /* open for read */ | |
458 | if (hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */ | |
459 | if (VNODE_IS_RSRC(vp)) { | |
460 | /* opening the resource fork of a compressed file, so nothing to do */ | |
461 | } else { | |
462 | /* opening a compressed file for read, make sure it validates */ | |
463 | error = decmpfs_validate_compressed_file(vp, VTOCMP(vp)); | |
464 | if (error != 0) | |
465 | return error; | |
466 | } | |
467 | } | |
468 | } | |
469 | #endif | |
470 | ||
471 | /* | |
472 | * Files marked append-only must be opened for appending. | |
473 | */ | |
474 | if ((cp->c_flags & APPEND) && !vnode_isdir(vp) && | |
475 | (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE) | |
476 | return (EPERM); | |
477 | ||
478 | if (vnode_isreg(vp) && !UBCINFOEXISTS(vp)) | |
479 | return (EBUSY); /* file is in use by the kernel */ | |
480 | ||
481 | /* Don't allow journal file to be opened externally. */ | |
482 | if (cp->c_fileid == hfsmp->hfs_jnlfileid) | |
483 | return (EPERM); | |
484 | ||
485 | /* If we're going to write to the file, initialize quotas. */ | |
486 | #if QUOTA | |
487 | if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS)) | |
488 | (void)hfs_getinoquota(cp); | |
489 | #endif /* QUOTA */ | |
490 | ||
491 | /* | |
492 | * On the first (non-busy) open of a fragmented | |
493 | * file, attempt to de-frag it (if it's less than 20MB). | |
494 | */ | |
495 | if ((hfsmp->hfs_flags & HFS_READ_ONLY) || | |
496 | (hfsmp->jnl == NULL) || | |
497 | #if NAMEDSTREAMS | |
498 | !vnode_isreg(vp) || vnode_isinuse(vp, 0) || vnode_isnamedstream(vp)) { | |
499 | #else | |
500 | !vnode_isreg(vp) || vnode_isinuse(vp, 0)) { | |
501 | #endif | |
502 | return (0); | |
503 | } | |
504 | ||
505 | if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK))) | |
506 | return (error); | |
507 | fp = VTOF(vp); | |
508 | if (fp->ff_blocks && | |
509 | fp->ff_extents[7].blockCount != 0 && | |
510 | fp->ff_size <= (20 * 1024 * 1024)) { | |
511 | int no_mods = 0; | |
512 | struct timeval now; | |
513 | /* | |
514 | * Wait until system bootup is done (3 min). | |
515 | * And don't relocate a file that's been modified | |
516 | * within the past minute -- this can lead to | |
517 | * system thrashing. | |
518 | */ | |
519 | ||
520 | if (!past_bootup) { | |
521 | microuptime(&tv); | |
522 | if (tv.tv_sec > (60*3)) { | |
523 | past_bootup = 1; | |
524 | } | |
525 | } | |
526 | ||
527 | microtime(&now); | |
528 | if ((now.tv_sec - cp->c_mtime) > 60) { | |
529 | no_mods = 1; | |
530 | } | |
531 | ||
532 | if (past_bootup && no_mods) { | |
533 | (void) hfs_relocate(vp, hfsmp->nextAllocation + 4096, | |
534 | vfs_context_ucred(ap->a_context), | |
535 | vfs_context_proc(ap->a_context)); | |
536 | } | |
537 | } | |
538 | hfs_unlock(cp); | |
539 | ||
540 | return (0); | |
541 | } | |
542 | ||
543 | ||
544 | /* | |
545 | * Close a file/directory. | |
546 | */ | |
547 | int | |
548 | hfs_vnop_close(ap) | |
549 | struct vnop_close_args /* { | |
550 | struct vnode *a_vp; | |
551 | int a_fflag; | |
552 | vfs_context_t a_context; | |
553 | } */ *ap; | |
554 | { | |
555 | register struct vnode *vp = ap->a_vp; | |
556 | register struct cnode *cp; | |
557 | struct proc *p = vfs_context_proc(ap->a_context); | |
558 | struct hfsmount *hfsmp; | |
559 | int busy; | |
560 | int tooktrunclock = 0; | |
561 | int knownrefs = 0; | |
562 | ||
563 | if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) | |
564 | return (0); | |
565 | cp = VTOC(vp); | |
566 | hfsmp = VTOHFS(vp); | |
567 | ||
568 | /* | |
569 | * If the rsrc fork is a named stream, it can cause the data fork to | |
570 | * stay around, preventing de-allocation of these blocks. | |
571 | * Do checks for truncation on close. Purge extra extents if they exist. | |
572 | * Make sure the vp is not a directory, and that it has a resource fork, | |
573 | * and that resource fork is also a named stream. | |
574 | */ | |
575 | ||
576 | if ((vp->v_type == VREG) && (cp->c_rsrc_vp) | |
577 | && (vnode_isnamedstream(cp->c_rsrc_vp))) { | |
578 | uint32_t blks; | |
579 | ||
580 | blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize); | |
581 | /* | |
582 | * If there are extra blocks and there are only 2 refs on | |
583 | * this vp (ourselves + rsrc fork holding ref on us), go ahead | |
584 | * and try to truncate. | |
585 | */ | |
586 | if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) { | |
587 | // release cnode lock; must acquire truncate lock BEFORE cnode lock | |
588 | hfs_unlock(cp); | |
589 | ||
590 | hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK); | |
591 | tooktrunclock = 1; | |
592 | ||
593 | if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) { | |
594 | hfs_unlock_truncate(cp, 0); | |
595 | // bail out if we can't re-acquire cnode lock | |
596 | return 0; | |
597 | } | |
598 | // now re-test to make sure it's still valid | |
599 | if (cp->c_rsrc_vp) { | |
600 | knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp); | |
601 | if (!vnode_isinuse(vp, knownrefs)){ | |
602 | // now we can truncate the file, if necessary | |
603 | blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize); | |
604 | if (blks < VTOF(vp)->ff_blocks){ | |
605 | (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY, 0, 0, ap->a_context); | |
606 | } | |
607 | } | |
608 | } | |
609 | } | |
610 | } | |
611 | ||
612 | ||
613 | // if we froze the fs and we're exiting, then "thaw" the fs | |
614 | if (hfsmp->hfs_freezing_proc == p && proc_exiting(p)) { | |
615 | hfsmp->hfs_freezing_proc = NULL; | |
616 | hfs_unlock_global (hfsmp); | |
617 | lck_rw_unlock_exclusive(&hfsmp->hfs_insync); | |
618 | } | |
619 | ||
620 | busy = vnode_isinuse(vp, 1); | |
621 | ||
622 | if (busy) { | |
623 | hfs_touchtimes(VTOHFS(vp), cp); | |
624 | } | |
625 | if (vnode_isdir(vp)) { | |
626 | hfs_reldirhints(cp, busy); | |
627 | } else if (vnode_issystem(vp) && !busy) { | |
628 | vnode_recycle(vp); | |
629 | } | |
630 | ||
631 | if (tooktrunclock){ | |
632 | hfs_unlock_truncate(cp, 0); | |
633 | } | |
634 | hfs_unlock(cp); | |
635 | ||
636 | if (ap->a_fflag & FWASWRITTEN) { | |
637 | hfs_sync_ejectable(hfsmp); | |
638 | } | |
639 | ||
640 | return (0); | |
641 | } | |
642 | ||
643 | /* | |
644 | * Get basic attributes. | |
645 | */ | |
646 | int | |
647 | hfs_vnop_getattr(struct vnop_getattr_args *ap) | |
648 | { | |
649 | #define VNODE_ATTR_TIMES \ | |
650 | (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time) | |
651 | #define VNODE_ATTR_AUTH \ | |
652 | (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \ | |
653 | VNODE_ATTR_va_flags | VNODE_ATTR_va_acl) | |
654 | ||
655 | struct vnode *vp = ap->a_vp; | |
656 | struct vnode_attr *vap = ap->a_vap; | |
657 | struct vnode *rvp = NULLVP; | |
658 | struct hfsmount *hfsmp; | |
659 | struct cnode *cp; | |
660 | uint64_t data_size; | |
661 | enum vtype v_type; | |
662 | int error = 0; | |
663 | cp = VTOC(vp); | |
664 | ||
665 | #if HFS_COMPRESSION | |
666 | /* we need to inspect the decmpfs state of the file before we take the hfs cnode lock */ | |
667 | int compressed = 0; | |
668 | int hide_size = 0; | |
669 | off_t uncompressed_size = -1; | |
670 | if (VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_alloc) || VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size)) { | |
671 | /* we only care about whether the file is compressed if asked for the uncompressed size */ | |
672 | if (VNODE_IS_RSRC(vp)) { | |
673 | /* if it's a resource fork, decmpfs may want us to hide the size */ | |
674 | hide_size = hfs_hides_rsrc(ap->a_context, cp, 0); | |
675 | } else { | |
676 | /* if it's a data fork, we need to know if it was compressed so we can report the uncompressed size */ | |
677 | compressed = hfs_file_is_compressed(cp, 0); | |
678 | } | |
679 | if ((VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_size))) { | |
680 | // if it's compressed | |
681 | if (compressed || (!VNODE_IS_RSRC(vp) && cp->c_decmp && cp->c_decmp->cmp_type >= CMP_MAX)) { | |
682 | if (0 != hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0)) { | |
683 | /* failed to get the uncompressed size, we'll check for this later */ | |
684 | uncompressed_size = -1; | |
685 | } else { | |
686 | // fake that it's compressed | |
687 | compressed = 1; | |
688 | } | |
689 | } | |
690 | } | |
691 | } | |
692 | #endif | |
693 | ||
694 | /* | |
695 | * Shortcut for vnode_authorize path. Each of the attributes | |
696 | * in this set is updated atomically so we don't need to take | |
697 | * the cnode lock to access them. | |
698 | */ | |
699 | if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) { | |
700 | /* Make sure file still exists. */ | |
701 | if (cp->c_flag & C_NOEXISTS) | |
702 | return (ENOENT); | |
703 | ||
704 | vap->va_uid = cp->c_uid; | |
705 | vap->va_gid = cp->c_gid; | |
706 | vap->va_mode = cp->c_mode; | |
707 | vap->va_flags = cp->c_flags; | |
708 | vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl; | |
709 | ||
710 | if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) { | |
711 | vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE; | |
712 | VATTR_SET_SUPPORTED(vap, va_acl); | |
713 | } | |
714 | ||
715 | return (0); | |
716 | } | |
717 | ||
718 | hfsmp = VTOHFS(vp); | |
719 | v_type = vnode_vtype(vp); | |
720 | /* | |
721 | * If time attributes are requested and we have cnode times | |
722 | * that require updating, then acquire an exclusive lock on | |
723 | * the cnode before updating the times. Otherwise we can | |
724 | * just acquire a shared lock. | |
725 | */ | |
726 | if ((vap->va_active & VNODE_ATTR_TIMES) && | |
727 | (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) { | |
728 | if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK))) | |
729 | return (error); | |
730 | hfs_touchtimes(hfsmp, cp); | |
731 | } | |
732 | else { | |
733 | if ((error = hfs_lock(cp, HFS_SHARED_LOCK))) | |
734 | return (error); | |
735 | } | |
736 | ||
737 | if (v_type == VDIR) { | |
738 | data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE; | |
739 | ||
740 | if (VATTR_IS_ACTIVE(vap, va_nlink)) { | |
741 | int nlink; | |
742 | ||
743 | /* | |
744 | * For directories, the va_nlink is essentially a count | |
745 | * of the ".." references to a directory plus the "." | |
746 | * reference and the directory itself. So for HFS+ this | |
747 | * becomes the sub-directory count plus two. | |
748 | * | |
749 | * In the absence of a sub-directory count we use the | |
750 | * directory's item count. This will be too high in | |
751 | * most cases since it also includes files. | |
752 | */ | |
753 | if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) && | |
754 | (cp->c_attr.ca_recflags & kHFSHasFolderCountMask)) | |
755 | nlink = cp->c_attr.ca_dircount; /* implied ".." entries */ | |
756 | else | |
757 | nlink = cp->c_entries; | |
758 | ||
759 | /* Account for ourself and our "." entry */ | |
760 | nlink += 2; | |
761 | /* Hide our private directories. */ | |
762 | if (cp->c_cnid == kHFSRootFolderID) { | |
763 | if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) { | |
764 | --nlink; | |
765 | } | |
766 | if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) { | |
767 | --nlink; | |
768 | } | |
769 | } | |
770 | VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink); | |
771 | } | |
772 | if (VATTR_IS_ACTIVE(vap, va_nchildren)) { | |
773 | int entries; | |
774 | ||
775 | entries = cp->c_entries; | |
776 | /* Hide our private files and directories. */ | |
777 | if (cp->c_cnid == kHFSRootFolderID) { | |
778 | if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) | |
779 | --entries; | |
780 | if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) | |
781 | --entries; | |
782 | if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY))) | |
783 | entries -= 2; /* hide the journal files */ | |
784 | } | |
785 | VATTR_RETURN(vap, va_nchildren, entries); | |
786 | } | |
787 | /* | |
788 | * The va_dirlinkcount is the count of real directory hard links. | |
789 | * (i.e. it's not the sum of the implied "." and ".." references) | |
790 | */ | |
791 | if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) { | |
792 | VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount); | |
793 | } | |
794 | } else /* !VDIR */ { | |
795 | data_size = VCTOF(vp, cp)->ff_size; | |
796 | ||
797 | VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount); | |
798 | if (VATTR_IS_ACTIVE(vap, va_data_alloc)) { | |
799 | u_int64_t blocks; | |
800 | ||
801 | #if HFS_COMPRESSION | |
802 | if (hide_size) { | |
803 | VATTR_RETURN(vap, va_data_alloc, 0); | |
804 | } else if (compressed) { | |
805 | /* for compressed files, we report all allocated blocks as belonging to the data fork */ | |
806 | blocks = cp->c_blocks; | |
807 | VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize); | |
808 | } | |
809 | else | |
810 | #endif | |
811 | { | |
812 | blocks = VCTOF(vp, cp)->ff_blocks; | |
813 | VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize); | |
814 | } | |
815 | } | |
816 | } | |
817 | ||
818 | /* conditional because 64-bit arithmetic can be expensive */ | |
819 | if (VATTR_IS_ACTIVE(vap, va_total_size)) { | |
820 | if (v_type == VDIR) { | |
821 | VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE); | |
822 | } else { | |
823 | u_int64_t total_size = ~0ULL; | |
824 | struct cnode *rcp; | |
825 | #if HFS_COMPRESSION | |
826 | if (hide_size) { | |
827 | /* we're hiding the size of this file, so just return 0 */ | |
828 | total_size = 0; | |
829 | } else if (compressed) { | |
830 | if (uncompressed_size == -1) { | |
831 | /* | |
832 | * We failed to get the uncompressed size above, | |
833 | * so we'll fall back to the standard path below | |
834 | * since total_size is still -1 | |
835 | */ | |
836 | } else { | |
837 | /* use the uncompressed size we fetched above */ | |
838 | total_size = uncompressed_size; | |
839 | } | |
840 | } | |
841 | #endif | |
842 | if (total_size == ~0ULL) { | |
843 | if (cp->c_datafork) { | |
844 | total_size = cp->c_datafork->ff_size; | |
845 | } | |
846 | ||
847 | if (cp->c_blocks - VTOF(vp)->ff_blocks) { | |
848 | /* We deal with rsrc fork vnode iocount at the end of the function */ | |
849 | error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE, FALSE); | |
850 | if (error) { | |
851 | /* | |
852 | * Note that we call hfs_vgetrsrc with error_on_unlinked | |
853 | * set to FALSE. This is because we may be invoked via | |
854 | * fstat() on an open-unlinked file descriptor and we must | |
855 | * continue to support access to the rsrc fork until it disappears. | |
856 | * The code at the end of this function will be | |
857 | * responsible for releasing the iocount generated by | |
858 | * hfs_vgetrsrc. This is because we can't drop the iocount | |
859 | * without unlocking the cnode first. | |
860 | */ | |
861 | goto out; | |
862 | } | |
863 | ||
864 | rcp = VTOC(rvp); | |
865 | if (rcp && rcp->c_rsrcfork) { | |
866 | total_size += rcp->c_rsrcfork->ff_size; | |
867 | } | |
868 | } | |
869 | } | |
870 | ||
871 | VATTR_RETURN(vap, va_total_size, total_size); | |
872 | } | |
873 | } | |
874 | if (VATTR_IS_ACTIVE(vap, va_total_alloc)) { | |
875 | if (v_type == VDIR) { | |
876 | VATTR_RETURN(vap, va_total_alloc, 0); | |
877 | } else { | |
878 | VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize); | |
879 | } | |
880 | } | |
881 | ||
882 | /* | |
883 | * If the VFS wants extended security data, and we know that we | |
884 | * don't have any (because it never told us it was setting any) | |
885 | * then we can return the supported bit and no data. If we do | |
886 | * have extended security, we can just leave the bit alone and | |
887 | * the VFS will use the fallback path to fetch it. | |
888 | */ | |
889 | if (VATTR_IS_ACTIVE(vap, va_acl)) { | |
890 | if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) { | |
891 | vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE; | |
892 | VATTR_SET_SUPPORTED(vap, va_acl); | |
893 | } | |
894 | } | |
895 | if (VATTR_IS_ACTIVE(vap, va_access_time)) { | |
896 | /* Access times are lazily updated, get current time if needed */ | |
897 | if (cp->c_touch_acctime) { | |
898 | struct timeval tv; | |
899 | ||
900 | microtime(&tv); | |
901 | vap->va_access_time.tv_sec = tv.tv_sec; | |
902 | } else { | |
903 | vap->va_access_time.tv_sec = cp->c_atime; | |
904 | } | |
905 | vap->va_access_time.tv_nsec = 0; | |
906 | VATTR_SET_SUPPORTED(vap, va_access_time); | |
907 | } | |
908 | vap->va_create_time.tv_sec = cp->c_itime; | |
909 | vap->va_create_time.tv_nsec = 0; | |
910 | vap->va_modify_time.tv_sec = cp->c_mtime; | |
911 | vap->va_modify_time.tv_nsec = 0; | |
912 | vap->va_change_time.tv_sec = cp->c_ctime; | |
913 | vap->va_change_time.tv_nsec = 0; | |
914 | vap->va_backup_time.tv_sec = cp->c_btime; | |
915 | vap->va_backup_time.tv_nsec = 0; | |
916 | ||
917 | /* See if we need to emit the date added field to the user */ | |
918 | if (VATTR_IS_ACTIVE(vap, va_addedtime)) { | |
919 | u_int32_t dateadded = hfs_get_dateadded (cp); | |
920 | if (dateadded) { | |
921 | vap->va_addedtime.tv_sec = dateadded; | |
922 | vap->va_addedtime.tv_nsec = 0; | |
923 | VATTR_SET_SUPPORTED (vap, va_addedtime); | |
924 | } | |
925 | } | |
926 | ||
927 | ||
928 | /* XXX is this really a good 'optimal I/O size'? */ | |
929 | vap->va_iosize = hfsmp->hfs_logBlockSize; | |
930 | vap->va_uid = cp->c_uid; | |
931 | vap->va_gid = cp->c_gid; | |
932 | vap->va_mode = cp->c_mode; | |
933 | vap->va_flags = cp->c_flags; | |
934 | ||
935 | /* | |
936 | * Exporting file IDs from HFS Plus: | |
937 | * | |
938 | * For "normal" files the c_fileid is the same value as the | |
939 | * c_cnid. But for hard link files, they are different - the | |
940 | * c_cnid belongs to the active directory entry (ie the link) | |
941 | * and the c_fileid is for the actual inode (ie the data file). | |
942 | * | |
943 | * The stat call (getattr) uses va_fileid and the Carbon APIs, | |
944 | * which are hardlink-ignorant, will ask for va_linkid. | |
945 | */ | |
946 | vap->va_fileid = (u_int64_t)cp->c_fileid; | |
947 | /* | |
948 | * We need to use the origin cache for both hardlinked files | |
949 | * and directories. Hardlinked directories have multiple cnids | |
950 | * and parents (one per link). Hardlinked files also have their | |
951 | * own parents and link IDs separate from the indirect inode number. | |
952 | * If we don't use the cache, we could end up vending the wrong ID | |
953 | * because the cnode will only reflect the link that was looked up most recently. | |
954 | */ | |
955 | if (cp->c_flag & C_HARDLINK) { | |
956 | vap->va_linkid = (u_int64_t)hfs_currentcnid(cp); | |
957 | vap->va_parentid = (u_int64_t)hfs_currentparent(cp); | |
958 | } else { | |
959 | vap->va_linkid = (u_int64_t)cp->c_cnid; | |
960 | vap->va_parentid = (u_int64_t)cp->c_parentcnid; | |
961 | } | |
962 | vap->va_fsid = hfsmp->hfs_raw_dev; | |
963 | vap->va_filerev = 0; | |
964 | vap->va_encoding = cp->c_encoding; | |
965 | vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0; | |
966 | #if HFS_COMPRESSION | |
967 | if (VATTR_IS_ACTIVE(vap, va_data_size)) { | |
968 | if (hide_size) | |
969 | vap->va_data_size = 0; | |
970 | else if (compressed) { | |
971 | if (uncompressed_size == -1) { | |
972 | /* failed to get the uncompressed size above, so just return data_size */ | |
973 | vap->va_data_size = data_size; | |
974 | } else { | |
975 | /* use the uncompressed size we fetched above */ | |
976 | vap->va_data_size = uncompressed_size; | |
977 | } | |
978 | } else | |
979 | vap->va_data_size = data_size; | |
980 | // vap->va_supported |= VNODE_ATTR_va_data_size; | |
981 | VATTR_SET_SUPPORTED(vap, va_data_size); | |
982 | } | |
983 | #else | |
984 | vap->va_data_size = data_size; | |
985 | vap->va_supported |= VNODE_ATTR_va_data_size; | |
986 | #endif | |
987 | ||
988 | /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */ | |
989 | vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time | | |
990 | VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time | | |
991 | VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid | | |
992 | VNODE_ATTR_va_gid | VNODE_ATTR_va_mode | | |
993 | VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid | | |
994 | VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid | | |
995 | VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev | | |
996 | VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev; | |
997 | ||
998 | /* If this is the root, let VFS find out the mount name, which | |
999 | * may be different from the real name. Otherwise, we need to take care | |
1000 | * of hardlinked files, which may need to be looked up, if necessary. | |
1001 | */ | |
1002 | if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) { | |
1003 | struct cat_desc linkdesc; | |
1004 | int lockflags; | |
1005 | int uselinkdesc = 0; | |
1006 | cnid_t nextlinkid = 0; | |
1007 | cnid_t prevlinkid = 0; | |
1008 | ||
1009 | /* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks | |
1010 | * here because the info for the link ID requested by getattrlist may be | |
1011 | * different from what's currently in the cnode. This is because the cnode | |
1012 | * will be filled in with the information for the most recent link ID that went | |
1013 | * through namei/lookup(). If there are competing lookups for hardlinks that point | |
1014 | * to the same inode, one (or more) getattrlists could be vended incorrect name information. | |
1015 | * Also, we need to beware of open-unlinked files which could have a namelen of 0. | |
1016 | */ | |
1017 | ||
1018 | if ((cp->c_flag & C_HARDLINK) && | |
1019 | ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) { | |
1020 | /* If we have no name and our link ID is the raw inode number, then we may | |
1021 | * have an open-unlinked file. Go to the next link in this case. | |
1022 | */ | |
1023 | if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) { | |
1024 | if ((error = hfs_lookup_siblinglinks(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))){ | |
1025 | goto out; | |
1026 | } | |
1027 | } | |
1028 | else { | |
1029 | /* just use link obtained from vap above */ | |
1030 | nextlinkid = vap->va_linkid; | |
1031 | } | |
1032 | ||
1033 | /* We need to probe the catalog for the descriptor corresponding to the link ID | |
1034 | * stored in nextlinkid. Note that we don't know if we have the exclusive lock | |
1035 | * for the cnode here, so we can't just update the descriptor. Instead, | |
1036 | * we should just store the descriptor's value locally and then use it to pass | |
1037 | * out the name value as needed below. | |
1038 | */ | |
1039 | if (nextlinkid){ | |
1040 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); | |
1041 | error = cat_findname(hfsmp, nextlinkid, &linkdesc); | |
1042 | hfs_systemfile_unlock(hfsmp, lockflags); | |
1043 | if (error == 0) { | |
1044 | uselinkdesc = 1; | |
1045 | } | |
1046 | } | |
1047 | } | |
1048 | ||
1049 | /* By this point, we've either patched up the name above and the c_desc | |
1050 | * points to the correct data, or it already did, in which case we just proceed | |
1051 | * by copying the name into the vap. Note that we will never set va_name to | |
1052 | * supported if nextlinkid is never initialized. This could happen in the degenerate | |
1053 | * case above involving the raw inode number, where it has no nextlinkid. In this case | |
1054 | * we will simply not mark the name bit as supported. | |
1055 | */ | |
1056 | if (uselinkdesc) { | |
1057 | strlcpy(vap->va_name, (const char*) linkdesc.cd_nameptr, MAXPATHLEN); | |
1058 | VATTR_SET_SUPPORTED(vap, va_name); | |
1059 | cat_releasedesc(&linkdesc); | |
1060 | } | |
1061 | else if (cp->c_desc.cd_namelen) { | |
1062 | strlcpy(vap->va_name, (const char*) cp->c_desc.cd_nameptr, MAXPATHLEN); | |
1063 | VATTR_SET_SUPPORTED(vap, va_name); | |
1064 | } | |
1065 | } | |
1066 | ||
1067 | out: | |
1068 | hfs_unlock(cp); | |
1069 | /* | |
1070 | * We need to vnode_put the rsrc fork vnode only *after* we've released | |
1071 | * the cnode lock, since vnode_put can trigger an inactive call, which | |
1072 | * will go back into HFS and try to acquire a cnode lock. | |
1073 | */ | |
1074 | if (rvp) { | |
1075 | vnode_put (rvp); | |
1076 | } | |
1077 | ||
1078 | return (error); | |
1079 | } | |
1080 | ||
1081 | int | |
1082 | hfs_vnop_setattr(ap) | |
1083 | struct vnop_setattr_args /* { | |
1084 | struct vnode *a_vp; | |
1085 | struct vnode_attr *a_vap; | |
1086 | vfs_context_t a_context; | |
1087 | } */ *ap; | |
1088 | { | |
1089 | struct vnode_attr *vap = ap->a_vap; | |
1090 | struct vnode *vp = ap->a_vp; | |
1091 | struct cnode *cp = NULL; | |
1092 | struct hfsmount *hfsmp; | |
1093 | kauth_cred_t cred = vfs_context_ucred(ap->a_context); | |
1094 | struct proc *p = vfs_context_proc(ap->a_context); | |
1095 | int error = 0; | |
1096 | uid_t nuid; | |
1097 | gid_t ngid; | |
1098 | time_t orig_ctime; | |
1099 | ||
1100 | orig_ctime = VTOC(vp)->c_ctime; | |
1101 | ||
1102 | #if HFS_COMPRESSION | |
1103 | int decmpfs_reset_state = 0; | |
1104 | /* | |
1105 | we call decmpfs_update_attributes even if the file is not compressed | |
1106 | because we want to update the incoming flags if the xattrs are invalid | |
1107 | */ | |
1108 | error = decmpfs_update_attributes(vp, vap); | |
1109 | if (error) | |
1110 | return error; | |
1111 | ||
1112 | // | |
1113 | // if this is not a size-changing setattr and it is not just | |
1114 | // an atime update, then check for a snapshot. | |
1115 | // | |
1116 | if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) { | |
1117 | check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NULL); | |
1118 | } | |
1119 | #endif | |
1120 | ||
1121 | ||
1122 | #if CONFIG_PROTECT | |
1123 | if ((error = cp_handle_vnop(VTOC(vp), CP_WRITE_ACCESS)) != 0) { | |
1124 | return (error); | |
1125 | } | |
1126 | #endif /* CONFIG_PROTECT */ | |
1127 | ||
1128 | hfsmp = VTOHFS(vp); | |
1129 | ||
1130 | /* Don't allow modification of the journal file. */ | |
1131 | if (hfsmp->hfs_jnlfileid == VTOC(vp)->c_fileid) { | |
1132 | return (EPERM); | |
1133 | } | |
1134 | ||
1135 | /* | |
1136 | * File size change request. | |
1137 | * We are guaranteed that this is not a directory, and that | |
1138 | * the filesystem object is writeable. | |
1139 | * | |
1140 | * NOTE: HFS COMPRESSION depends on the data_size being set *before* the bsd flags are updated | |
1141 | */ | |
1142 | VATTR_SET_SUPPORTED(vap, va_data_size); | |
1143 | if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) { | |
1144 | #if HFS_COMPRESSION | |
1145 | /* keep the compressed state locked until we're done truncating the file */ | |
1146 | decmpfs_cnode *dp = VTOCMP(vp); | |
1147 | if (!dp) { | |
1148 | /* | |
1149 | * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode | |
1150 | * is filled in; we need a decmpfs_cnode to lock out decmpfs state changes | |
1151 | * on this file while it's truncating | |
1152 | */ | |
1153 | dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp)); | |
1154 | if (!dp) { | |
1155 | /* failed to allocate a decmpfs_cnode */ | |
1156 | return ENOMEM; /* what should this be? */ | |
1157 | } | |
1158 | } | |
1159 | ||
1160 | check_for_tracked_file(vp, orig_ctime, vap->va_data_size == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL); | |
1161 | ||
1162 | decmpfs_lock_compressed_data(dp, 1); | |
1163 | if (hfs_file_is_compressed(VTOC(vp), 1)) { | |
1164 | error = decmpfs_decompress_file(vp, dp, -1/*vap->va_data_size*/, 0, 1); | |
1165 | if (error != 0) { | |
1166 | decmpfs_unlock_compressed_data(dp, 1); | |
1167 | return error; | |
1168 | } | |
1169 | } | |
1170 | #endif | |
1171 | ||
1172 | /* Take truncate lock before taking cnode lock. */ | |
1173 | hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK); | |
1174 | ||
1175 | /* Perform the ubc_setsize before taking the cnode lock. */ | |
1176 | ubc_setsize(vp, vap->va_data_size); | |
1177 | ||
1178 | if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) { | |
1179 | hfs_unlock_truncate(VTOC(vp), 0); | |
1180 | #if HFS_COMPRESSION | |
1181 | decmpfs_unlock_compressed_data(dp, 1); | |
1182 | #endif | |
1183 | return (error); | |
1184 | } | |
1185 | cp = VTOC(vp); | |
1186 | ||
1187 | error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff, 1, 0, ap->a_context); | |
1188 | ||
1189 | hfs_unlock_truncate(cp, 0); | |
1190 | #if HFS_COMPRESSION | |
1191 | decmpfs_unlock_compressed_data(dp, 1); | |
1192 | #endif | |
1193 | if (error) | |
1194 | goto out; | |
1195 | } | |
1196 | if (cp == NULL) { | |
1197 | if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) | |
1198 | return (error); | |
1199 | cp = VTOC(vp); | |
1200 | } | |
1201 | ||
1202 | /* | |
1203 | * If it is just an access time update request by itself | |
1204 | * we know the request is from kernel level code, and we | |
1205 | * can delay it without being as worried about consistency. | |
1206 | * This change speeds up mmaps, in the rare case that they | |
1207 | * get caught behind a sync. | |
1208 | */ | |
1209 | ||
1210 | if (vap->va_active == VNODE_ATTR_va_access_time) { | |
1211 | cp->c_touch_acctime=TRUE; | |
1212 | goto out; | |
1213 | } | |
1214 | ||
1215 | ||
1216 | ||
1217 | /* | |
1218 | * Owner/group change request. | |
1219 | * We are guaranteed that the new owner/group is valid and legal. | |
1220 | */ | |
1221 | VATTR_SET_SUPPORTED(vap, va_uid); | |
1222 | VATTR_SET_SUPPORTED(vap, va_gid); | |
1223 | nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL; | |
1224 | ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL; | |
1225 | if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) && | |
1226 | ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0)) | |
1227 | goto out; | |
1228 | ||
1229 | /* | |
1230 | * Mode change request. | |
1231 | * We are guaranteed that the mode value is valid and that in | |
1232 | * conjunction with the owner and group, this change is legal. | |
1233 | */ | |
1234 | VATTR_SET_SUPPORTED(vap, va_mode); | |
1235 | if (VATTR_IS_ACTIVE(vap, va_mode) && | |
1236 | ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0)) | |
1237 | goto out; | |
1238 | ||
1239 | /* | |
1240 | * File flags change. | |
1241 | * We are guaranteed that only flags allowed to change given the | |
1242 | * current securelevel are being changed. | |
1243 | */ | |
1244 | VATTR_SET_SUPPORTED(vap, va_flags); | |
1245 | if (VATTR_IS_ACTIVE(vap, va_flags)) { | |
1246 | u_int16_t *fdFlags; | |
1247 | ||
1248 | #if HFS_COMPRESSION | |
1249 | if ((cp->c_flags ^ vap->va_flags) & UF_COMPRESSED) { | |
1250 | /* | |
1251 | * the UF_COMPRESSED was toggled, so reset our cached compressed state | |
1252 | * but we don't want to actually do the update until we've released the cnode lock down below | |
1253 | * NOTE: turning the flag off doesn't actually decompress the file, so that we can | |
1254 | * turn off the flag and look at the "raw" file for debugging purposes | |
1255 | */ | |
1256 | decmpfs_reset_state = 1; | |
1257 | } | |
1258 | #endif | |
1259 | ||
1260 | cp->c_flags = vap->va_flags; | |
1261 | cp->c_touch_chgtime = TRUE; | |
1262 | ||
1263 | /* | |
1264 | * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info. | |
1265 | * | |
1266 | * The fdFlags for files and frFlags for folders are both 8 bytes | |
1267 | * into the userInfo (the first 16 bytes of the Finder Info). They | |
1268 | * are both 16-bit fields. | |
1269 | */ | |
1270 | fdFlags = (u_int16_t *) &cp->c_finderinfo[8]; | |
1271 | if (vap->va_flags & UF_HIDDEN) | |
1272 | *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask); | |
1273 | else | |
1274 | *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask); | |
1275 | } | |
1276 | ||
1277 | /* | |
1278 | * Timestamp updates. | |
1279 | */ | |
1280 | VATTR_SET_SUPPORTED(vap, va_create_time); | |
1281 | VATTR_SET_SUPPORTED(vap, va_access_time); | |
1282 | VATTR_SET_SUPPORTED(vap, va_modify_time); | |
1283 | VATTR_SET_SUPPORTED(vap, va_backup_time); | |
1284 | VATTR_SET_SUPPORTED(vap, va_change_time); | |
1285 | if (VATTR_IS_ACTIVE(vap, va_create_time) || | |
1286 | VATTR_IS_ACTIVE(vap, va_access_time) || | |
1287 | VATTR_IS_ACTIVE(vap, va_modify_time) || | |
1288 | VATTR_IS_ACTIVE(vap, va_backup_time)) { | |
1289 | if (VATTR_IS_ACTIVE(vap, va_create_time)) | |
1290 | cp->c_itime = vap->va_create_time.tv_sec; | |
1291 | if (VATTR_IS_ACTIVE(vap, va_access_time)) { | |
1292 | cp->c_atime = vap->va_access_time.tv_sec; | |
1293 | cp->c_touch_acctime = FALSE; | |
1294 | } | |
1295 | if (VATTR_IS_ACTIVE(vap, va_modify_time)) { | |
1296 | cp->c_mtime = vap->va_modify_time.tv_sec; | |
1297 | cp->c_touch_modtime = FALSE; | |
1298 | cp->c_touch_chgtime = TRUE; | |
1299 | ||
1300 | /* | |
1301 | * The utimes system call can reset the modification | |
1302 | * time but it doesn't know about HFS create times. | |
1303 | * So we need to ensure that the creation time is | |
1304 | * always at least as old as the modification time. | |
1305 | */ | |
1306 | if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) && | |
1307 | (cp->c_cnid != kHFSRootFolderID) && | |
1308 | (cp->c_mtime < cp->c_itime)) { | |
1309 | cp->c_itime = cp->c_mtime; | |
1310 | } | |
1311 | } | |
1312 | if (VATTR_IS_ACTIVE(vap, va_backup_time)) | |
1313 | cp->c_btime = vap->va_backup_time.tv_sec; | |
1314 | cp->c_flag |= C_MODIFIED; | |
1315 | } | |
1316 | ||
1317 | /* | |
1318 | * Set name encoding. | |
1319 | */ | |
1320 | VATTR_SET_SUPPORTED(vap, va_encoding); | |
1321 | if (VATTR_IS_ACTIVE(vap, va_encoding)) { | |
1322 | cp->c_encoding = vap->va_encoding; | |
1323 | hfs_setencodingbits(hfsmp, cp->c_encoding); | |
1324 | } | |
1325 | ||
1326 | if ((error = hfs_update(vp, TRUE)) != 0) | |
1327 | goto out; | |
1328 | out: | |
1329 | if (cp) { | |
1330 | /* Purge origin cache for cnode, since caller now has correct link ID for it | |
1331 | * We purge it here since it was acquired for us during lookup, and we no longer need it. | |
1332 | */ | |
1333 | if ((cp->c_flag & C_HARDLINK) && (vp->v_type != VDIR)){ | |
1334 | hfs_relorigin(cp, 0); | |
1335 | } | |
1336 | ||
1337 | hfs_unlock(cp); | |
1338 | #if HFS_COMPRESSION | |
1339 | if (decmpfs_reset_state) { | |
1340 | /* | |
1341 | * we've changed the UF_COMPRESSED flag, so reset the decmpfs state for this cnode | |
1342 | * but don't do it while holding the hfs cnode lock | |
1343 | */ | |
1344 | decmpfs_cnode *dp = VTOCMP(vp); | |
1345 | if (!dp) { | |
1346 | /* | |
1347 | * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode | |
1348 | * is filled in; we need a decmpfs_cnode to prevent decmpfs state changes | |
1349 | * on this file if it's locked | |
1350 | */ | |
1351 | dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp)); | |
1352 | if (!dp) { | |
1353 | /* failed to allocate a decmpfs_cnode */ | |
1354 | return ENOMEM; /* what should this be? */ | |
1355 | } | |
1356 | } | |
1357 | decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0); | |
1358 | } | |
1359 | #endif | |
1360 | } | |
1361 | return (error); | |
1362 | } | |
1363 | ||
1364 | ||
1365 | /* | |
1366 | * Change the mode on a file. | |
1367 | * cnode must be locked before calling. | |
1368 | */ | |
1369 | int | |
1370 | hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p) | |
1371 | { | |
1372 | register struct cnode *cp = VTOC(vp); | |
1373 | ||
1374 | if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord) | |
1375 | return (0); | |
1376 | ||
1377 | // XXXdbg - don't allow modification of the journal or journal_info_block | |
1378 | if (VTOHFS(vp)->jnl && cp && cp->c_datafork) { | |
1379 | struct HFSPlusExtentDescriptor *extd; | |
1380 | ||
1381 | extd = &cp->c_datafork->ff_extents[0]; | |
1382 | if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) { | |
1383 | return EPERM; | |
1384 | } | |
1385 | } | |
1386 | ||
1387 | #if OVERRIDE_UNKNOWN_PERMISSIONS | |
1388 | if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) { | |
1389 | return (0); | |
1390 | }; | |
1391 | #endif | |
1392 | cp->c_mode &= ~ALLPERMS; | |
1393 | cp->c_mode |= (mode & ALLPERMS); | |
1394 | cp->c_touch_chgtime = TRUE; | |
1395 | return (0); | |
1396 | } | |
1397 | ||
1398 | ||
1399 | int | |
1400 | hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags) | |
1401 | { | |
1402 | struct cnode *cp = VTOC(vp); | |
1403 | int retval = 0; | |
1404 | int is_member; | |
1405 | ||
1406 | /* | |
1407 | * Disallow write attempts on read-only file systems; | |
1408 | * unless the file is a socket, fifo, or a block or | |
1409 | * character device resident on the file system. | |
1410 | */ | |
1411 | switch (vnode_vtype(vp)) { | |
1412 | case VDIR: | |
1413 | case VLNK: | |
1414 | case VREG: | |
1415 | if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) | |
1416 | return (EROFS); | |
1417 | break; | |
1418 | default: | |
1419 | break; | |
1420 | } | |
1421 | ||
1422 | /* If immutable bit set, nobody gets to write it. */ | |
1423 | if (considerFlags && (cp->c_flags & IMMUTABLE)) | |
1424 | return (EPERM); | |
1425 | ||
1426 | /* Otherwise, user id 0 always gets access. */ | |
1427 | if (!suser(cred, NULL)) | |
1428 | return (0); | |
1429 | ||
1430 | /* Otherwise, check the owner. */ | |
1431 | if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0) | |
1432 | return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES); | |
1433 | ||
1434 | /* Otherwise, check the groups. */ | |
1435 | if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) { | |
1436 | return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES); | |
1437 | } | |
1438 | ||
1439 | /* Otherwise, check everyone else. */ | |
1440 | return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES); | |
1441 | } | |
1442 | ||
1443 | ||
1444 | /* | |
1445 | * Perform chown operation on cnode cp; | |
1446 | * cnode must be locked prior to call. | |
1447 | */ | |
1448 | int | |
1449 | #if !QUOTA | |
1450 | hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred, | |
1451 | __unused struct proc *p) | |
1452 | #else | |
1453 | hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred, | |
1454 | __unused struct proc *p) | |
1455 | #endif | |
1456 | { | |
1457 | register struct cnode *cp = VTOC(vp); | |
1458 | uid_t ouid; | |
1459 | gid_t ogid; | |
1460 | #if QUOTA | |
1461 | int error = 0; | |
1462 | register int i; | |
1463 | int64_t change; | |
1464 | #endif /* QUOTA */ | |
1465 | ||
1466 | if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord) | |
1467 | return (ENOTSUP); | |
1468 | ||
1469 | if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) | |
1470 | return (0); | |
1471 | ||
1472 | if (uid == (uid_t)VNOVAL) | |
1473 | uid = cp->c_uid; | |
1474 | if (gid == (gid_t)VNOVAL) | |
1475 | gid = cp->c_gid; | |
1476 | ||
1477 | #if 0 /* we are guaranteed that this is already the case */ | |
1478 | /* | |
1479 | * If we don't own the file, are trying to change the owner | |
1480 | * of the file, or are not a member of the target group, | |
1481 | * the caller must be superuser or the call fails. | |
1482 | */ | |
1483 | if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid || | |
1484 | (gid != cp->c_gid && | |
1485 | (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) && | |
1486 | (error = suser(cred, 0))) | |
1487 | return (error); | |
1488 | #endif | |
1489 | ||
1490 | ogid = cp->c_gid; | |
1491 | ouid = cp->c_uid; | |
1492 | #if QUOTA | |
1493 | if ((error = hfs_getinoquota(cp))) | |
1494 | return (error); | |
1495 | if (ouid == uid) { | |
1496 | dqrele(cp->c_dquot[USRQUOTA]); | |
1497 | cp->c_dquot[USRQUOTA] = NODQUOT; | |
1498 | } | |
1499 | if (ogid == gid) { | |
1500 | dqrele(cp->c_dquot[GRPQUOTA]); | |
1501 | cp->c_dquot[GRPQUOTA] = NODQUOT; | |
1502 | } | |
1503 | ||
1504 | /* | |
1505 | * Eventually need to account for (fake) a block per directory | |
1506 | * if (vnode_isdir(vp)) | |
1507 | * change = VTOHFS(vp)->blockSize; | |
1508 | * else | |
1509 | */ | |
1510 | ||
1511 | change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize; | |
1512 | (void) hfs_chkdq(cp, -change, cred, CHOWN); | |
1513 | (void) hfs_chkiq(cp, -1, cred, CHOWN); | |
1514 | for (i = 0; i < MAXQUOTAS; i++) { | |
1515 | dqrele(cp->c_dquot[i]); | |
1516 | cp->c_dquot[i] = NODQUOT; | |
1517 | } | |
1518 | #endif /* QUOTA */ | |
1519 | cp->c_gid = gid; | |
1520 | cp->c_uid = uid; | |
1521 | #if QUOTA | |
1522 | if ((error = hfs_getinoquota(cp)) == 0) { | |
1523 | if (ouid == uid) { | |
1524 | dqrele(cp->c_dquot[USRQUOTA]); | |
1525 | cp->c_dquot[USRQUOTA] = NODQUOT; | |
1526 | } | |
1527 | if (ogid == gid) { | |
1528 | dqrele(cp->c_dquot[GRPQUOTA]); | |
1529 | cp->c_dquot[GRPQUOTA] = NODQUOT; | |
1530 | } | |
1531 | if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) { | |
1532 | if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0) | |
1533 | goto good; | |
1534 | else | |
1535 | (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE); | |
1536 | } | |
1537 | for (i = 0; i < MAXQUOTAS; i++) { | |
1538 | dqrele(cp->c_dquot[i]); | |
1539 | cp->c_dquot[i] = NODQUOT; | |
1540 | } | |
1541 | } | |
1542 | cp->c_gid = ogid; | |
1543 | cp->c_uid = ouid; | |
1544 | if (hfs_getinoquota(cp) == 0) { | |
1545 | if (ouid == uid) { | |
1546 | dqrele(cp->c_dquot[USRQUOTA]); | |
1547 | cp->c_dquot[USRQUOTA] = NODQUOT; | |
1548 | } | |
1549 | if (ogid == gid) { | |
1550 | dqrele(cp->c_dquot[GRPQUOTA]); | |
1551 | cp->c_dquot[GRPQUOTA] = NODQUOT; | |
1552 | } | |
1553 | (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN); | |
1554 | (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN); | |
1555 | (void) hfs_getinoquota(cp); | |
1556 | } | |
1557 | return (error); | |
1558 | good: | |
1559 | if (hfs_getinoquota(cp)) | |
1560 | panic("hfs_chown: lost quota"); | |
1561 | #endif /* QUOTA */ | |
1562 | ||
1563 | ||
1564 | /* | |
1565 | * According to the SUSv3 Standard, chown() shall mark | |
1566 | * for update the st_ctime field of the file. | |
1567 | * (No exceptions mentioned.) | |
1568 | */ | |
1569 | cp->c_touch_chgtime = TRUE; | |
1570 | return (0); | |
1571 | } | |
1572 | ||
1573 | ||
1574 | /* | |
1575 | * The hfs_exchange routine swaps the fork data in two files by | |
1576 | * exchanging some of the information in the cnode. It is used | |
1577 | * to preserve the file ID when updating an existing file, in | |
1578 | * case the file is being tracked through its file ID. Typically | |
1579 | * it's used after creating a new file during a safe-save. | |
1580 | */ | |
1581 | int | |
1582 | hfs_vnop_exchange(ap) | |
1583 | struct vnop_exchange_args /* { | |
1584 | struct vnode *a_fvp; | |
1585 | struct vnode *a_tvp; | |
1586 | int a_options; | |
1587 | vfs_context_t a_context; | |
1588 | } */ *ap; | |
1589 | { | |
1590 | struct vnode *from_vp = ap->a_fvp; | |
1591 | struct vnode *to_vp = ap->a_tvp; | |
1592 | struct cnode *from_cp; | |
1593 | struct cnode *to_cp; | |
1594 | struct hfsmount *hfsmp; | |
1595 | struct cat_desc tempdesc; | |
1596 | struct cat_attr tempattr; | |
1597 | const unsigned char *from_nameptr; | |
1598 | const unsigned char *to_nameptr; | |
1599 | char from_iname[32]; | |
1600 | char to_iname[32]; | |
1601 | u_int32_t tempflag; | |
1602 | cnid_t from_parid; | |
1603 | cnid_t to_parid; | |
1604 | int lockflags; | |
1605 | int error = 0, started_tr = 0, got_cookie = 0; | |
1606 | cat_cookie_t cookie; | |
1607 | time_t orig_from_ctime, orig_to_ctime; | |
1608 | ||
1609 | /* The files must be on the same volume. */ | |
1610 | if (vnode_mount(from_vp) != vnode_mount(to_vp)) | |
1611 | return (EXDEV); | |
1612 | ||
1613 | if (from_vp == to_vp) | |
1614 | return (EINVAL); | |
1615 | ||
1616 | orig_from_ctime = VTOC(from_vp)->c_ctime; | |
1617 | orig_to_ctime = VTOC(to_vp)->c_ctime; | |
1618 | ||
1619 | #if HFS_COMPRESSION | |
1620 | if ( hfs_file_is_compressed(VTOC(from_vp), 0) ) { | |
1621 | if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) { | |
1622 | return error; | |
1623 | } | |
1624 | } | |
1625 | ||
1626 | if ( hfs_file_is_compressed(VTOC(to_vp), 0) ) { | |
1627 | if ( 0 != ( error = decmpfs_decompress_file(to_vp, VTOCMP(to_vp), -1, 0, 1) ) ) { | |
1628 | return error; | |
1629 | } | |
1630 | } | |
1631 | #endif // HFS_COMPRESSION | |
1632 | ||
1633 | /* | |
1634 | * Normally, we want to notify the user handlers about the event, | |
1635 | * except if it's a handler driving the event. | |
1636 | */ | |
1637 | if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) { | |
1638 | check_for_tracked_file(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL); | |
1639 | check_for_tracked_file(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL); | |
1640 | } | |
1641 | else { | |
1642 | /* | |
1643 | * We're doing a data-swap. | |
1644 | * Take the truncate lock/cnode lock, then verify there are no mmap references. | |
1645 | * Issue a hfs_filedone to flush out all of the remaining state for this file. | |
1646 | * Allow the rest of the codeflow to re-acquire the cnode locks in order. | |
1647 | */ | |
1648 | ||
1649 | hfs_lock_truncate (VTOC(from_vp), HFS_SHARED_LOCK); | |
1650 | ||
1651 | if ((error = hfs_lock(VTOC(from_vp), HFS_EXCLUSIVE_LOCK))) { | |
1652 | hfs_unlock_truncate (VTOC(from_vp), 0); | |
1653 | return error; | |
1654 | } | |
1655 | ||
1656 | /* Verify the source file is not in use by anyone besides us (including mmap refs) */ | |
1657 | if (vnode_isinuse(from_vp, 1)) { | |
1658 | error = EBUSY; | |
1659 | hfs_unlock(VTOC(from_vp)); | |
1660 | hfs_unlock_truncate (VTOC(from_vp), 0); | |
1661 | return error; | |
1662 | } | |
1663 | ||
1664 | /* Flush out the data in the source file */ | |
1665 | VTOC(from_vp)->c_flag |= C_SWAPINPROGRESS; | |
1666 | error = hfs_filedone (from_vp, ap->a_context); | |
1667 | VTOC(from_vp)->c_flag &= ~C_SWAPINPROGRESS; | |
1668 | hfs_unlock(VTOC(from_vp)); | |
1669 | hfs_unlock_truncate(VTOC(from_vp), 0); | |
1670 | ||
1671 | if (error) { | |
1672 | return error; | |
1673 | } | |
1674 | } | |
1675 | ||
1676 | ||
1677 | if ((error = hfs_lockpair(VTOC(from_vp), VTOC(to_vp), HFS_EXCLUSIVE_LOCK))) | |
1678 | return (error); | |
1679 | ||
1680 | from_cp = VTOC(from_vp); | |
1681 | to_cp = VTOC(to_vp); | |
1682 | hfsmp = VTOHFS(from_vp); | |
1683 | ||
1684 | /* Only normal files can be exchanged. */ | |
1685 | if (!vnode_isreg(from_vp) || !vnode_isreg(to_vp) || | |
1686 | VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) { | |
1687 | error = EINVAL; | |
1688 | goto exit; | |
1689 | } | |
1690 | ||
1691 | // XXXdbg - don't allow modification of the journal or journal_info_block | |
1692 | if (hfsmp->jnl) { | |
1693 | struct HFSPlusExtentDescriptor *extd; | |
1694 | ||
1695 | if (from_cp->c_datafork) { | |
1696 | extd = &from_cp->c_datafork->ff_extents[0]; | |
1697 | if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) { | |
1698 | error = EPERM; | |
1699 | goto exit; | |
1700 | } | |
1701 | } | |
1702 | ||
1703 | if (to_cp->c_datafork) { | |
1704 | extd = &to_cp->c_datafork->ff_extents[0]; | |
1705 | if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) { | |
1706 | error = EPERM; | |
1707 | goto exit; | |
1708 | } | |
1709 | } | |
1710 | } | |
1711 | ||
1712 | /* | |
1713 | * Ok, now that all of the pre-flighting is done, call the underlying | |
1714 | * function if needed. | |
1715 | */ | |
1716 | if (ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) { | |
1717 | error = hfs_movedata(from_vp, to_vp); | |
1718 | goto exit; | |
1719 | } | |
1720 | ||
1721 | ||
1722 | if ((error = hfs_start_transaction(hfsmp)) != 0) { | |
1723 | goto exit; | |
1724 | } | |
1725 | started_tr = 1; | |
1726 | ||
1727 | /* | |
1728 | * Reserve some space in the Catalog file. | |
1729 | */ | |
1730 | if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) { | |
1731 | goto exit; | |
1732 | } | |
1733 | got_cookie = 1; | |
1734 | ||
1735 | /* The backend code always tries to delete the virtual | |
1736 | * extent id for exchanging files so we need to lock | |
1737 | * the extents b-tree. | |
1738 | */ | |
1739 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK); | |
1740 | ||
1741 | /* Account for the location of the catalog objects. */ | |
1742 | if (from_cp->c_flag & C_HARDLINK) { | |
1743 | MAKE_INODE_NAME(from_iname, sizeof(from_iname), | |
1744 | from_cp->c_attr.ca_linkref); | |
1745 | from_nameptr = (unsigned char *)from_iname; | |
1746 | from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; | |
1747 | from_cp->c_hint = 0; | |
1748 | } else { | |
1749 | from_nameptr = from_cp->c_desc.cd_nameptr; | |
1750 | from_parid = from_cp->c_parentcnid; | |
1751 | } | |
1752 | if (to_cp->c_flag & C_HARDLINK) { | |
1753 | MAKE_INODE_NAME(to_iname, sizeof(to_iname), | |
1754 | to_cp->c_attr.ca_linkref); | |
1755 | to_nameptr = (unsigned char *)to_iname; | |
1756 | to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; | |
1757 | to_cp->c_hint = 0; | |
1758 | } else { | |
1759 | to_nameptr = to_cp->c_desc.cd_nameptr; | |
1760 | to_parid = to_cp->c_parentcnid; | |
1761 | } | |
1762 | ||
1763 | /* Do the exchange */ | |
1764 | error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid, | |
1765 | to_parid, from_cp->c_hint, to_cp->c_hint); | |
1766 | hfs_systemfile_unlock(hfsmp, lockflags); | |
1767 | ||
1768 | /* | |
1769 | * Note that we don't need to exchange any extended attributes | |
1770 | * since the attributes are keyed by file ID. | |
1771 | */ | |
1772 | ||
1773 | if (error != E_NONE) { | |
1774 | error = MacToVFSError(error); | |
1775 | goto exit; | |
1776 | } | |
1777 | ||
1778 | /* Purge the vnodes from the name cache */ | |
1779 | if (from_vp) | |
1780 | cache_purge(from_vp); | |
1781 | if (to_vp) | |
1782 | cache_purge(to_vp); | |
1783 | ||
1784 | /* Save a copy of from attributes before swapping. */ | |
1785 | bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc)); | |
1786 | bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr)); | |
1787 | tempflag = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS); | |
1788 | ||
1789 | /* | |
1790 | * Swap the descriptors and all non-fork related attributes. | |
1791 | * (except the modify date) | |
1792 | */ | |
1793 | bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc)); | |
1794 | ||
1795 | from_cp->c_hint = 0; | |
1796 | from_cp->c_fileid = from_cp->c_cnid; | |
1797 | from_cp->c_itime = to_cp->c_itime; | |
1798 | from_cp->c_btime = to_cp->c_btime; | |
1799 | from_cp->c_atime = to_cp->c_atime; | |
1800 | from_cp->c_ctime = to_cp->c_ctime; | |
1801 | from_cp->c_gid = to_cp->c_gid; | |
1802 | from_cp->c_uid = to_cp->c_uid; | |
1803 | from_cp->c_flags = to_cp->c_flags; | |
1804 | from_cp->c_mode = to_cp->c_mode; | |
1805 | from_cp->c_linkcount = to_cp->c_linkcount; | |
1806 | from_cp->c_flag = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS); | |
1807 | from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags; | |
1808 | bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32); | |
1809 | ||
1810 | bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc)); | |
1811 | to_cp->c_hint = 0; | |
1812 | to_cp->c_fileid = to_cp->c_cnid; | |
1813 | to_cp->c_itime = tempattr.ca_itime; | |
1814 | to_cp->c_btime = tempattr.ca_btime; | |
1815 | to_cp->c_atime = tempattr.ca_atime; | |
1816 | to_cp->c_ctime = tempattr.ca_ctime; | |
1817 | to_cp->c_gid = tempattr.ca_gid; | |
1818 | to_cp->c_uid = tempattr.ca_uid; | |
1819 | to_cp->c_flags = tempattr.ca_flags; | |
1820 | to_cp->c_mode = tempattr.ca_mode; | |
1821 | to_cp->c_linkcount = tempattr.ca_linkcount; | |
1822 | to_cp->c_flag = tempflag; | |
1823 | to_cp->c_attr.ca_recflags = tempattr.ca_recflags; | |
1824 | bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32); | |
1825 | ||
1826 | /* Rehash the cnodes using their new file IDs */ | |
1827 | hfs_chash_rehash(hfsmp, from_cp, to_cp); | |
1828 | ||
1829 | /* | |
1830 | * When a file moves out of "Cleanup At Startup" | |
1831 | * we can drop its NODUMP status. | |
1832 | */ | |
1833 | if ((from_cp->c_flags & UF_NODUMP) && | |
1834 | (from_cp->c_parentcnid != to_cp->c_parentcnid)) { | |
1835 | from_cp->c_flags &= ~UF_NODUMP; | |
1836 | from_cp->c_touch_chgtime = TRUE; | |
1837 | } | |
1838 | if ((to_cp->c_flags & UF_NODUMP) && | |
1839 | (to_cp->c_parentcnid != from_cp->c_parentcnid)) { | |
1840 | to_cp->c_flags &= ~UF_NODUMP; | |
1841 | to_cp->c_touch_chgtime = TRUE; | |
1842 | } | |
1843 | ||
1844 | exit: | |
1845 | if (got_cookie) { | |
1846 | cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context)); | |
1847 | } | |
1848 | if (started_tr) { | |
1849 | hfs_end_transaction(hfsmp); | |
1850 | } | |
1851 | ||
1852 | hfs_unlockpair(from_cp, to_cp); | |
1853 | return (error); | |
1854 | } | |
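/*
 * Illustrative sketch: user space typically reaches hfs_vnop_exchange through
 * the exchangedata(2) system call.  The fragment below shows the safe-save
 * pattern the block comment above refers to; safe_save, origpath and tmppath
 * are hypothetical names, and tmppath is assumed to be a fully written
 * temporary file on the same volume.
 */
#if 0
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int
safe_save(const char *origpath, const char *tmppath)
{
	/* Swap the forks so origpath keeps its file ID across the update. */
	if (exchangedata(origpath, tmppath, 0) != 0) {
		fprintf(stderr, "exchangedata: %s\n", strerror(errno));
		return (-1);
	}
	/* The old contents now live at tmppath and can simply be unlinked. */
	return (unlink(tmppath));
}
#endif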
1855 | ||
1856 | int | |
1857 | hfs_vnop_mmap(struct vnop_mmap_args *ap) | |
1858 | { | |
1859 | struct vnode *vp = ap->a_vp; | |
1860 | int error; | |
1861 | ||
1862 | if (VNODE_IS_RSRC(vp)) { | |
1863 | /* allow pageins of the resource fork */ | |
1864 | } else { | |
1865 | int compressed = hfs_file_is_compressed(VTOC(vp), 1); /* 1 == don't take the cnode lock */ | |
1866 | time_t orig_ctime = VTOC(vp)->c_ctime; | |
1867 | ||
1868 | if (!compressed && (VTOC(vp)->c_flags & UF_COMPRESSED)) { | |
1869 | error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP); | |
1870 | if (error != 0) { | |
1871 | return error; | |
1872 | } | |
1873 | } | |
1874 | ||
1875 | if (ap->a_fflags & PROT_WRITE) { | |
1876 | check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL); | |
1877 | } | |
1878 | } | |
1879 | ||
1880 | // | |
1881 | // NOTE: we return ENOTSUP because we want the cluster layer | |
1882 | // to actually do all the real work. | |
1883 | // | |
1884 | return (ENOTSUP); | |
1885 | } | |
1886 | ||
1887 | /* | |
1888 | * hfs_movedata | |
1889 | * | |
1890 | * This is a non-symmetric variant of exchangedata. In this function, | |
1891 | * the contents of the fork in from_vp are moved to the fork | |
1892 | * specified by to_vp. | |
1893 | * | |
1894 | * The cnodes pointed to by 'from_vp' and 'to_vp' must be locked. | |
1895 | * | |
1896 | * The vnode pointed to by 'to_vp' *must* be empty prior to invoking this function. | |
1897 | * We impose this restriction because we may not be able to fully delete the entire | |
1898 | * file's contents in a single transaction, particularly if it has a lot of extents. | |
1899 | * In the normal file deletion codepath, the file is screened for two conditions: | |
1900 | * 1) bigger than 400MB, and 2) more than 8 extents. If so, the file is relocated to | |
1901 | * the hidden directory and the deletion is broken up into multiple truncates. We can't | |
1902 | * do that here because both files need to exist in the namespace. The main reason this | |
1903 | * is imposed is that we may have to touch a whole lot of bitmap blocks if there are | |
1904 | * many extents. | |
1905 | * | |
1906 | * Any data written to 'from_vp' after this call completes is not guaranteed | |
1907 | * to be moved. | |
1908 | * | |
1909 | * Arguments: | |
1910 | * vnode from_vp: source file | |
1911 | * vnode to_vp: destination file; must be empty | |
1912 | * | |
1913 | * Returns: | |
1914 | * EFBIG - Destination file was not empty | |
1915 | * 0 - success | |
1916 | * | |
1917 | * | |
1918 | */ | |
1919 | int hfs_movedata (struct vnode *from_vp, struct vnode *to_vp) { | |
1920 | ||
1921 | struct cnode *from_cp; | |
1922 | struct cnode *to_cp; | |
1923 | struct hfsmount *hfsmp = NULL; | |
1924 | int error = 0; | |
1925 | int started_tr = 0; | |
1926 | int lockflags = 0; | |
1927 | int overflow_blocks; | |
1928 | int rsrc = 0; | |
1929 | ||
1930 | ||
1931 | /* Get the HFS pointers */ | |
1932 | from_cp = VTOC(from_vp); | |
1933 | to_cp = VTOC(to_vp); | |
1934 | hfsmp = VTOHFS(from_vp); | |
1935 | ||
1936 | /* Verify that neither source/dest file is open-unlinked */ | |
1937 | if (from_cp->c_flag & (C_DELETED | C_NOEXISTS)) { | |
1938 | error = EBUSY; | |
1939 | goto movedata_exit; | |
1940 | } | |
1941 | ||
1942 | if (to_cp->c_flag & (C_DELETED | C_NOEXISTS)) { | |
1943 | error = EBUSY; | |
1944 | goto movedata_exit; | |
1945 | } | |
1946 | ||
1947 | /* | |
1948 | * Verify the source file is not in use by anyone besides us. | |
1949 | * | |
1950 | * This function is typically invoked by a namespace handler | |
1951 | * process responding to a temporarily stalled system call. | |
1952 | * The FD that it is working off of is opened O_EVTONLY, so | |
1953 | * it really has no active usecounts (the kusecount from O_EVTONLY | |
1954 | * is subtracted from the total usecounts). | |
1955 | * | |
1956 | * As a result, we shouldn't have any active usecounts against | |
1957 | * this vnode when we go to check it below. | |
1958 | */ | |
1959 | if (vnode_isinuse(from_vp, 0)) { | |
1960 | error = EBUSY; | |
1961 | goto movedata_exit; | |
1962 | } | |
1963 | ||
1964 | if (from_cp->c_rsrc_vp == from_vp) { | |
1965 | rsrc = 1; | |
1966 | } | |
1967 | ||
1968 | /* | |
1969 | * We assume that the destination file is already empty. | |
1970 | * Verify that it is. | |
1971 | */ | |
1972 | if (rsrc) { | |
1973 | if (to_cp->c_rsrcfork->ff_size > 0) { | |
1974 | error = EFBIG; | |
1975 | goto movedata_exit; | |
1976 | } | |
1977 | } | |
1978 | else { | |
1979 | if (to_cp->c_datafork->ff_size > 0) { | |
1980 | error = EFBIG; | |
1981 | goto movedata_exit; | |
1982 | } | |
1983 | } | |
1984 | ||
1985 | /* If the source has the rsrc open, make sure the destination is also the rsrc */ | |
1986 | if (rsrc) { | |
1987 | if (to_vp != to_cp->c_rsrc_vp) { | |
1988 | error = EINVAL; | |
1989 | goto movedata_exit; | |
1990 | } | |
1991 | } | |
1992 | else { | |
1993 | /* Verify that both forks are data forks */ | |
1994 | if (to_vp != to_cp->c_vp) { | |
1995 | error = EINVAL; | |
1996 | goto movedata_exit; | |
1997 | } | |
1998 | } | |
1999 | ||
2000 | /* | |
2001 | * See if the source file has overflow extents. If it doesn't, we don't | |
2002 | * need to call into MoveData, and the catalog will be enough. | |
2003 | */ | |
2004 | if (rsrc) { | |
2005 | overflow_blocks = overflow_extents(from_cp->c_rsrcfork); | |
2006 | } | |
2007 | else { | |
2008 | overflow_blocks = overflow_extents(from_cp->c_datafork); | |
2009 | } | |
2010 | ||
2011 | if ((error = hfs_start_transaction (hfsmp)) != 0) { | |
2012 | goto movedata_exit; | |
2013 | } | |
2014 | started_tr = 1; | |
2015 | ||
2016 | /* Lock the system files: catalog, extents, attributes */ | |
2017 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK); | |
2018 | ||
2019 | /* Copy over any catalog allocation data into the new spot. */ | |
2020 | if (rsrc) { | |
2021 | if ((error = hfs_move_fork (from_cp->c_rsrcfork, from_cp, to_cp->c_rsrcfork, to_cp))){ | |
2022 | hfs_systemfile_unlock(hfsmp, lockflags); | |
2023 | goto movedata_exit; | |
2024 | } | |
2025 | } | |
2026 | else { | |
2027 | if ((error = hfs_move_fork (from_cp->c_datafork, from_cp, to_cp->c_datafork, to_cp))) { | |
2028 | hfs_systemfile_unlock(hfsmp, lockflags); | |
2029 | goto movedata_exit; | |
2030 | } | |
2031 | } | |
2032 | ||
2033 | /* | |
2034 | * Note that because all we're doing is moving the extents around, we can | |
2035 | * probably do this in a single transaction: Each extent record (group of 8) | |
2036 | * is 64 bytes. An extent overflow B-Tree node is typically 4k. This means | |
2037 | * each node can hold roughly ~60 extent records == (480 extents). | |
2038 | * | |
2039 | * If a file was massively fragmented and had 20k extents, this means we'd | |
2040 | * roughly touch 20k/480 == 41 to 42 nodes, plus the index nodes, for each half | |
2041 | * of the operation (inserting or deleting). So if we're manipulating 80-100 | |
2042 | * nodes, this is basically 320k of data to write to the journal in | |
2043 | * a bad case. | |
2044 | */ | |
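	/*
	 * A worked version of that estimate, assuming 4 KB nodes and 64-byte
	 * extent records (kHFSPlusExtentDensity == 8 descriptors per record);
	 * figures are rough and for illustration only:
	 */
#if 0
	u_int32_t recs_per_node    = 60;                         /* ~4096/64, minus node and key overhead */
	u_int32_t extents_per_node = recs_per_node * 8;          /* ~480 extents per leaf node */
	u_int32_t nodes_per_pass   = (20000 + extents_per_node - 1) / extents_per_node;  /* ~42 */
	u_int32_t journal_bytes    = 2 * nodes_per_pass * 4096;  /* both passes, ~336 KB before index nodes */
#endif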
2045 | if (overflow_blocks != 0) { | |
2046 | if (rsrc) { | |
2047 | error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1); | |
2048 | } | |
2049 | else { | |
2050 | error = MoveData (hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0); | |
2051 | } | |
2052 | } | |
2053 | ||
2054 | if (error) { | |
2055 | /* Reverse the operation. Copy the fork data back into the source */ | |
2056 | if (rsrc) { | |
2057 | hfs_move_fork (to_cp->c_rsrcfork, to_cp, from_cp->c_rsrcfork, from_cp); | |
2058 | } | |
2059 | else { | |
2060 | hfs_move_fork (to_cp->c_datafork, to_cp, from_cp->c_datafork, from_cp); | |
2061 | } | |
2062 | } | |
2063 | else { | |
2064 | struct cat_fork *src_data = NULL; | |
2065 | struct cat_fork *src_rsrc = NULL; | |
2066 | struct cat_fork *dst_data = NULL; | |
2067 | struct cat_fork *dst_rsrc = NULL; | |
2068 | ||
2069 | /* Touch the times */ | |
2070 | to_cp->c_touch_acctime = TRUE; | |
2071 | to_cp->c_touch_chgtime = TRUE; | |
2072 | to_cp->c_touch_modtime = TRUE; | |
2073 | ||
2074 | from_cp->c_touch_acctime = TRUE; | |
2075 | from_cp->c_touch_chgtime = TRUE; | |
2076 | from_cp->c_touch_modtime = TRUE; | |
2077 | ||
2078 | hfs_touchtimes(hfsmp, to_cp); | |
2079 | hfs_touchtimes(hfsmp, from_cp); | |
2080 | ||
2081 | if (from_cp->c_datafork) { | |
2082 | src_data = &from_cp->c_datafork->ff_data; | |
2083 | } | |
2084 | if (from_cp->c_rsrcfork) { | |
2085 | src_rsrc = &from_cp->c_rsrcfork->ff_data; | |
2086 | } | |
2087 | ||
2088 | if (to_cp->c_datafork) { | |
2089 | dst_data = &to_cp->c_datafork->ff_data; | |
2090 | } | |
2091 | if (to_cp->c_rsrcfork) { | |
2092 | dst_rsrc = &to_cp->c_rsrcfork->ff_data; | |
2093 | } | |
2094 | ||
2095 | /* Update the catalog nodes */ | |
2096 | (void) cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr, | |
2097 | src_data, src_rsrc); | |
2098 | ||
2099 | (void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr, | |
2100 | dst_data, dst_rsrc); | |
2101 | ||
2102 | } | |
2103 | /* unlock the system files */ | |
2104 | hfs_systemfile_unlock(hfsmp, lockflags); | |
2105 | ||
2106 | ||
2107 | movedata_exit: | |
2108 | if (started_tr) { | |
2109 | hfs_end_transaction(hfsmp); | |
2110 | } | |
2111 | ||
2112 | return error; | |
2113 | ||
2114 | } | |
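/*
 * Minimal sketch of the calling convention hfs_movedata expects, mirroring
 * the FSOPT_EXCHANGE_DATA_ONLY path in hfs_vnop_exchange above: both cnodes
 * are locked as a pair for the duration of the call.  The helper name is
 * hypothetical, and the vnodes are assumed to already hold iocounts and to
 * live on the same volume.
 */
#if 0
static int
hfs_movedata_example(struct vnode *from_vp, struct vnode *to_vp)
{
	int error;

	if ((error = hfs_lockpair(VTOC(from_vp), VTOC(to_vp), HFS_EXCLUSIVE_LOCK)))
		return (error);

	/* Destination fork must be empty or hfs_movedata returns EFBIG. */
	error = hfs_movedata(from_vp, to_vp);

	hfs_unlockpair(VTOC(from_vp), VTOC(to_vp));
	return (error);
}
#endif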
2115 | ||
2116 | /* | |
2117 | * Copy all of the catalog and runtime data in srcfork to dstfork. | |
2118 | * | |
2119 | * This allows us to maintain the invalid ranges across the movedata operation so | |
2120 | * we don't need to force all of the pending IO right now. In addition, we move all | |
2121 | * of the non-overflow (in-catalog) extents into the destination here. | |
2122 | */ | |
2123 | static int hfs_move_fork (struct filefork *srcfork, struct cnode *src_cp, | |
2124 | struct filefork *dstfork, struct cnode *dst_cp) { | |
2125 | struct rl_entry *invalid_range; | |
2126 | int size = sizeof(struct HFSPlusExtentDescriptor); | |
2127 | size = size * kHFSPlusExtentDensity; | |
2128 | ||
2129 | /* If the dstfork has any invalid ranges, bail out */ | |
2130 | invalid_range = TAILQ_FIRST(&dstfork->ff_invalidranges); | |
2131 | if (invalid_range != NULL) { | |
2132 | return EFBIG; | |
2133 | } | |
2134 | ||
2135 | if (dstfork->ff_data.cf_size != 0 || dstfork->ff_data.cf_new_size != 0) { | |
2136 | return EFBIG; | |
2137 | } | |
2138 | ||
2139 | /* First copy the invalid ranges */ | |
2140 | while ((invalid_range = TAILQ_FIRST(&srcfork->ff_invalidranges))) { | |
2141 | off_t start = invalid_range->rl_start; | |
2142 | off_t end = invalid_range->rl_end; | |
2143 | ||
2144 | /* Remove it from the srcfork and add it to dstfork */ | |
2145 | rl_remove(start, end, &srcfork->ff_invalidranges); | |
2146 | rl_add(start, end, &dstfork->ff_invalidranges); | |
2147 | } | |
2148 | ||
2149 | /* | |
2150 | * Ignore the ff_union. We don't move symlinks or system files. | |
2151 | * Now copy the in-catalog extent information | |
2152 | */ | |
2153 | dstfork->ff_data.cf_size = srcfork->ff_data.cf_size; | |
2154 | dstfork->ff_data.cf_new_size = srcfork->ff_data.cf_new_size; | |
2155 | dstfork->ff_data.cf_vblocks = srcfork->ff_data.cf_vblocks; | |
2156 | dstfork->ff_data.cf_blocks = srcfork->ff_data.cf_blocks; | |
2157 | ||
2158 | /* just memcpy the whole array of extents to the new location. */ | |
2159 | memcpy (dstfork->ff_data.cf_extents, srcfork->ff_data.cf_extents, size); | |
2160 | ||
2161 | /* | |
2162 | * Copy the cnode attribute data. | |
2163 | * | |
2164 | */ | |
2165 | src_cp->c_blocks -= srcfork->ff_data.cf_vblocks; | |
2166 | src_cp->c_blocks -= srcfork->ff_data.cf_blocks; | |
2167 | ||
2168 | dst_cp->c_blocks += srcfork->ff_data.cf_vblocks; | |
2169 | dst_cp->c_blocks += srcfork->ff_data.cf_blocks; | |
2170 | ||
2171 | /* Now delete the entries in the source fork */ | |
2172 | srcfork->ff_data.cf_size = 0; | |
2173 | srcfork->ff_data.cf_new_size = 0; | |
2174 | srcfork->ff_data.cf_union.cfu_bytesread = 0; | |
2175 | srcfork->ff_data.cf_vblocks = 0; | |
2176 | srcfork->ff_data.cf_blocks = 0; | |
2177 | ||
2178 | /* Zero out the old extents */ | |
2179 | bzero (srcfork->ff_data.cf_extents, size); | |
2180 | return 0; | |
2181 | } | |
2182 | ||
2183 | ||
2184 | ||
2185 | /* | |
2186 | * cnode must be locked | |
2187 | */ | |
2188 | int | |
2189 | hfs_fsync(struct vnode *vp, int waitfor, int fullsync, struct proc *p) | |
2190 | { | |
2191 | struct cnode *cp = VTOC(vp); | |
2192 | struct filefork *fp = NULL; | |
2193 | int retval = 0; | |
2194 | struct hfsmount *hfsmp = VTOHFS(vp); | |
2195 | struct rl_entry *invalid_range; | |
2196 | struct timeval tv; | |
2197 | int waitdata; /* attributes necessary for data retrieval */ | |
2198 | int wait; /* all other attributes (e.g. atime, etc.) */ | |
2199 | int lockflag; | |
2200 | int took_trunc_lock = 0; | |
2201 | ||
2202 | /* | |
2203 | * Applications which only care about data integrity rather than full | |
2204 | * file integrity may opt out of (delay) expensive metadata update | |
2205 | * operations as a performance optimization. | |
2206 | */ | |
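	/*
	 * MNT_DWAIT waits for data-critical writes only; MNT_WAIT implies the
	 * data wait as well, hence the bitwise OR of the two 0/1 flags below.
	 */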
2207 | wait = (waitfor == MNT_WAIT); | |
2208 | waitdata = (waitfor == MNT_DWAIT) | wait; | |
2209 | if (always_do_fullfsync) | |
2210 | fullsync = 1; | |
2211 | ||
2212 | /* HFS directories don't have any data blocks. */ | |
2213 | if (vnode_isdir(vp)) | |
2214 | goto metasync; | |
2215 | fp = VTOF(vp); | |
2216 | ||
2217 | /* | |
2218 | * For system files flush the B-tree header and | |
2219 | * for regular files write out any clusters | |
2220 | */ | |
2221 | if (vnode_issystem(vp)) { | |
2222 | if (VTOF(vp)->fcbBTCBPtr != NULL) { | |
2223 | // XXXdbg | |
2224 | if (hfsmp->jnl == NULL) { | |
2225 | BTFlushPath(VTOF(vp)); | |
2226 | } | |
2227 | } | |
2228 | } else if (UBCINFOEXISTS(vp)) { | |
2229 | hfs_unlock(cp); | |
2230 | hfs_lock_truncate(cp, HFS_SHARED_LOCK); | |
2231 | took_trunc_lock = 1; | |
2232 | ||
2233 | if (fp->ff_unallocblocks != 0) { | |
2234 | hfs_unlock_truncate(cp, 0); | |
2235 | ||
2236 | hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK); | |
2237 | } | |
2238 | /* Don't hold cnode lock when calling into cluster layer. */ | |
2239 | (void) cluster_push(vp, waitdata ? IO_SYNC : 0); | |
2240 | ||
2241 | hfs_lock(cp, HFS_FORCE_LOCK); | |
2242 | } | |
2243 | /* | |
2244 | * When MNT_WAIT is requested and the zero fill timeout | |
2245 | * has expired then we must explicitly zero out any areas | |
2246 | * that are currently marked invalid (holes). | |
2247 | * | |
2248 | * Files with NODUMP can bypass zero filling here. | |
2249 | */ | |
2250 | if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) || | |
2251 | ((wait || (cp->c_flag & C_ZFWANTSYNC)) && | |
2252 | ((cp->c_flags & UF_NODUMP) == 0) && | |
2253 | UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) && | |
2254 | cp->c_zftimeout != 0))) { | |
2255 | ||
2256 | microuptime(&tv); | |
2257 | if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && !fullsync && tv.tv_sec < (long)cp->c_zftimeout) { | |
2258 | /* Remember that a force sync was requested. */ | |
2259 | cp->c_flag |= C_ZFWANTSYNC; | |
2260 | goto datasync; | |
2261 | } | |
2262 | if (!TAILQ_EMPTY(&fp->ff_invalidranges)) { | |
2263 | if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) { | |
2264 | hfs_unlock(cp); | |
2265 | if (took_trunc_lock) { | |
2266 | hfs_unlock_truncate(cp, 0); | |
2267 | } | |
2268 | hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK); | |
2269 | hfs_lock(cp, HFS_FORCE_LOCK); | |
2270 | took_trunc_lock = 1; | |
2271 | } | |
2272 | while ((invalid_range = TAILQ_FIRST(&fp->ff_invalidranges))) { | |
2273 | off_t start = invalid_range->rl_start; | |
2274 | off_t end = invalid_range->rl_end; | |
2275 | ||
2276 | /* The range about to be written must be validated | |
2277 | * first, so that VNOP_BLOCKMAP() will return the | |
2278 | * appropriate mapping for the cluster code: | |
2279 | */ | |
2280 | rl_remove(start, end, &fp->ff_invalidranges); | |
2281 | ||
2282 | /* Don't hold cnode lock when calling into cluster layer. */ | |
2283 | hfs_unlock(cp); | |
2284 | (void) cluster_write(vp, (struct uio *) 0, | |
2285 | fp->ff_size, end + 1, start, (off_t)0, | |
2286 | IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE); | |
2287 | hfs_lock(cp, HFS_FORCE_LOCK); | |
2288 | cp->c_flag |= C_MODIFIED; | |
2289 | } | |
2290 | hfs_unlock(cp); | |
2291 | (void) cluster_push(vp, waitdata ? IO_SYNC : 0); | |
2292 | hfs_lock(cp, HFS_FORCE_LOCK); | |
2293 | } | |
2294 | cp->c_flag &= ~C_ZFWANTSYNC; | |
2295 | cp->c_zftimeout = 0; | |
2296 | } | |
2297 | datasync: | |
2298 | if (took_trunc_lock) { | |
2299 | hfs_unlock_truncate(cp, 0); | |
2300 | took_trunc_lock = 0; | |
2301 | } | |
2302 | /* | |
2303 | * if we have a journal and if journal_active() returns != 0 then | |
2304 | * we shouldn't do anything to a locked block (because it is part | |
2305 | * of a transaction). otherwise we'll just go through the normal | |
2306 | * code path and flush the buffer. note journal_active() can return | |
2307 | * -1 if the journal is invalid -- however we still need to skip any | |
2308 | * locked blocks as they get cleaned up when we finish the transaction | |
2309 | * or close the journal. | |
2310 | */ | |
2311 | // if (hfsmp->jnl && journal_active(hfsmp->jnl) >= 0) | |
2312 | if (hfsmp->jnl) | |
2313 | lockflag = BUF_SKIP_LOCKED; | |
2314 | else | |
2315 | lockflag = 0; | |
2316 | ||
2317 | /* | |
2318 | * Flush all dirty buffers associated with a vnode. | |
2319 | */ | |
2320 | buf_flushdirtyblks(vp, waitdata, lockflag, "hfs_fsync"); | |
2321 | ||
2322 | metasync: | |
2323 | if (vnode_isreg(vp) && vnode_issystem(vp)) { | |
2324 | if (VTOF(vp)->fcbBTCBPtr != NULL) { | |
2325 | microuptime(&tv); | |
2326 | BTSetLastSync(VTOF(vp), tv.tv_sec); | |
2327 | } | |
2328 | cp->c_touch_acctime = FALSE; | |
2329 | cp->c_touch_chgtime = FALSE; | |
2330 | cp->c_touch_modtime = FALSE; | |
2331 | } else if ( !(vp->v_flag & VSWAP) ) /* User file */ { | |
2332 | retval = hfs_update(vp, wait); | |
2333 | ||
2334 | /* | |
2335 | * When MNT_WAIT is requested push out the catalog record for | |
2336 | * this file. If they asked for a full fsync, we can skip this | |
2337 | * because the journal_flush or hfs_metasync_all will push out | |
2338 | * all of the metadata changes. | |
2339 | */ | |
2340 | if ((retval == 0) && wait && !fullsync && cp->c_hint && | |
2341 | !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) { | |
2342 | hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p); | |
2343 | } | |
2344 | ||
2345 | /* | |
2346 | * If this was a full fsync, make sure all metadata | |
2347 | * changes get to stable storage. | |
2348 | */ | |
2349 | if (fullsync) { | |
2350 | if (hfsmp->jnl) { | |
2351 | hfs_journal_flush(hfsmp, FALSE); | |
2352 | ||
2353 | if (journal_uses_fua(hfsmp->jnl)) { | |
2354 | /* | |
2355 | * the journal_flush did NOT issue a sync track cache command, | |
2356 | * and the fullsync indicates we are supposed to flush all cached | |
2357 | * data to the media, so issue the sync track cache command | |
2358 | * explicitly | |
2359 | */ | |
2360 | VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL); | |
2361 | } | |
2362 | } else { | |
2363 | retval = hfs_metasync_all(hfsmp); | |
2364 | /* XXX need to pass context! */ | |
2365 | VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL); | |
2366 | } | |
2367 | } | |
2368 | } | |
2369 | ||
2370 | return (retval); | |
2371 | } | |
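/*
 * For reference, a userspace sketch of the two flavours of sync that land in
 * hfs_fsync above: plain fsync(2) takes the ordinary path, while
 * fcntl(F_FULLFSYNC) asks for the full flush-to-media behaviour (the same
 * path forced globally by the always_do_fullfsync override).  Illustrative
 * only; durable_write is a hypothetical helper.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
durable_write(int fd, const void *buf, size_t len)
{
	if (write(fd, buf, len) < 0)
		return (-1);
	/* Push both the data and the drive's write cache out to the media. */
	if (fcntl(fd, F_FULLFSYNC) < 0)
		return (fsync(fd));	/* fall back if the device can't do it */
	return (0);
}
#endif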
2372 | ||
2373 | ||
2374 | /* Sync an hfs catalog b-tree node */ | |
2375 | int | |
2376 | hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p) | |
2377 | { | |
2378 | vnode_t vp; | |
2379 | buf_t bp; | |
2380 | int lockflags; | |
2381 | ||
2382 | vp = HFSTOVCB(hfsmp)->catalogRefNum; | |
2383 | ||
2384 | // XXXdbg - don't need to do this on a journaled volume | |
2385 | if (hfsmp->jnl) { | |
2386 | return 0; | |
2387 | } | |
2388 | ||
2389 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); | |
2390 | /* | |
2391 | * Look for a matching node that has been delayed | |
2392 | * but is not part of a set (B_LOCKED). | |
2393 | * | |
2394 | * BLK_ONLYVALID causes buf_getblk to return a | |
2395 | * buf_t for the daddr64_t specified only if it's | |
2396 | * currently resident in the cache... the size | |
2397 | * parameter to buf_getblk is ignored when this flag | |
2398 | * is set | |
2399 | */ | |
2400 | bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID); | |
2401 | ||
2402 | if (bp) { | |
2403 | if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI) | |
2404 | (void) VNOP_BWRITE(bp); | |
2405 | else | |
2406 | buf_brelse(bp); | |
2407 | } | |
2408 | ||
2409 | hfs_systemfile_unlock(hfsmp, lockflags); | |
2410 | ||
2411 | return (0); | |
2412 | } | |
2413 | ||
2414 | ||
2415 | /* | |
2416 | * Sync all hfs B-trees. Use this instead of journal_flush for a volume | |
2417 | * without a journal. Note that the volume bitmap does not get written; | |
2418 | * we rely on fsck_hfs to fix that up (which it can do without any loss | |
2419 | * of data). | |
2420 | */ | |
2421 | int | |
2422 | hfs_metasync_all(struct hfsmount *hfsmp) | |
2423 | { | |
2424 | int lockflags; | |
2425 | ||
2426 | /* Lock all of the B-trees so we get a mutually consistent state */ | |
2427 | lockflags = hfs_systemfile_lock(hfsmp, | |
2428 | SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK); | |
2429 | ||
2430 | /* Sync each of the B-trees */ | |
2431 | if (hfsmp->hfs_catalog_vp) | |
2432 | hfs_btsync(hfsmp->hfs_catalog_vp, 0); | |
2433 | if (hfsmp->hfs_extents_vp) | |
2434 | hfs_btsync(hfsmp->hfs_extents_vp, 0); | |
2435 | if (hfsmp->hfs_attribute_vp) | |
2436 | hfs_btsync(hfsmp->hfs_attribute_vp, 0); | |
2437 | ||
2438 | /* Wait for all of the writes to complete */ | |
2439 | if (hfsmp->hfs_catalog_vp) | |
2440 | vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all"); | |
2441 | if (hfsmp->hfs_extents_vp) | |
2442 | vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all"); | |
2443 | if (hfsmp->hfs_attribute_vp) | |
2444 | vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all"); | |
2445 | ||
2446 | hfs_systemfile_unlock(hfsmp, lockflags); | |
2447 | ||
2448 | return 0; | |
2449 | } | |
2450 | ||
2451 | ||
2452 | /*ARGSUSED 1*/ | |
2453 | static int | |
2454 | hfs_btsync_callback(struct buf *bp, __unused void *dummy) | |
2455 | { | |
2456 | buf_clearflags(bp, B_LOCKED); | |
2457 | (void) buf_bawrite(bp); | |
2458 | ||
2459 | return(BUF_CLAIMED); | |
2460 | } | |
2461 | ||
2462 | ||
2463 | int | |
2464 | hfs_btsync(struct vnode *vp, int sync_transaction) | |
2465 | { | |
2466 | struct cnode *cp = VTOC(vp); | |
2467 | struct timeval tv; | |
2468 | int flags = 0; | |
2469 | ||
2470 | if (sync_transaction) | |
2471 | flags |= BUF_SKIP_NONLOCKED; | |
2472 | /* | |
2473 | * Flush all dirty buffers associated with b-tree. | |
2474 | */ | |
2475 | buf_iterate(vp, hfs_btsync_callback, flags, 0); | |
2476 | ||
2477 | microuptime(&tv); | |
2478 | if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL)) | |
2479 | (void) BTSetLastSync(VTOF(vp), tv.tv_sec); | |
2480 | cp->c_touch_acctime = FALSE; | |
2481 | cp->c_touch_chgtime = FALSE; | |
2482 | cp->c_touch_modtime = FALSE; | |
2483 | ||
2484 | return 0; | |
2485 | } | |
2486 | ||
2487 | /* | |
2488 | * Remove a directory. | |
2489 | */ | |
2490 | int | |
2491 | hfs_vnop_rmdir(ap) | |
2492 | struct vnop_rmdir_args /* { | |
2493 | struct vnode *a_dvp; | |
2494 | struct vnode *a_vp; | |
2495 | struct componentname *a_cnp; | |
2496 | vfs_context_t a_context; | |
2497 | } */ *ap; | |
2498 | { | |
2499 | struct vnode *dvp = ap->a_dvp; | |
2500 | struct vnode *vp = ap->a_vp; | |
2501 | struct cnode *dcp = VTOC(dvp); | |
2502 | struct cnode *cp = VTOC(vp); | |
2503 | int error; | |
2504 | time_t orig_ctime; | |
2505 | ||
2506 | orig_ctime = VTOC(vp)->c_ctime; | |
2507 | ||
2508 | if (!S_ISDIR(cp->c_mode)) { | |
2509 | return (ENOTDIR); | |
2510 | } | |
2511 | if (dvp == vp) { | |
2512 | return (EINVAL); | |
2513 | } | |
2514 | ||
2515 | check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL); | |
2516 | cp = VTOC(vp); | |
2517 | ||
2518 | if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) { | |
2519 | return (error); | |
2520 | } | |
2521 | ||
2522 | /* Check for a race with rmdir on the parent directory */ | |
2523 | if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) { | |
2524 | hfs_unlockpair (dcp, cp); | |
2525 | return ENOENT; | |
2526 | } | |
2527 | error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0); | |
2528 | ||
2529 | hfs_unlockpair(dcp, cp); | |
2530 | ||
2531 | return (error); | |
2532 | } | |
2533 | ||
2534 | /* | |
2535 | * Remove a directory | |
2536 | * | |
2537 | * Both dvp and vp cnodes are locked | |
2538 | */ | |
2539 | int | |
2540 | hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, | |
2541 | int skip_reserve, int only_unlink) | |
2542 | { | |
2543 | struct cnode *cp; | |
2544 | struct cnode *dcp; | |
2545 | struct hfsmount * hfsmp; | |
2546 | struct cat_desc desc; | |
2547 | int lockflags; | |
2548 | int error = 0, started_tr = 0; | |
2549 | ||
2550 | cp = VTOC(vp); | |
2551 | dcp = VTOC(dvp); | |
2552 | hfsmp = VTOHFS(vp); | |
2553 | ||
2554 | if (dcp == cp) { | |
2555 | return (EINVAL); /* cannot remove "." */ | |
2556 | } | |
2557 | if (cp->c_flag & (C_NOEXISTS | C_DELETED)) { | |
2558 | return (0); | |
2559 | } | |
2560 | if (cp->c_entries != 0) { | |
2561 | return (ENOTEMPTY); | |
2562 | } | |
2563 | ||
2564 | /* | |
2565 | * If the directory is open or in use (e.g. opendir() or current working | |
2566 | * directory for some process); wait for inactive/reclaim to actually | |
2567 | * remove cnode from the catalog. Both inactive and reclaim codepaths are capable | |
2568 | * of removing open-unlinked directories from the catalog, as well as getting rid | |
2569 | * of EAs still on the element. So change only_unlink to true, so that it will get | |
2570 | * cleaned up below. | |
2571 | * | |
2572 | * Otherwise, we can get into a weird old mess where the directory has C_DELETED, | |
2573 | * but it really means C_NOEXISTS because the item was actually removed from the | |
2574 | * catalog. Then when we try to remove the entry from the catalog later on, it won't | |
2575 | * really be there anymore. | |
2576 | */ | |
2577 | if (vnode_isinuse(vp, 0)) { | |
2578 | only_unlink = 1; | |
2579 | } | |
2580 | ||
2581 | /* Deal with directory hardlinks */ | |
2582 | if (cp->c_flag & C_HARDLINK) { | |
2583 | /* | |
2584 | * Note that if we have a directory which was a hardlink at any point, | |
2585 | * its actual directory data is stored in the directory inode in the hidden | |
2586 | * directory rather than the leaf element(s) present in the namespace. | |
2587 | * | |
2588 | * If there are still other hardlinks to this directory, | |
2589 | * then we'll just eliminate this particular link and the vnode will still exist. | |
2590 | * If this is the last link to an empty directory, then we'll open-unlink the | |
2591 | * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS). | |
2592 | * | |
2593 | * We could also return EBUSY here. | |
2594 | */ | |
2595 | ||
2596 | return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve); | |
2597 | } | |
2598 | ||
2599 | /* | |
2600 | * In a few cases, we may want to allow the directory to persist in an | |
2601 | * open-unlinked state. If the directory is being open-unlinked (still has usecount | |
2602 | * references), or if it has EAs, or if it was being deleted as part of a rename, | |
2603 | * then we go ahead and move it to the hidden directory. | |
2604 | * | |
2605 | * If the directory is being open-unlinked, then we want to keep the catalog entry | |
2606 | * alive so that future EA calls and fchmod/fstat etc. do not cause issues later. | |
2607 | * | |
2608 | * If the directory had EAs, then we want to use the open-unlink trick so that the | |
2609 | * EA removal is not done in one giant transaction. Otherwise, it could cause a panic | |
2610 | * due to overflowing the journal. | |
2611 | * | |
2612 | * Finally, if it was deleted as part of a rename, we move it to the hidden directory | |
2613 | * in order to maintain rename atomicity. | |
2614 | * | |
2615 | * Note that the allow_dirs argument to hfs_removefile specifies that it is | |
2616 | * supposed to handle directories for this case. | |
2617 | */ | |
2618 | ||
2619 | if (((hfsmp->hfs_attribute_vp != NULL) && | |
2620 | ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) || | |
2621 | (only_unlink != 0)) { | |
2622 | ||
2623 | int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, NULL, only_unlink); | |
2624 | /* | |
2625 | * Even though hfs_vnop_rename calls vnode_recycle for us on tvp we call | |
2626 | * it here just in case we were invoked by rmdir() on a directory that had | |
2627 | * EAs. To ensure that we start reclaiming the space as soon as possible, | |
2628 | * we call vnode_recycle on the directory. | |
2629 | */ | |
2630 | vnode_recycle(vp); | |
2631 | ||
2632 | return ret; | |
2633 | ||
2634 | } | |
2635 | ||
2636 | dcp->c_flag |= C_DIR_MODIFICATION; | |
2637 | ||
2638 | #if QUOTA | |
2639 | if (hfsmp->hfs_flags & HFS_QUOTAS) | |
2640 | (void)hfs_getinoquota(cp); | |
2641 | #endif | |
2642 | if ((error = hfs_start_transaction(hfsmp)) != 0) { | |
2643 | goto out; | |
2644 | } | |
2645 | started_tr = 1; | |
2646 | ||
2647 | /* | |
2648 | * Verify the directory is empty (and valid). | |
2649 | * (Rmdir ".." won't be valid since | |
2650 | * ".." will contain a reference to | |
2651 | * the current directory and thus be | |
2652 | * non-empty.) | |
2653 | */ | |
2654 | if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) { | |
2655 | error = EPERM; | |
2656 | goto out; | |
2657 | } | |
2658 | ||
2659 | /* Remove the entry from the namei cache: */ | |
2660 | cache_purge(vp); | |
2661 | ||
2662 | /* | |
2663 | * Protect against a race with rename by using the component | |
2664 | * name passed in and parent id from dvp (instead of using | |
2665 | * the cp->c_desc which may have changed). | |
2666 | */ | |
2667 | desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr; | |
2668 | desc.cd_namelen = cnp->cn_namelen; | |
2669 | desc.cd_parentcnid = dcp->c_fileid; | |
2670 | desc.cd_cnid = cp->c_cnid; | |
2671 | desc.cd_flags = CD_ISDIR; | |
2672 | desc.cd_encoding = cp->c_encoding; | |
2673 | desc.cd_hint = 0; | |
2674 | ||
2675 | if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) { | |
2676 | error = 0; | |
2677 | goto out; | |
2678 | } | |
2679 | ||
2680 | /* Remove entry from catalog */ | |
2681 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); | |
2682 | ||
2683 | if (!skip_reserve) { | |
2684 | /* | |
2685 | * Reserve some space in the Catalog file. | |
2686 | */ | |
2687 | if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) { | |
2688 | hfs_systemfile_unlock(hfsmp, lockflags); | |
2689 | goto out; | |
2690 | } | |
2691 | } | |
2692 | ||
2693 | error = cat_delete(hfsmp, &desc, &cp->c_attr); | |
2694 | if (error == 0) { | |
2695 | /* The parent lost a child */ | |
2696 | if (dcp->c_entries > 0) | |
2697 | dcp->c_entries--; | |
2698 | DEC_FOLDERCOUNT(hfsmp, dcp->c_attr); | |
2699 | dcp->c_dirchangecnt++; | |
2700 | dcp->c_touch_chgtime = TRUE; | |
2701 | dcp->c_touch_modtime = TRUE; | |
2702 | hfs_touchtimes(hfsmp, cp); | |
2703 | (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL); | |
2704 | cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE); | |
2705 | } | |
2706 | ||
2707 | hfs_systemfile_unlock(hfsmp, lockflags); | |
2708 | ||
2709 | if (error) | |
2710 | goto out; | |
2711 | ||
2712 | #if QUOTA | |
2713 | if (hfsmp->hfs_flags & HFS_QUOTAS) | |
2714 | (void)hfs_chkiq(cp, -1, NOCRED, 0); | |
2715 | #endif /* QUOTA */ | |
2716 | ||
2717 | hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID)); | |
2718 | ||
2719 | /* Mark C_NOEXISTS since the catalog entry is now gone */ | |
2720 | cp->c_flag |= C_NOEXISTS; | |
2721 | out: | |
2722 | dcp->c_flag &= ~C_DIR_MODIFICATION; | |
2723 | wakeup((caddr_t)&dcp->c_flag); | |
2724 | ||
2725 | if (started_tr) { | |
2726 | hfs_end_transaction(hfsmp); | |
2727 | } | |
2728 | ||
2729 | return (error); | |
2730 | } | |
2731 | ||
2732 | ||
2733 | /* | |
2734 | * Remove a file or link. | |
2735 | */ | |
2736 | int | |
2737 | hfs_vnop_remove(ap) | |
2738 | struct vnop_remove_args /* { | |
2739 | struct vnode *a_dvp; | |
2740 | struct vnode *a_vp; | |
2741 | struct componentname *a_cnp; | |
2742 | int a_flags; | |
2743 | vfs_context_t a_context; | |
2744 | } */ *ap; | |
2745 | { | |
2746 | struct vnode *dvp = ap->a_dvp; | |
2747 | struct vnode *vp = ap->a_vp; | |
2748 | struct cnode *dcp = VTOC(dvp); | |
2749 | struct cnode *cp; | |
2750 | struct vnode *rvp = NULL; | |
2751 | struct hfsmount *hfsmp = VTOHFS(vp); | |
2752 | int error=0, recycle_rsrc=0; | |
2753 | int drop_rsrc_vnode = 0; | |
2754 | time_t orig_ctime; | |
2755 | ||
2756 | if (dvp == vp) { | |
2757 | return (EINVAL); | |
2758 | } | |
2759 | ||
2760 | orig_ctime = VTOC(vp)->c_ctime; | |
2761 | if (!vnode_isnamedstream(vp)) { | |
2762 | error = check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL); | |
2763 | if (error) { | |
2764 | // XXXdbg - decide on a policy for handling namespace handler failures! | |
2765 | // for now we just let them proceed. | |
2766 | } | |
2767 | } | |
2768 | error = 0; | |
2769 | ||
2770 | cp = VTOC(vp); | |
2771 | ||
2772 | /* | |
2773 | * We need to grab the cnode lock on 'cp' before the lockpair() | |
2774 | * to get an iocount on the rsrc fork BEFORE we enter hfs_removefile. | |
2775 | * To prevent other deadlocks, it's best to call hfs_vgetrsrc in a way that | |
2776 | * allows it to drop the cnode lock that it expects to be held coming in. | |
2777 | * If we don't, we could commit a lock order violation, causing a deadlock. | |
2778 | * In order to safely get the rsrc vnode with an iocount, we need to only hold the | |
2779 | * lock on the file temporarily. Unlike hfs_vnop_rename, we don't have to worry | |
2780 | * about one rsrc fork getting recycled for another, but we do want to ensure | |
2781 | * that there are no deadlocks due to lock ordering issues. | |
2782 | * | |
2783 | * Note: this function may be invoked for directory hardlinks, so just skip these | |
2784 | * steps if 'vp' is a directory. | |
2785 | */ | |
2786 | ||
2787 | ||
2788 | if ((vp->v_type == VLNK) || (vp->v_type == VREG)) { | |
2789 | ||
2790 | if ((error = hfs_lock (cp, HFS_EXCLUSIVE_LOCK))) { | |
2791 | return (error); | |
2792 | } | |
2793 | ||
2794 | error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE, TRUE); | |
2795 | hfs_unlock(cp); | |
2796 | if (error) { | |
2797 | /* we may have gotten an rsrc vp even though we got an error */ | |
2798 | if (rvp) { | |
2799 | vnode_put(rvp); | |
2800 | rvp = NULL; | |
2801 | } | |
2802 | return (error); | |
2803 | } | |
2804 | drop_rsrc_vnode = 1; | |
2805 | } | |
2806 | /* Now that we may have an iocount on rvp, do the lock pair */ | |
2807 | ||
2808 | hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK); | |
2809 | ||
2810 | if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) { | |
2811 | hfs_unlock_truncate(cp, 0); | |
2812 | /* drop the iocount on rvp if necessary */ | |
2813 | if (drop_rsrc_vnode) { | |
2814 | vnode_put (rvp); | |
2815 | } | |
2816 | return (error); | |
2817 | } | |
2818 | ||
2819 | /* | |
2820 | * Check to see if we raced rmdir for the parent directory | |
2821 | * hfs_removefile already checks for a race on vp/cp | |
2822 | */ | |
2823 | if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) { | |
2824 | error = ENOENT; | |
2825 | goto rm_done; | |
2826 | } | |
2827 | ||
2828 | error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, rvp, 0); | |
2829 | ||
2830 | /* | |
2831 | * If the remove succeeded in deleting the file, then we may need to mark | |
2832 | * the resource fork for recycle so that it is reclaimed as quickly | |
2833 | * as possible. If it were not recycled quickly, then this resource fork | |
2834 | * vnode could keep a v_parent reference on the data fork, which prevents it | |
2835 | * from going through reclaim (by giving it extra usecounts), except in the force- | |
2836 | * unmount case. | |
2837 | * | |
2838 | * However, a caveat: we need to continue to supply resource fork | |
2839 | * access to open-unlinked files even if the resource fork is not open. This is | |
2840 | * a requirement for the compressed files work. Luckily, hfs_vgetrsrc will handle | |
2841 | * this already if the data fork has been re-parented to the hidden directory. | |
2842 | * | |
2843 | * As a result, all we really need to do here is mark the resource fork vnode | |
2844 | * for recycle. If it goes out of core, it can be brought in again if needed. | |
2845 | * If the cnode was instead marked C_NOEXISTS, then there wouldn't be any | |
2846 | * more work. | |
2847 | */ | |
2848 | if ((error == 0) && (rvp)) { | |
2849 | recycle_rsrc = 1; | |
2850 | } | |
2851 | ||
2852 | /* | |
2853 | * Drop the truncate lock before unlocking the cnode | |
2854 | * (which can potentially perform a vnode_put and | |
2855 | * recycle the vnode which in turn might require the | |
2856 | * truncate lock) | |
2857 | */ | |
2858 | rm_done: | |
2859 | hfs_unlock_truncate(cp, 0); | |
2860 | hfs_unlockpair(dcp, cp); | |
2861 | ||
2862 | if (recycle_rsrc) { | |
2863 | /* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */ | |
2864 | vnode_recycle(rvp); | |
2865 | } | |
2866 | ||
2867 | if (drop_rsrc_vnode) { | |
2868 | /* drop iocount on rsrc fork, was obtained at beginning of fxn */ | |
2869 | vnode_put(rvp); | |
2870 | } | |
2871 | ||
2872 | return (error); | |
2873 | } | |
2874 | ||
2875 | ||
2876 | static int | |
2877 | hfs_removefile_callback(struct buf *bp, void *hfsmp) { | |
2878 | ||
2879 | if ( !(buf_flags(bp) & B_META)) | |
2880 | panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp); | |
2881 | /* | |
2882 | * it's part of the current transaction, kill it. | |
2883 | */ | |
2884 | journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp); | |
2885 | ||
2886 | return (BUF_CLAIMED); | |
2887 | } | |
2888 | ||
2889 | /* | |
2890 | * hfs_removefile | |
2891 | * | |
2892 | * Similar to hfs_vnop_remove except there are additional options. | |
2893 | * This function may be used to remove directories if they have | |
2894 | * lots of EAs -- note the 'allow_dirs' argument. | |
2895 | * | |
2896 | * The 'rvp' argument is used to pass in a resource fork vnode with | |
2897 | * an iocount to prevent it from getting recycled during usage. If it | |
2898 | * is NULL, then it is assumed the caller is a VNOP that cannot operate | |
2899 | * on resource forks, like hfs_vnop_symlink or hfs_removedir. Otherwise in | |
2900 | * a VNOP that takes multiple vnodes, we could violate lock order and | |
2901 | * cause a deadlock. | |
2902 | * | |
2903 | * Requires cnode and truncate locks to be held. | |
2904 | */ | |
2905 | int | |
2906 | hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, | |
2907 | int flags, int skip_reserve, int allow_dirs, | |
2908 | struct vnode *rvp, int only_unlink) | |
2909 | { | |
2910 | struct cnode *cp; | |
2911 | struct cnode *dcp; | |
2912 | struct hfsmount *hfsmp; | |
2913 | struct cat_desc desc; | |
2914 | struct timeval tv; | |
2915 | int dataforkbusy = 0; | |
2916 | int rsrcforkbusy = 0; | |
2917 | int lockflags; | |
2918 | int error = 0; | |
2919 | int started_tr = 0; | |
2920 | int isbigfile = 0, defer_remove=0, isdir=0; | |
2921 | int update_vh = 0; | |
2922 | ||
2923 | cp = VTOC(vp); | |
2924 | dcp = VTOC(dvp); | |
2925 | hfsmp = VTOHFS(vp); | |
2926 | ||
2927 | /* Check if we lost a race post lookup. */ | |
2928 | if (cp->c_flag & (C_NOEXISTS | C_DELETED)) { | |
2929 | return (0); | |
2930 | } | |
2931 | ||
2932 | if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) { | |
2933 | return 0; | |
2934 | } | |
2935 | ||
2936 | /* Make sure a remove is permitted */ | |
2937 | if (VNODE_IS_RSRC(vp)) { | |
2938 | return (EPERM); | |
2939 | } | |
2940 | /* Don't allow deleting the journal or journal_info_block. */ | |
2941 | if (hfsmp->jnl && | |
2942 | (cp->c_fileid == hfsmp->hfs_jnlfileid || cp->c_fileid == hfsmp->hfs_jnlinfoblkid)) { | |
2943 | return (EPERM); | |
2944 | } | |
2945 | /* | |
2946 | * Hard links require special handling. | |
2947 | */ | |
2948 | if (cp->c_flag & C_HARDLINK) { | |
2949 | if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) { | |
2950 | return (EBUSY); | |
2951 | } else { | |
2952 | /* A directory hard link with a link count of one is | |
2953 | * treated as a regular directory. Therefore it should | |
2954 | * only be removed using rmdir(). | |
2955 | */ | |
2956 | if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) && | |
2957 | (allow_dirs == 0)) { | |
2958 | return (EPERM); | |
2959 | } | |
2960 | return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve); | |
2961 | } | |
2962 | } | |
2963 | /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */ | |
2964 | if (vnode_isdir(vp)) { | |
2965 | if (allow_dirs == 0) | |
2966 | return (EPERM); /* POSIX */ | |
2967 | isdir = 1; | |
2968 | } | |
2969 | /* Sanity check the parent ids. */ | |
2970 | if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) && | |
2971 | (cp->c_parentcnid != dcp->c_fileid)) { | |
2972 | return (EINVAL); | |
2973 | } | |
2974 | ||
2975 | dcp->c_flag |= C_DIR_MODIFICATION; | |
2976 | ||
2977 | // this guy is going away so mark him as such | |
2978 | cp->c_flag |= C_DELETED; | |
2979 | ||
2980 | ||
2981 | /* Remove our entry from the namei cache. */ | |
2982 | cache_purge(vp); | |
2983 | ||
2984 | /* | |
2985 | * We expect that the caller, if operating on files, | |
2986 | * will have passed in a resource fork vnode with | |
2987 | * an iocount, even if there was no content. | |
2988 | * We only do the hfs_truncate on the rsrc fork | |
2989 | * if we know that it DID have content, however. | |
2990 | * This has the bonus of not requiring us to defer | |
2991 | * its removal, unless it is in use. | |
2992 | */ | |
2993 | ||
2994 | /* Check if this file is being used. */ | |
2995 | if (isdir == 0) { | |
2996 | dataforkbusy = vnode_isinuse(vp, 0); | |
2997 | /* Only need to defer resource fork removal if in use and has content */ | |
2998 | if (rvp && (cp->c_blocks - VTOF(vp)->ff_blocks)) { | |
2999 | rsrcforkbusy = vnode_isinuse(rvp, 0); | |
3000 | } | |
3001 | } | |
3002 | ||
3003 | /* Check if we have to break the deletion into multiple pieces. */ | |
3004 | if (isdir == 0) { | |
3005 | isbigfile = ((cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE) && overflow_extents(VTOF(vp))); | |
3006 | } | |
3007 | ||
3008 | /* Check if the file has xattrs. If it does, we'll have to delete them in | |
3009 | * individual transactions in case there are too many. */ | |
3010 | if ((hfsmp->hfs_attribute_vp != NULL) && | |
3011 | (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) { | |
3012 | defer_remove = 1; | |
3013 | } | |
3014 | ||
3015 | /* If we are explicitly told to only unlink item and move to hidden dir, then do it */ | |
3016 | if (only_unlink) { | |
3017 | defer_remove = 1; | |
3018 | } | |
3019 | ||
3020 | /* | |
3021 | * Carbon semantics prohibit deleting busy files. | |
3022 | * (enforced when VNODE_REMOVE_NODELETEBUSY is requested) | |
3023 | */ | |
3024 | if (dataforkbusy || rsrcforkbusy) { | |
3025 | if ((flags & VNODE_REMOVE_NODELETEBUSY) || | |
3026 | (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) { | |
3027 | error = EBUSY; | |
3028 | goto out; | |
3029 | } | |
3030 | } | |
3031 | ||
3032 | #if QUOTA | |
3033 | if (hfsmp->hfs_flags & HFS_QUOTAS) | |
3034 | (void)hfs_getinoquota(cp); | |
3035 | #endif /* QUOTA */ | |
3036 | ||
3037 | /* | |
3038 | * Do a ubc_setsize to indicate we need to wipe contents if: | |
3039 | * 1) item is a regular file. | |
3040 | * 2) At least one fork is not busy AND we were not told to only unlink it. | |
3041 | * | |
3042 | * We need to check for defer_remove since it can be set without | |
3043 | * having a busy data or rsrc fork. | |
3044 | */ | |
3045 | if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0)) { | |
3046 | /* | |
3047 | * A ubc_setsize can cause a pagein so defer it | |
3048 | * until after the cnode lock is dropped. The | |
3049 | * cnode lock cannot be dropped/reacquired here | |
3050 | * since we might already hold the journal lock. | |
3051 | */ | |
3052 | if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) { | |
3053 | cp->c_flag |= C_NEED_DATA_SETSIZE; | |
3054 | } | |
3055 | if (!rsrcforkbusy && rvp) { | |
3056 | cp->c_flag |= C_NEED_RSRC_SETSIZE; | |
3057 | } | |
3058 | } | |
3059 | ||
3060 | if ((error = hfs_start_transaction(hfsmp)) != 0) { | |
3061 | goto out; | |
3062 | } | |
3063 | started_tr = 1; | |
3064 | ||
3065 | // XXXdbg - if we're journaled, kill any dirty symlink buffers | |
3066 | if (hfsmp->jnl && vnode_islnk(vp) && (defer_remove == 0)) { | |
3067 | buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp); | |
3068 | } | |
3069 | ||
3070 | /* | |
3071 | * Prepare to truncate any non-busy forks. Busy forks will | |
3072 | * get truncated when their vnode goes inactive. | |
3073 | * Note that we will only enter this region if we | |
3074 | * can avoid creating an open-unlinked file. If | |
3075 | * either fork is busy, we will have to create an open | |
3076 | * unlinked file. | |
3077 | * | |
3078 | * Since we are deleting the file, we need to stagger the runtime | |
3079 | * modifications to do things in such a way that a crash won't | |
3080 | * result in us getting overlapped extents or any other | |
3081 | * bad inconsistencies. As such, we call prepare_release_storage | |
3082 | * which updates the UBC, updates quota information, and releases | |
3083 | * any loaned blocks that belong to this file. No actual | |
3084 | * truncation or bitmap manipulation is done until *AFTER* | |
3085 | * the catalog record is removed. | |
3086 | */ | |
3087 | if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0)) { | |
3088 | ||
3089 | if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) { | |
3090 | ||
3091 | error = hfs_prepare_release_storage (hfsmp, vp); | |
3092 | if (error) { | |
3093 | goto out; | |
3094 | } | |
3095 | update_vh = 1; | |
3096 | } | |
3097 | if (!rsrcforkbusy && rvp) { | |
3098 | error = hfs_prepare_release_storage (hfsmp, rvp); | |
3099 | if (error) { | |
3100 | goto out; | |
3101 | } | |
3102 | update_vh = 1; | |
3103 | } | |
3104 | } | |
3105 | ||
3106 | /* | |
3107 | * Protect against a race with rename by using the component | |
3108 | * name passed in and parent id from dvp (instead of using | |
3109 | * the cp->c_desc which may have changed). Also, be aware that | |
3110 | * because we allow directories to be passed in, we need to special case | |
3111 | * this temporary descriptor in case we were handed a directory. | |
3112 | */ | |
3113 | if (isdir) { | |
3114 | desc.cd_flags = CD_ISDIR; | |
3115 | } | |
3116 | else { | |
3117 | desc.cd_flags = 0; | |
3118 | } | |
3119 | desc.cd_encoding = cp->c_desc.cd_encoding; | |
3120 | desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr; | |
3121 | desc.cd_namelen = cnp->cn_namelen; | |
3122 | desc.cd_parentcnid = dcp->c_fileid; | |
3123 | desc.cd_hint = cp->c_desc.cd_hint; | |
3124 | desc.cd_cnid = cp->c_cnid; | |
3125 | microtime(&tv); | |
3126 | ||
3127 | /* | |
3128 | * There are two cases to consider: | |
3129 | * 1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir | |
3130 | * 2. File is not in use ==> remove the file | |
3131 | * | |
3132 | * We can get a directory in case 1 because it may have had lots of attributes, | |
3133 | * which need to get removed here. | |
3134 | */ | |
3135 | if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) { | |
3136 | char delname[32]; | |
3137 | struct cat_desc to_desc; | |
3138 | struct cat_desc todir_desc; | |
3139 | ||
3140 | /* | |
3141 | * Orphan this file or directory (move to hidden directory). | |
3142 | * Again, we need to take care that we treat directories as directories, | |
3143 | * and files as files. Because directories with attributes can be passed in, | |
3144 | * check to make sure that we have a directory or a file before filling in the | |
3145 | * temporary descriptor's flags. We keep orphaned directories AND files in | |
3146 | * the FILE_HARDLINKS private directory since we're generalizing over all | |
3147 | * orphaned filesystem objects. | |
3148 | */ | |
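| /* | |
| * Sketch of the move, with assumptions noted: the entry is renamed into the | |
| * private "deleted files" directory, whose parent is the volume root | |
| * (cnid 2, i.e. kHFSRootFolderID), under a unique name derived from the | |
| * file id by MAKE_DELETED_NAME (a "temp<fileid>"-style name). | |
| */ | |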
3149 | bzero(&todir_desc, sizeof(todir_desc)); | |
3150 | todir_desc.cd_parentcnid = 2; | |
3151 | ||
3152 | MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid); | |
3153 | bzero(&to_desc, sizeof(to_desc)); | |
3154 | to_desc.cd_nameptr = (const u_int8_t *)delname; | |
3155 | to_desc.cd_namelen = strlen(delname); | |
3156 | to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; | |
3157 | if (isdir) { | |
3158 | to_desc.cd_flags = CD_ISDIR; | |
3159 | } | |
3160 | else { | |
3161 | to_desc.cd_flags = 0; | |
3162 | } | |
3163 | to_desc.cd_cnid = cp->c_cnid; | |
3164 | ||
3165 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); | |
3166 | if (!skip_reserve) { | |
3167 | if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) { | |
3168 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3169 | goto out; | |
3170 | } | |
3171 | } | |
3172 | ||
3173 | error = cat_rename(hfsmp, &desc, &todir_desc, | |
3174 | &to_desc, (struct cat_desc *)NULL); | |
3175 | ||
3176 | if (error == 0) { | |
3177 | hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++; | |
3178 | if (isdir == 1) { | |
3179 | INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]); | |
3180 | } | |
3181 | (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS], | |
3182 | &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL); | |
3183 | ||
3184 | /* Update the parent directory */ | |
3185 | if (dcp->c_entries > 0) | |
3186 | dcp->c_entries--; | |
3187 | if (isdir == 1) { | |
3188 | DEC_FOLDERCOUNT(hfsmp, dcp->c_attr); | |
3189 | } | |
3190 | dcp->c_dirchangecnt++; | |
3191 | dcp->c_ctime = tv.tv_sec; | |
3192 | dcp->c_mtime = tv.tv_sec; | |
3193 | (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL); | |
3194 | ||
3195 | /* Update the file or directory's state */ | |
3196 | cp->c_flag |= C_DELETED; | |
3197 | cp->c_ctime = tv.tv_sec; | |
3198 | --cp->c_linkcount; | |
3199 | (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL); | |
3200 | } | |
3201 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3202 | if (error) | |
3203 | goto out; | |
3204 | ||
3205 | } | |
3206 | else /* Not busy */ { | |
3207 | ||
3208 | #if QUOTA | |
3209 | off_t savedbytes; | |
3210 | int blksize = hfsmp->blockSize; | |
3211 | #endif | |
3212 | u_int32_t fileid = cp->c_fileid; | |
3213 | ||
3214 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK); | |
3215 | if (!skip_reserve) { | |
3216 | if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) { | |
3217 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3218 | goto out; | |
3219 | } | |
3220 | } | |
3221 | ||
3222 | error = cat_delete(hfsmp, &desc, &cp->c_attr); | |
3223 | ||
3224 | if (error && error != ENXIO && error != ENOENT) { | |
3225 | printf("hfs_removefile: deleting file %s (%d), err: %d\n", | |
3226 | cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error); | |
3227 | } | |
3228 | ||
3229 | if (error == 0) { | |
3230 | /* Update the parent directory */ | |
3231 | if (dcp->c_entries > 0) | |
3232 | dcp->c_entries--; | |
3233 | dcp->c_dirchangecnt++; | |
3234 | dcp->c_ctime = tv.tv_sec; | |
3235 | dcp->c_mtime = tv.tv_sec; | |
3236 | (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL); | |
3237 | } | |
3238 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3239 | if (error) { | |
3240 | goto out; | |
3241 | } | |
3242 | ||
3243 | /* | |
3244 | * Now that we've wiped out the catalog record, the file effectively doesn't | |
3245 | * exist anymore. So update the quota records to reflect the loss of the | |
3246 | * data fork and the resource fork. | |
3247 | */ | |
3248 | #if QUOTA | |
3249 | if (cp->c_datafork->ff_blocks > 0) { | |
3250 | savedbytes = ((off_t)cp->c_datafork->ff_blocks * (off_t)blksize); | |
3251 | (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0); | |
3252 | } | |
3253 | ||
3254 | if (cp->c_rsrcfork && (cp->c_rsrcfork->ff_blocks > 0)) { | |
3255 | savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize); | |
3256 | (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0); | |
3257 | } | |
3258 | ||
3259 | if (hfsmp->hfs_flags & HFS_QUOTAS) { | |
3260 | (void)hfs_chkiq(cp, -1, NOCRED, 0); | |
3261 | } | |
3262 | #endif | |
3263 | ||
3264 | ||
3265 | /* | |
3266 | * If we didn't get any errors deleting the catalog entry, then go ahead | |
3267 | * and release the backing store now. The filefork pointers are still valid. | |
3268 | */ | |
3269 | error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid); | |
3270 | ||
3271 | if (error) { | |
3272 | /* | |
3273 | * If we encountered an error updating the extents and bitmap, | |
3274 | * mark the volume inconsistent. At this point, the catalog record has | |
3275 | * already been deleted, so we can't recover it at this point. We need | |
3276 | * to proceed and update the volume header and mark the cnode C_NOEXISTS. | |
3277 | * The subsequent fsck should be able to recover the free space for us. | |
3278 | */ | |
3279 | hfs_mark_volume_inconsistent(hfsmp); | |
3280 | } | |
3281 | else { | |
3282 | /* reset update_vh to 0, since hfs_release_storage should have done it for us */ | |
3283 | update_vh = 0; | |
3284 | } | |
3285 | ||
3286 | cp->c_flag |= C_NOEXISTS; | |
3287 | cp->c_flag &= ~C_DELETED; | |
3288 | ||
3289 | cp->c_touch_chgtime = TRUE; /* XXX needed ? */ | |
3290 | --cp->c_linkcount; | |
3291 | ||
3292 | /* | |
3293 | * We must never get a directory if we're in this else block. We could | |
3294 | * accidentally drop the number of files in the volume header if we did. | |
3295 | */ | |
3296 | hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID)); | |
3297 | ||
3298 | } | |
3299 | ||
3300 | /* | |
3301 | * All done with this cnode's descriptor... | |
3302 | * | |
3303 | * Note: all future catalog calls for this cnode must be by | |
3304 | * fileid only. This is OK for HFS (which doesn't have file | |
3305 | * thread records) since HFS doesn't support the removal of | |
3306 | * busy files. | |
3307 | */ | |
3308 | cat_releasedesc(&cp->c_desc); | |
3309 | ||
3310 | out: | |
3311 | if (error) { | |
3312 | cp->c_flag &= ~C_DELETED; | |
3313 | } | |
3314 | ||
3315 | if (update_vh) { | |
3316 | /* | |
3317 | * If we bailed out earlier, we may need to update the volume header | |
3318 | * to deal with the borrowed blocks accounting. | |
3319 | */ | |
3320 | hfs_volupdate (hfsmp, VOL_UPDATE, 0); | |
3321 | } | |
3322 | ||
3323 | if (started_tr) { | |
3324 | hfs_end_transaction(hfsmp); | |
3325 | } | |
3326 | ||
3327 | dcp->c_flag &= ~C_DIR_MODIFICATION; | |
3328 | wakeup((caddr_t)&dcp->c_flag); | |
3329 | ||
3330 | return (error); | |
3331 | } | |
3332 | ||
3333 | ||
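| /* | |
| * Replace a cnode's catalog descriptor with *cdp.  Ownership of any allocated | |
| * name buffer moves to the cnode; the source descriptor is stripped of its | |
| * name so the buffer is not released twice. | |
| */ | |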
3334 | __private_extern__ void | |
3335 | replace_desc(struct cnode *cp, struct cat_desc *cdp) | |
3336 | { | |
3337 | // fixes 4348457 and 4463138 | |
3338 | if (&cp->c_desc == cdp) { | |
3339 | return; | |
3340 | } | |
3341 | ||
3342 | /* First release allocated name buffer */ | |
3343 | if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) { | |
3344 | const u_int8_t *name = cp->c_desc.cd_nameptr; | |
3345 | ||
3346 | cp->c_desc.cd_nameptr = 0; | |
3347 | cp->c_desc.cd_namelen = 0; | |
3348 | cp->c_desc.cd_flags &= ~CD_HASBUF; | |
3349 | vfs_removename((const char *)name); | |
3350 | } | |
3351 | bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc)); | |
3352 | ||
3353 | /* Cnode now owns the name buffer */ | |
3354 | cdp->cd_nameptr = 0; | |
3355 | cdp->cd_namelen = 0; | |
3356 | cdp->cd_flags &= ~CD_HASBUF; | |
3357 | } | |
3358 | ||
3359 | /* | |
3360 | * Rename a cnode. | |
3361 | * | |
3362 | * The VFS layer guarantees that: | |
3363 | * - source and destination will either both be directories, or | |
3364 | * both not be directories. | |
3365 | * - all the vnodes are from the same file system | |
3366 | * | |
3367 | * When the target is a directory, HFS must ensure that it is empty. | |
3368 | * | |
3369 | * Note that this function requires up to 6 vnodes in order to work properly | |
3370 | * if it is operating on files (and not on directories). This is because only | |
3371 | * files can have resource forks, and we now require iocounts to be held on the | |
3372 | * vnodes corresponding to the resource forks (if applicable) as well as | |
3373 | * the files or directories undergoing rename. The problem with not holding | |
3374 | * iocounts on the resource fork vnodes is that it can lead to a deadlock | |
3375 | * situation: The rsrc fork of the source file may be recycled and reclaimed | |
3376 | * in order to provide a vnode for the destination file's rsrc fork. Since | |
3377 | * data and rsrc forks share the same cnode, we'd eventually try to lock the | |
3378 | * source file's cnode in order to sync its rsrc fork to disk, but it's already | |
3379 | * been locked. By taking the rsrc fork vnodes up front we ensure that they | |
3380 | * cannot be recycled, and that the situation mentioned above cannot happen. | |
3381 | */ | |
3382 | int | |
3383 | hfs_vnop_rename(ap) | |
3384 | struct vnop_rename_args /* { | |
3385 | struct vnode *a_fdvp; | |
3386 | struct vnode *a_fvp; | |
3387 | struct componentname *a_fcnp; | |
3388 | struct vnode *a_tdvp; | |
3389 | struct vnode *a_tvp; | |
3390 | struct componentname *a_tcnp; | |
3391 | vfs_context_t a_context; | |
3392 | } */ *ap; | |
3393 | { | |
3394 | struct vnode *tvp = ap->a_tvp; | |
3395 | struct vnode *tdvp = ap->a_tdvp; | |
3396 | struct vnode *fvp = ap->a_fvp; | |
3397 | struct vnode *fdvp = ap->a_fdvp; | |
3398 | struct vnode *fvp_rsrc = NULLVP; | |
3399 | struct vnode *tvp_rsrc = NULLVP; | |
3400 | struct componentname *tcnp = ap->a_tcnp; | |
3401 | struct componentname *fcnp = ap->a_fcnp; | |
3402 | struct proc *p = vfs_context_proc(ap->a_context); | |
3403 | struct cnode *fcp; | |
3404 | struct cnode *fdcp; | |
3405 | struct cnode *tdcp; | |
3406 | struct cnode *tcp; | |
3407 | struct cnode *error_cnode; | |
3408 | struct cat_desc from_desc; | |
3409 | struct cat_desc to_desc; | |
3410 | struct cat_desc out_desc; | |
3411 | struct hfsmount *hfsmp; | |
3412 | cat_cookie_t cookie; | |
3413 | int tvp_deleted = 0; | |
3414 | int started_tr = 0, got_cookie = 0; | |
3415 | int took_trunc_lock = 0; | |
3416 | int lockflags; | |
3417 | int error; | |
3418 | time_t orig_from_ctime, orig_to_ctime; | |
3419 | ||
3420 | orig_from_ctime = VTOC(fvp)->c_ctime; | |
3421 | if (tvp && VTOC(tvp)) { | |
3422 | orig_to_ctime = VTOC(tvp)->c_ctime; | |
3423 | } else { | |
3424 | orig_to_ctime = ~0; | |
3425 | } | |
3426 | ||
3427 | check_for_tracked_file(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL); | |
3428 | ||
3429 | if (tvp && VTOC(tvp)) { | |
3430 | check_for_tracked_file(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL); | |
3431 | } | |
3432 | ||
3433 | /* | |
3434 | * Before grabbing the four locks, we may need to get an iocount on the resource fork | |
3435 | * vnodes in question, just like hfs_vnop_remove. If fvp and tvp are not | |
3436 | * directories, then go ahead and grab the resource fork vnodes now | |
3437 | * one at a time. We don't actively need the fvp_rsrc to do the rename operation, | |
3438 | * but we need the iocount to prevent the vnode from getting recycled/reclaimed | |
3439 | * during the middle of the VNOP. | |
3440 | */ | |
3441 | ||
3442 | ||
3443 | if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) { | |
3444 | ||
3445 | if ((error = hfs_lock (VTOC(fvp), HFS_EXCLUSIVE_LOCK))) { | |
3446 | return (error); | |
3447 | } | |
3448 | /* | |
3449 | * We care if we race against rename/delete with this cp, so we'll error out | |
3450 | * if the file becomes open-unlinked during this call. | |
3451 | */ | |
3452 | error = hfs_vgetrsrc(VTOHFS(fvp), fvp, &fvp_rsrc, TRUE, TRUE); | |
3453 | hfs_unlock (VTOC(fvp)); | |
3454 | if (error) { | |
3455 | if (fvp_rsrc) { | |
3456 | vnode_put(fvp_rsrc); | |
3457 | } | |
3458 | return error; | |
3459 | } | |
3460 | } | |
3461 | ||
3462 | if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) { | |
3463 | /* | |
3464 | * Lock failure is OK on tvp, since we may race with a remove on the dst. | |
3465 | * But this shouldn't stop rename from proceeding, so only try to | |
3466 | * grab the resource fork if the lock succeeded. | |
3467 | */ | |
3468 | if (hfs_lock (VTOC(tvp), HFS_EXCLUSIVE_LOCK) == 0) { | |
3469 | tcp = VTOC(tvp); | |
3470 | /* | |
3471 | * We only care if we get an open-unlinked file on the dst so we | |
3472 | * know to null out tvp/tcp to make the rename operation act | |
3473 | * as if they never existed. Because they're effectively out of the | |
3474 | * namespace already, it's fine to do this. If this is true, then | |
3475 | * make sure to unlock the cnode and drop the iocount only after the unlock. | |
3476 | */ | |
3477 | ||
3478 | error = hfs_vgetrsrc(VTOHFS(tvp), tvp, &tvp_rsrc, TRUE, TRUE); | |
3479 | hfs_unlock (tcp); | |
3480 | if (error) { | |
3481 | /* | |
3482 | * Since we specify TRUE for error_on_unlinked in hfs_vgetrsrc, | |
3483 | * we can get a rsrc fork vnode even if it returns an error. | |
3484 | */ | |
3485 | tcp = NULL; | |
3486 | tvp = NULL; | |
3487 | if (tvp_rsrc) { | |
3488 | vnode_put (tvp_rsrc); | |
3489 | tvp_rsrc = NULL; | |
3490 | } | |
3491 | /* just bypass truncate lock and act as if we never got tcp/tvp */ | |
3492 | goto retry; | |
3493 | } | |
3494 | } | |
3495 | } | |
3496 | ||
3497 | /* When tvp exists, take the truncate lock for hfs_removefile(). */ | |
3498 | if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) { | |
3499 | hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK); | |
3500 | took_trunc_lock = 1; | |
3501 | } | |
3502 | ||
3503 | retry: | |
3504 | error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL, | |
3505 | HFS_EXCLUSIVE_LOCK, &error_cnode); | |
3506 | if (error) { | |
3507 | if (took_trunc_lock) { | |
3508 | hfs_unlock_truncate(VTOC(tvp), 0); | |
3509 | took_trunc_lock = 0; | |
3510 | } | |
3511 | /* | |
3512 | * tvp might no longer exist. If the cause of the lock failure | |
3513 | * was tvp, then we can try again with tvp/tcp set to NULL. | |
3514 | * This is ok because the vfs syscall will vnode_put the vnodes | |
3515 | * after we return from hfs_vnop_rename. | |
3516 | */ | |
3517 | if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) { | |
3518 | tcp = NULL; | |
3519 | tvp = NULL; | |
3520 | goto retry; | |
3521 | } | |
3522 | /* otherwise, drop iocounts on the rsrc forks and bail out */ | |
3523 | if (fvp_rsrc) { | |
3524 | vnode_put (fvp_rsrc); | |
3525 | } | |
3526 | if (tvp_rsrc) { | |
3527 | vnode_put (tvp_rsrc); | |
3528 | } | |
3529 | return (error); | |
3530 | } | |
3531 | ||
3532 | fdcp = VTOC(fdvp); | |
3533 | fcp = VTOC(fvp); | |
3534 | tdcp = VTOC(tdvp); | |
3535 | tcp = tvp ? VTOC(tvp) : NULL; | |
3536 | hfsmp = VTOHFS(tdvp); | |
3537 | ||
3538 | /* Ensure we didn't race src or dst parent directories with rmdir. */ | |
3539 | if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) { | |
3540 | error = ENOENT; | |
3541 | goto out; | |
3542 | } | |
3543 | ||
3544 | if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) { | |
3545 | error = ENOENT; | |
3546 | goto out; | |
3547 | } | |
3548 | ||
3549 | ||
3550 | /* Check for a race against unlink. The hfs_valid_cnode checks validate | |
3551 | * the parent/child relationship with fdcp and tdcp, as well as the | |
3552 | * component name of the target cnodes. | |
3553 | */ | |
3554 | if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) { | |
3555 | error = ENOENT; | |
3556 | goto out; | |
3557 | } | |
3558 | ||
3559 | if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) { | |
3560 | // | |
3561 | // hmm, the destination vnode isn't valid any more. | |
3562 | // in this case we can just drop him and pretend he | |
3563 | // never existed in the first place. | |
3564 | // | |
3565 | if (took_trunc_lock) { | |
3566 | hfs_unlock_truncate(VTOC(tvp), 0); | |
3567 | took_trunc_lock = 0; | |
3568 | } | |
3569 | error = 0; | |
3570 | ||
3571 | hfs_unlockfour(fdcp, fcp, tdcp, tcp); | |
3572 | ||
3573 | tcp = NULL; | |
3574 | tvp = NULL; | |
3575 | ||
3576 | // retry the locking with tvp null'ed out | |
3577 | goto retry; | |
3578 | } | |
3579 | ||
3580 | fdcp->c_flag |= C_DIR_MODIFICATION; | |
3581 | if (fdvp != tdvp) { | |
3582 | tdcp->c_flag |= C_DIR_MODIFICATION; | |
3583 | } | |
3584 | ||
3585 | /* | |
3586 | * Disallow renaming of a directory hard link if the source and | |
3587 | * destination parent directories are different, or a directory whose | |
3588 | * descendant is a directory hard link and one of the ancestors | |
3589 | * of the destination directory is a directory hard link. | |
3590 | */ | |
3591 | if (vnode_isdir(fvp) && (fdvp != tdvp)) { | |
3592 | if (fcp->c_flag & C_HARDLINK) { | |
3593 | error = EPERM; | |
3594 | goto out; | |
3595 | } | |
3596 | if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) { | |
3597 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); | |
3598 | if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) { | |
3599 | error = EPERM; | |
3600 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3601 | goto out; | |
3602 | } | |
3603 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3604 | } | |
3605 | } | |
3606 | ||
3607 | /* | |
3608 | * The following edge case is caught here: | |
3609 | * (to cannot be a descendant of from) | |
3610 | * | |
3611 | * o fdvp | |
3612 | * / | |
3613 | * / | |
3614 | * o fvp | |
3615 | * \ | |
3616 | * \ | |
3617 | * o tdvp | |
3618 | * / | |
3619 | * / | |
3620 | * o tvp | |
3621 | */ | |
3622 | if (tdcp->c_parentcnid == fcp->c_fileid) { | |
3623 | error = EINVAL; | |
3624 | goto out; | |
3625 | } | |
3626 | ||
3627 | /* | |
3628 | * The following two edge cases are caught here: | |
3629 | * (note tvp is not empty) | |
3630 | * | |
3631 | * o tdvp o tdvp | |
3632 | * / / | |
3633 | * / / | |
3634 | * o tvp tvp o fdvp | |
3635 | * \ \ | |
3636 | * \ \ | |
3637 | * o fdvp o fvp | |
3638 | * / | |
3639 | * / | |
3640 | * o fvp | |
3641 | */ | |
3642 | if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) { | |
3643 | error = ENOTEMPTY; | |
3644 | goto out; | |
3645 | } | |
3646 | ||
3647 | /* | |
3648 | * The following edge case is caught here: | |
3649 | * (the from child and parent are the same) | |
3650 | * | |
3651 | * o tdvp | |
3652 | * / | |
3653 | * / | |
3654 | * fdvp o fvp | |
3655 | */ | |
3656 | if (fdvp == fvp) { | |
3657 | error = EINVAL; | |
3658 | goto out; | |
3659 | } | |
3660 | ||
3661 | /* | |
3662 | * Make sure "from" vnode and its parent are changeable. | |
3663 | */ | |
3664 | if ((fcp->c_flags & (IMMUTABLE | APPEND)) || (fdcp->c_flags & APPEND)) { | |
3665 | error = EPERM; | |
3666 | goto out; | |
3667 | } | |
3668 | ||
3669 | /* | |
3670 | * If the destination parent directory is "sticky", then the | |
3671 | * user must own the parent directory, or the destination of | |
3672 | * the rename, otherwise the destination may not be changed | |
3673 | * (except by root). This implements append-only directories. | |
3674 | * | |
3675 | * Note that checks for immutable and write access are done | |
3676 | * by the call to hfs_removefile. | |
3677 | */ | |
3678 | if (tvp && (tdcp->c_mode & S_ISTXT) && | |
3679 | (suser(vfs_context_ucred(tcnp->cn_context), NULL)) && | |
3680 | (kauth_cred_getuid(vfs_context_ucred(tcnp->cn_context)) != tdcp->c_uid) && | |
3681 | (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(tcnp->cn_context), p, false)) ) { | |
3682 | error = EPERM; | |
3683 | goto out; | |
3684 | } | |
3685 | ||
3686 | #if QUOTA | |
3687 | if (tvp) | |
3688 | (void)hfs_getinoquota(tcp); | |
3689 | #endif | |
3690 | /* Preflighting done, take fvp out of the name space. */ | |
3691 | cache_purge(fvp); | |
3692 | ||
3693 | bzero(&from_desc, sizeof(from_desc)); | |
3694 | from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr; | |
3695 | from_desc.cd_namelen = fcnp->cn_namelen; | |
3696 | from_desc.cd_parentcnid = fdcp->c_fileid; | |
3697 | from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED); | |
3698 | from_desc.cd_cnid = fcp->c_cnid; | |
3699 | ||
3700 | bzero(&to_desc, sizeof(to_desc)); | |
3701 | to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr; | |
3702 | to_desc.cd_namelen = tcnp->cn_namelen; | |
3703 | to_desc.cd_parentcnid = tdcp->c_fileid; | |
3704 | to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED); | |
3705 | to_desc.cd_cnid = fcp->c_cnid; | |
3706 | ||
3707 | if ((error = hfs_start_transaction(hfsmp)) != 0) { | |
3708 | goto out; | |
3709 | } | |
3710 | started_tr = 1; | |
3711 | ||
3712 | /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask | |
3713 | * inside a journal transaction and without holding a cnode lock. | |
3714 | * As setting of this bit depends on being in journal transaction for | |
3715 | * concurrency, check this bit again after we start journal transaction for rename | |
3716 | * to ensure that this directory does not have any descendant that | |
3717 | * is a directory hard link. | |
3718 | */ | |
3719 | if (vnode_isdir(fvp) && (fdvp != tdvp)) { | |
3720 | if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) { | |
3721 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); | |
3722 | if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) { | |
3723 | error = EPERM; | |
3724 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3725 | goto out; | |
3726 | } | |
3727 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3728 | } | |
3729 | } | |
3730 | ||
3731 | // if it's a hardlink then re-lookup the name so | |
3732 | // that we get the correct cnid in from_desc (see | |
3733 | // the comment in hfs_removefile for more details) | |
3734 | // | |
3735 | if (fcp->c_flag & C_HARDLINK) { | |
3736 | struct cat_desc tmpdesc; | |
3737 | cnid_t real_cnid; | |
3738 | ||
3739 | tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr; | |
3740 | tmpdesc.cd_namelen = fcnp->cn_namelen; | |
3741 | tmpdesc.cd_parentcnid = fdcp->c_fileid; | |
3742 | tmpdesc.cd_hint = fdcp->c_childhint; | |
3743 | tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR; | |
3744 | tmpdesc.cd_encoding = 0; | |
3745 | ||
3746 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); | |
3747 | ||
3748 | if (cat_lookup(hfsmp, &tmpdesc, 0, NULL, NULL, NULL, &real_cnid) != 0) { | |
3749 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3750 | goto out; | |
3751 | } | |
3752 | ||
3753 | // use the real cnid instead of whatever happened to be there | |
3754 | from_desc.cd_cnid = real_cnid; | |
3755 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3756 | } | |
3757 | ||
3758 | /* | |
3759 | * Reserve some space in the Catalog file. | |
3760 | */ | |
3761 | if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) { | |
3762 | goto out; | |
3763 | } | |
3764 | got_cookie = 1; | |
3765 | ||
3766 | /* | |
3767 | * If the destination exists then it may need to be removed. | |
3768 | * | |
3769 | * Due to HFS's locking system, we should always move the | |
3770 | * existing 'tvp' element to the hidden directory in hfs_vnop_rename. | |
3771 | * Because the VNOP_LOOKUP call enters and exits the filesystem independently | |
3772 | * of the actual vnop that it was trying to do (stat, link, readlink), | |
3773 | * we must release the cnode lock of that element during the interim to | |
3774 | * do MAC checking, vnode authorization, and other calls. In that time, | |
3775 | * the item can be deleted (or renamed over). However, only in the rename | |
3776 | * case is it inappropriate to return ENOENT from any of those calls. Either | |
3777 | * the call should return information about the old element (stale), or get | |
3778 | * information about the newer element that we are about to write in its place. | |
3779 | * | |
3780 | * HFS lookup has been modified to detect a rename and re-drive its | |
3781 | * lookup internally. For other calls that have already succeeded in | |
3782 | * their lookup call and are waiting to acquire the cnode lock in order | |
3783 | * to proceed, that cnode lock will not fail due to the cnode being marked | |
3784 | * C_NOEXISTS, because it won't have been marked as such. It will only | |
3785 | * have C_DELETED. Thus, they will simply act on the stale open-unlinked | |
3786 | * element. All future callers will get the new element. | |
3787 | * | |
3788 | * To implement this behavior, we pass the "only_unlink" argument to | |
3789 | * hfs_removefile and hfs_removedir. This will result in the vnode acting | |
3790 | * as though it is open-unlinked. Additionally, when we are done moving the | |
3791 | * element to the hidden directory, we vnode_recycle the target so that it is | |
3792 | * reclaimed as soon as possible. Reclaim and inactive are both | |
3793 | * capable of clearing out unused blocks for an open-unlinked file or dir. | |
3794 | */ | |
3795 | if (tvp) { | |
3796 | /* | |
3797 | * When fvp matches tvp they could be case variants | |
3798 | * or matching hard links. | |
3799 | */ | |
3800 | if (fvp == tvp) { | |
3801 | if (!(fcp->c_flag & C_HARDLINK)) { | |
3802 | goto skip_rm; /* simple case variant */ | |
3803 | ||
3804 | } else if ((fdvp != tdvp) || | |
3805 | (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) { | |
3806 | goto out; /* matching hardlinks, nothing to do */ | |
3807 | ||
3808 | } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen, | |
3809 | (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) { | |
3810 | goto skip_rm; /* case-variant hardlink in the same dir */ | |
3811 | } else { | |
3812 | goto out; /* matching hardlink, nothing to do */ | |
3813 | } | |
3814 | } | |
3815 | ||
3816 | ||
3817 | if (vnode_isdir(tvp)) { | |
3818 | /* | |
3819 | * hfs_removedir will eventually call hfs_removefile on the directory | |
3820 | * we're working on, because only hfs_removefile does the renaming of the | |
3821 | * item to the hidden directory. The directory will stay around in the | |
3822 | * hidden directory with C_DELETED until it gets an inactive or a reclaim. | |
3823 | * That way, we can destroy all of the EAs as needed and allow new ones to be | |
3824 | * written. | |
3825 | */ | |
3826 | error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1); | |
3827 | } | |
3828 | else { | |
3829 | error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, tvp_rsrc, 1); | |
3830 | ||
3831 | /* | |
3832 | * If the destination file had a resource fork vnode, then we need to get rid of | |
3833 | * its blocks when there are no more references to it. Because the call to | |
3834 | * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim | |
3835 | * on the resource fork vnode, in order to prevent block leaks. Otherwise, | |
3836 | * the resource fork vnode could prevent the data fork vnode from going out of scope | |
3837 | * because it holds a v_parent reference on it. So we mark it for termination | |
3838 | * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it | |
3839 | * can clean up the blocks of open-unlinked files and resource forks. | |
3840 | * | |
3841 | * We can safely call vnode_recycle on the resource fork because we took an iocount | |
3842 | * reference on it at the beginning of the function. | |
3843 | */ | |
3844 | ||
3845 | if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) { | |
3846 | vnode_recycle(tvp_rsrc); | |
3847 | } | |
3848 | } | |
3849 | ||
3850 | if (error) { | |
3851 | goto out; | |
3852 | } | |
3853 | ||
3854 | tvp_deleted = 1; | |
3855 | ||
3856 | /* Mark 'tcp' as being deleted due to a rename */ | |
3857 | tcp->c_flag |= C_RENAMED; | |
3858 | ||
3859 | /* | |
3860 | * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks | |
3861 | * as quickly as possible. | |
3862 | */ | |
3863 | vnode_recycle(tvp); | |
3864 | } | |
3865 | skip_rm: | |
3866 | /* | |
3867 | * All done with tvp and fvp. | |
3868 | * | |
3869 | * We also jump to this point if there was no destination observed during lookup and namei. | |
3870 | * However, because only iocounts are held at the VFS layer, there is nothing preventing a | |
3871 | * competing thread from racing us and creating a file or dir at the destination of this rename | |
3872 | * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename | |
3873 | * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the | |
3874 | * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled | |
3875 | * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY. | |
3876 | * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno | |
3877 | * will be swallowed and it will restart the operation. | |
3878 | */ | |
3879 | ||
3880 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); | |
3881 | error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc); | |
3882 | hfs_systemfile_unlock(hfsmp, lockflags); | |
3883 | ||
3884 | if (error) { | |
3885 | if (error == EEXIST) { | |
3886 | error = ERECYCLE; | |
3887 | } | |
3888 | goto out; | |
3889 | } | |
3890 | ||
3891 | /* Invalidate negative cache entries in the destination directory */ | |
3892 | if (tdcp->c_flag & C_NEG_ENTRIES) { | |
3893 | cache_purge_negatives(tdvp); | |
3894 | tdcp->c_flag &= ~C_NEG_ENTRIES; | |
3895 | } | |
3896 | ||
3897 | /* Update cnode's catalog descriptor */ | |
3898 | replace_desc(fcp, &out_desc); | |
3899 | fcp->c_parentcnid = tdcp->c_fileid; | |
3900 | fcp->c_hint = 0; | |
3901 | ||
3902 | /* Now indicate this cnode needs to have date-added written to the finderinfo */ | |
3903 | fcp->c_flag |= C_NEEDS_DATEADDED; | |
3904 | (void) hfs_update (fvp, 0); | |
3905 | ||
3906 | ||
3907 | hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE, | |
3908 | (fdcp->c_cnid == kHFSRootFolderID)); | |
3909 | hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE, | |
3910 | (tdcp->c_cnid == kHFSRootFolderID)); | |
3911 | ||
3912 | /* Update both parent directories. */ | |
3913 | if (fdvp != tdvp) { | |
3914 | if (vnode_isdir(fvp)) { | |
3915 | /* If the source directory has directory hard link | |
3916 | * descendants, set the kHFSHasChildLinkBit in the | |
3917 | * destination parent hierarchy | |
3918 | */ | |
3919 | if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) && | |
3920 | !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) { | |
3921 | ||
3922 | tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask; | |
3923 | ||
3924 | error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid); | |
3925 | if (error) { | |
3926 | printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid); | |
3927 | error = 0; | |
3928 | } | |
3929 | } | |
3930 | INC_FOLDERCOUNT(hfsmp, tdcp->c_attr); | |
3931 | DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr); | |
3932 | } | |
3933 | tdcp->c_entries++; | |
3934 | tdcp->c_dirchangecnt++; | |
3935 | if (fdcp->c_entries > 0) | |
3936 | fdcp->c_entries--; | |
3937 | fdcp->c_dirchangecnt++; | |
3938 | fdcp->c_touch_chgtime = TRUE; | |
3939 | fdcp->c_touch_modtime = TRUE; | |
3940 | ||
3941 | fdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out! | |
3942 | (void) hfs_update(fdvp, 0); | |
3943 | } | |
3944 | tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */ | |
3945 | tdcp->c_touch_chgtime = TRUE; | |
3946 | tdcp->c_touch_modtime = TRUE; | |
3947 | ||
3948 | tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out! | |
3949 | (void) hfs_update(tdvp, 0); | |
3950 | out: | |
3951 | if (got_cookie) { | |
3952 | cat_postflight(hfsmp, &cookie, p); | |
3953 | } | |
3954 | if (started_tr) { | |
3955 | hfs_end_transaction(hfsmp); | |
3956 | } | |
3957 | ||
3958 | fdcp->c_flag &= ~C_DIR_MODIFICATION; | |
3959 | wakeup((caddr_t)&fdcp->c_flag); | |
3960 | if (fdvp != tdvp) { | |
3961 | tdcp->c_flag &= ~C_DIR_MODIFICATION; | |
3962 | wakeup((caddr_t)&tdcp->c_flag); | |
3963 | } | |
3964 | ||
3965 | if (took_trunc_lock) { | |
3966 | hfs_unlock_truncate(VTOC(tvp), 0); | |
3967 | } | |
3968 | ||
3969 | hfs_unlockfour(fdcp, fcp, tdcp, tcp); | |
3970 | ||
3971 | /* Now vnode_put the resource forks vnodes if necessary */ | |
3972 | if (tvp_rsrc) { | |
3973 | vnode_put(tvp_rsrc); | |
3974 | } | |
3975 | if (fvp_rsrc) { | |
3976 | vnode_put(fvp_rsrc); | |
3977 | } | |
3978 | ||
3979 | /* After tvp is removed the only acceptable error is EIO */ | |
3980 | if (error && tvp_deleted) | |
3981 | error = EIO; | |
3982 | ||
3983 | return (error); | |
3984 | } | |
3985 | ||
3986 | ||
3987 | /* | |
3988 | * Make a directory. | |
3989 | */ | |
3990 | int | |
3991 | hfs_vnop_mkdir(struct vnop_mkdir_args *ap) | |
3992 | { | |
3993 | /***** HACK ALERT ********/ | |
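| /* | |
| * Forcing MAKEENTRY asks the create path to add the new directory's name to | |
| * the name cache; presumably this works around callers that do not request | |
| * caching themselves. | |
| */ | |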
3994 | ap->a_cnp->cn_flags |= MAKEENTRY; | |
3995 | return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context); | |
3996 | } | |
3997 | ||
3998 | ||
3999 | /* | |
4000 | * Create a symbolic link. | |
4001 | */ | |
4002 | int | |
4003 | hfs_vnop_symlink(struct vnop_symlink_args *ap) | |
4004 | { | |
4005 | struct vnode **vpp = ap->a_vpp; | |
4006 | struct vnode *dvp = ap->a_dvp; | |
4007 | struct vnode *vp = NULL; | |
4008 | struct cnode *cp = NULL; | |
4009 | struct hfsmount *hfsmp; | |
4010 | struct filefork *fp; | |
4011 | struct buf *bp = NULL; | |
4012 | char *datap; | |
4013 | int started_tr = 0; | |
4014 | u_int32_t len; | |
4015 | int error; | |
4016 | ||
4017 | /* HFS standard disks don't support symbolic links */ | |
4018 | if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) | |
4019 | return (ENOTSUP); | |
4020 | ||
4021 | /* Check for empty target name */ | |
4022 | if (ap->a_target[0] == 0) | |
4023 | return (EINVAL); | |
4024 | ||
4025 | hfsmp = VTOHFS(dvp); | |
4026 | len = strlen(ap->a_target); | |
4027 | ||
4028 | /* Check for free space */ | |
4029 | if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) { | |
4030 | return (ENOSPC); | |
4031 | } | |
4032 | ||
4033 | /* Create the vnode */ | |
4034 | ap->a_vap->va_mode |= S_IFLNK; | |
4035 | if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) { | |
4036 | goto out; | |
4037 | } | |
4038 | vp = *vpp; | |
4039 | if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) { | |
4040 | goto out; | |
4041 | } | |
4042 | cp = VTOC(vp); | |
4043 | fp = VTOF(vp); | |
4044 | ||
4045 | if (cp->c_flag & (C_NOEXISTS | C_DELETED)) { | |
4046 | goto out; | |
4047 | } | |
4048 | ||
4049 | #if QUOTA | |
4050 | (void)hfs_getinoquota(cp); | |
4051 | #endif /* QUOTA */ | |
4052 | ||
4053 | if ((error = hfs_start_transaction(hfsmp)) != 0) { | |
4054 | goto out; | |
4055 | } | |
4056 | started_tr = 1; | |
4057 | ||
4058 | /* | |
4059 | * Allocate space for the link. | |
4060 | * | |
4061 | * Since we're already inside a transaction, | |
4062 | * tell hfs_truncate to skip the ubc_setsize. | |
4063 | * | |
4064 | * Don't need truncate lock since a symlink is treated as a system file. | |
4065 | */ | |
4066 | error = hfs_truncate(vp, len, IO_NOZEROFILL, 1, 0, ap->a_context); | |
4067 | ||
4068 | /* On errors, remove the symlink file */ | |
4069 | if (error) { | |
4070 | /* | |
4071 | * End the transaction so we don't re-take the cnode lock | |
4072 | * below while inside a transaction (lock order violation). | |
4073 | */ | |
4074 | hfs_end_transaction(hfsmp); | |
4075 | ||
4076 | /* hfs_removefile() requires holding the truncate lock */ | |
4077 | hfs_unlock(cp); | |
4078 | hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK); | |
4079 | hfs_lock(cp, HFS_FORCE_LOCK); | |
4080 | ||
4081 | if (hfs_start_transaction(hfsmp) != 0) { | |
4082 | started_tr = 0; | |
4083 | hfs_unlock_truncate(cp, TRUE); | |
4084 | goto out; | |
4085 | } | |
4086 | ||
4087 | (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0); | |
4088 | hfs_unlock_truncate(cp, 0); | |
4089 | goto out; | |
4090 | } | |
4091 | ||
4092 | /* Write the link to disk */ | |
4093 | bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size), | |
4094 | 0, 0, BLK_META); | |
4095 | if (hfsmp->jnl) { | |
4096 | journal_modify_block_start(hfsmp->jnl, bp); | |
4097 | } | |
4098 | datap = (char *)buf_dataptr(bp); | |
4099 | bzero(datap, buf_size(bp)); | |
4100 | bcopy(ap->a_target, datap, len); | |
4101 | ||
4102 | if (hfsmp->jnl) { | |
4103 | journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL); | |
4104 | } else { | |
4105 | buf_bawrite(bp); | |
4106 | } | |
4107 | /* | |
4108 | * We deferred the ubc_setsize for hfs_truncate | |
4109 | * since we were inside a transaction. | |
4110 | * | |
4111 | * We don't need to drop the cnode lock here | |
4112 | * since this is a symlink. | |
4113 | */ | |
4114 | ubc_setsize(vp, len); | |
4115 | out: | |
4116 | if (started_tr) | |
4117 | hfs_end_transaction(hfsmp); | |
4118 | if ((cp != NULL) && (vp != NULL)) { | |
4119 | hfs_unlock(cp); | |
4120 | } | |
4121 | if (error) { | |
4122 | if (vp) { | |
4123 | vnode_put(vp); | |
4124 | } | |
4125 | *vpp = NULL; | |
4126 | } | |
4127 | return (error); | |
4128 | } | |
4129 | ||
4130 | ||
4131 | /* structures to hold a "." or ".." directory entry */ | |
4132 | struct hfs_stddotentry { | |
4133 | u_int32_t d_fileno; /* unique file number */ | |
4134 | u_int16_t d_reclen; /* length of this structure */ | |
4135 | u_int8_t d_type; /* dirent file type */ | |
4136 | u_int8_t d_namlen; /* len of filename */ | |
4137 | char d_name[4]; /* "." or ".." */ | |
4138 | }; | |
4139 | ||
4140 | struct hfs_extdotentry { | |
4141 | u_int64_t d_fileno; /* unique file number */ | |
4142 | u_int64_t d_seekoff; /* seek offset (optional, used by servers) */ | |
4143 | u_int16_t d_reclen; /* length of this structure */ | |
4144 | u_int16_t d_namlen; /* len of filename */ | |
4145 | u_int8_t d_type; /* dirent file type */ | |
4146 | u_char d_name[3]; /* "." or ".." */ | |
4147 | }; | |
4148 | ||
4149 | typedef union { | |
4150 | struct hfs_stddotentry std; | |
4151 | struct hfs_extdotentry ext; | |
4152 | } hfs_dotentry_t; | |
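| /* | |
| * (Assuming no compiler padding, sizeof(struct hfs_stddotentry) is 12 bytes | |
| * and sizeof(struct hfs_extdotentry) is 24; d_name is sized so "." and ".." | |
| * plus a terminating NUL fit in a naturally aligned, fixed-size record.) | |
| */ | |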
4153 | ||
4154 | /* | |
4155 | * hfs_vnop_readdir reads directory entries into the buffer pointed | |
4156 | * to by uio, in a filesystem independent format. Up to uio_resid | |
4157 | * bytes of data can be transferred. The data in the buffer is a | |
4158 | * series of packed dirent structures where each one contains the | |
4159 | * following entries: | |
4160 | * | |
4161 | * u_int32_t d_fileno; // file number of entry | |
4162 | * u_int16_t d_reclen; // length of this record | |
4163 | * u_int8_t d_type; // file type | |
4164 | * u_int8_t d_namlen; // length of string in d_name | |
4165 | * char d_name[MAXNAMELEN+1]; // null terminated file name | |
4166 | * | |
4167 | * The current position (uio_offset) refers to the next block of | |
4168 | * entries. The offset can only be set to a value previously | |
4169 | * returned by hfs_vnop_readdir or zero. This offset does not have | |
4170 | * to match the number of bytes returned (in uio_resid). | |
4171 | * | |
4172 | * In fact, the offset used by HFS is essentially an index (26 bits) | |
4173 | * with a tag (6 bits). The tag is for associating the next request | |
4174 | * with the current request. This enables us to have multiple threads | |
4175 | * reading the directory while the directory is also being modified. | |
4176 | * | |
4177 | * Each tag/index pair is tied to a unique directory hint. The hint | |
4178 | * contains information (filename) needed to build the catalog b-tree | |
4179 | * key for finding the next set of entries. | |
4180 | * | |
4181 | * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED), | |
4182 | * do NOT synthesize entries for "." and "..". | |
4183 | */ | |
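| /* | |
| * Worked example, assuming HFS_INDEX_BITS is 26 (so HFS_INDEX_MASK covers the | |
| * low 26 bits): with a directory hint tag of 3 and a catalog index of 7, the | |
| * offset handed back to the caller is (7 + 2) | (3 << 26) == 0x0C000009.  The | |
| * next call recovers index = (offset & HFS_INDEX_MASK) - 2 and | |
| * tag = offset & ~HFS_INDEX_MASK, exactly as done in the body below. | |
| */ | |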
4184 | int | |
4185 | hfs_vnop_readdir(ap) | |
4186 | struct vnop_readdir_args /* { | |
4187 | vnode_t a_vp; | |
4188 | uio_t a_uio; | |
4189 | int a_flags; | |
4190 | int *a_eofflag; | |
4191 | int *a_numdirent; | |
4192 | vfs_context_t a_context; | |
4193 | } */ *ap; | |
4194 | { | |
4195 | struct vnode *vp = ap->a_vp; | |
4196 | uio_t uio = ap->a_uio; | |
4197 | struct cnode *cp; | |
4198 | struct hfsmount *hfsmp; | |
4199 | directoryhint_t *dirhint = NULL; | |
4200 | directoryhint_t localhint; | |
4201 | off_t offset; | |
4202 | off_t startoffset; | |
4203 | int error = 0; | |
4204 | int eofflag = 0; | |
4205 | user_addr_t user_start = 0; | |
4206 | user_size_t user_len = 0; | |
4207 | int index; | |
4208 | unsigned int tag; | |
4209 | int items; | |
4210 | int lockflags; | |
4211 | int extended; | |
4212 | int nfs_cookies; | |
4213 | cnid_t cnid_hint = 0; | |
4214 | ||
4215 | items = 0; | |
4216 | startoffset = offset = uio_offset(uio); | |
4217 | extended = (ap->a_flags & VNODE_READDIR_EXTENDED); | |
4218 | nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF); | |
4219 | ||
4220 | /* Sanity check the uio data. */ | |
4221 | if (uio_iovcnt(uio) > 1) | |
4222 | return (EINVAL); | |
4223 | ||
4224 | if (VTOC(vp)->c_flags & UF_COMPRESSED) { | |
4225 | int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */ | |
4226 | if (VTOCMP(vp) != NULL && !compressed) { | |
4227 | error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP); | |
4228 | if (error) { | |
4229 | return error; | |
4230 | } | |
4231 | } | |
4232 | } | |
4233 | ||
4234 | cp = VTOC(vp); | |
4235 | hfsmp = VTOHFS(vp); | |
4236 | ||
4237 | /* Note that the dirhint calls require an exclusive lock. */ | |
4238 | if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) | |
4239 | return (error); | |
4240 | ||
4241 | /* Pick up cnid hint (if any). */ | |
4242 | if (nfs_cookies) { | |
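| /* | |
| * NFS-style cookies carry a cnid in the high 32 bits of the offset; peel it | |
| * off here and keep only the low 32-bit index/tag portion.  The cnid is used | |
| * below to rebuild a directory hint via cat_findname(). | |
| */ | |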
4243 | cnid_hint = (cnid_t)(uio_offset(uio) >> 32); | |
4244 | uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL); | |
4245 | if (cnid_hint == INT_MAX) { /* searching past the last item */ | |
4246 | eofflag = 1; | |
4247 | goto out; | |
4248 | } | |
4249 | } | |
4250 | /* | |
4251 | * Synthesize entries for "." and "..", unless the directory has | |
4252 | * been deleted but not yet closed (lazy delete in progress). | |
4253 | */ | |
4254 | if (offset == 0 && !(cp->c_flag & C_DELETED)) { | |
4255 | hfs_dotentry_t dotentry[2]; | |
4256 | size_t uiosize; | |
4257 | ||
4258 | if (extended) { | |
4259 | struct hfs_extdotentry *entry = &dotentry[0].ext; | |
4260 | ||
4261 | entry->d_fileno = cp->c_cnid; | |
4262 | entry->d_reclen = sizeof(struct hfs_extdotentry); | |
4263 | entry->d_type = DT_DIR; | |
4264 | entry->d_namlen = 1; | |
4265 | entry->d_name[0] = '.'; | |
4266 | entry->d_name[1] = '\0'; | |
4267 | entry->d_name[2] = '\0'; | |
4268 | entry->d_seekoff = 1; | |
4269 | ||
4270 | ++entry; | |
4271 | entry->d_fileno = cp->c_parentcnid; | |
4272 | entry->d_reclen = sizeof(struct hfs_extdotentry); | |
4273 | entry->d_type = DT_DIR; | |
4274 | entry->d_namlen = 2; | |
4275 | entry->d_name[0] = '.'; | |
4276 | entry->d_name[1] = '.'; | |
4277 | entry->d_name[2] = '\0'; | |
4278 | entry->d_seekoff = 2; | |
4279 | uiosize = 2 * sizeof(struct hfs_extdotentry); | |
4280 | } else { | |
4281 | struct hfs_stddotentry *entry = &dotentry[0].std; | |
4282 | ||
4283 | entry->d_fileno = cp->c_cnid; | |
4284 | entry->d_reclen = sizeof(struct hfs_stddotentry); | |
4285 | entry->d_type = DT_DIR; | |
4286 | entry->d_namlen = 1; | |
4287 | *(int *)&entry->d_name[0] = 0; | |
4288 | entry->d_name[0] = '.'; | |
4289 | ||
4290 | ++entry; | |
4291 | entry->d_fileno = cp->c_parentcnid; | |
4292 | entry->d_reclen = sizeof(struct hfs_stddotentry); | |
4293 | entry->d_type = DT_DIR; | |
4294 | entry->d_namlen = 2; | |
4295 | *(int *)&entry->d_name[0] = 0; | |
4296 | entry->d_name[0] = '.'; | |
4297 | entry->d_name[1] = '.'; | |
4298 | uiosize = 2 * sizeof(struct hfs_stddotentry); | |
4299 | } | |
4300 | if ((error = uiomove((caddr_t)&dotentry, uiosize, uio))) { | |
4301 | goto out; | |
4302 | } | |
4303 | offset += 2; | |
4304 | } | |
4305 | ||
4306 | /* If there are no real entries then we're done. */ | |
4307 | if (cp->c_entries == 0) { | |
4308 | error = 0; | |
4309 | eofflag = 1; | |
4310 | uio_setoffset(uio, offset); | |
4311 | goto seekoffcalc; | |
4312 | } | |
4313 | ||
4314 | // | |
4315 | // We have to lock the user's buffer here so that we won't | |
4316 | // fault on it after we've acquired a shared lock on the | |
4317 | // catalog file. The issue is that you can get a 3-way | |
4318 | // deadlock if someone else starts a transaction and then | |
4319 | // tries to lock the catalog file but can't because we're | |
4320 | // here and we can't service our page fault because VM is | |
4321 | // blocked trying to start a transaction as a result of | |
4322 | // trying to free up pages for our page fault. It's messy | |
4323 | // but it does happen on dual-processors that are paging | |
4324 | // heavily (see radar 3082639 for more info). By locking | |
4325 | // the buffer up-front we prevent ourselves from faulting | |
4326 | // while holding the shared catalog file lock. | |
4327 | // | |
4328 | // Fortunately this and hfs_search() are the only two places | |
4329 | // currently (10/30/02) that can fault on user data with a | |
4330 | // shared lock on the catalog file. | |
4331 | // | |
4332 | if (hfsmp->jnl && uio_isuserspace(uio)) { | |
4333 | user_start = uio_curriovbase(uio); | |
4334 | user_len = uio_curriovlen(uio); | |
4335 | ||
4336 | if ((error = vslock(user_start, user_len)) != 0) { | |
4337 | user_start = 0; | |
4338 | goto out; | |
4339 | } | |
4340 | } | |
4341 | /* Convert offset into a catalog directory index. */ | |
4342 | index = (offset & HFS_INDEX_MASK) - 2; | |
4343 | tag = offset & ~HFS_INDEX_MASK; | |
4344 | ||
4345 | /* Lock catalog during cat_findname and cat_getdirentries. */ | |
4346 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); | |
4347 | ||
4348 | /* When called from NFS, try to resolve a cnid hint. */ | |
4349 | if (nfs_cookies && cnid_hint != 0) { | |
4350 | if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) { | |
4351 | if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) { | |
4352 | localhint.dh_index = index - 1; | |
4353 | localhint.dh_time = 0; | |
4354 | bzero(&localhint.dh_link, sizeof(localhint.dh_link)); | |
4355 | dirhint = &localhint; /* don't forget to release the descriptor */ | |
4356 | } else { | |
4357 | cat_releasedesc(&localhint.dh_desc); | |
4358 | } | |
4359 | } | |
4360 | } | |
4361 | ||
4362 | /* Get a directory hint (cnode must be locked exclusive) */ | |
4363 | if (dirhint == NULL) { | |
4364 | dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0); | |
4365 | ||
4366 | /* Hide tag from catalog layer. */ | |
4367 | dirhint->dh_index &= HFS_INDEX_MASK; | |
4368 | if (dirhint->dh_index == HFS_INDEX_MASK) { | |
4369 | dirhint->dh_index = -1; | |
4370 | } | |
4371 | } | |
4372 | ||
4373 | if (index == 0) { | |
4374 | dirhint->dh_threadhint = cp->c_dirthreadhint; | |
4375 | } | |
4376 | else { | |
4377 | /* | |
4378 | * If we have a non-zero index, there is a possibility that during the last | |
4379 | * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case | |
4380 | * then we don't want to return any new entries for the caller. Just return 0 | |
4381 | * items, mark the eofflag, and bail out. Because we won't have done any work, the | |
4382 | * code at the end of the function will release the dirhint for us. | |
4383 | * | |
4384 | * Don't forget to unlock the catalog lock on the way out, too. | |
4385 | */ | |
4386 | if (dirhint->dh_desc.cd_flags & CD_EOF) { | |
4387 | error = 0; | |
4388 | eofflag = 1; | |
4389 | uio_setoffset(uio, startoffset); | |
4390 | hfs_systemfile_unlock (hfsmp, lockflags); | |
4391 | ||
4392 | goto seekoffcalc; | |
4393 | } | |
4394 | } | |
4395 | ||
4396 | /* Pack the buffer with dirent entries. */ | |
4397 | error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, extended, &items, &eofflag); | |
4398 | ||
4399 | if (index == 0 && error == 0) { | |
4400 | cp->c_dirthreadhint = dirhint->dh_threadhint; | |
4401 | } | |
4402 | ||
4403 | hfs_systemfile_unlock(hfsmp, lockflags); | |
4404 | ||
4405 | if (error != 0) { | |
4406 | goto out; | |
4407 | } | |
4408 | ||
4409 | /* Get index to the next item */ | |
4410 | index += items; | |
4411 | ||
4412 | if (items >= (int)cp->c_entries) { | |
4413 | eofflag = 1; | |
4414 | } | |
4415 | ||
4416 | /* Convert catalog directory index back into an offset. */ | |
4417 | while (tag == 0) | |
4418 | tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS; | |
4419 | uio_setoffset(uio, (index + 2) | tag); | |
4420 | dirhint->dh_index |= tag; | |
4421 | ||
4422 | seekoffcalc: | |
4423 | cp->c_touch_acctime = TRUE; | |
4424 | ||
4425 | if (ap->a_numdirent) { | |
4426 | if (startoffset == 0) | |
4427 | items += 2; | |
4428 | *ap->a_numdirent = items; | |
4429 | } | |
4430 | ||
4431 | out: | |
4432 | if (user_start) { | |
4433 | vsunlock(user_start, user_len, TRUE); | |
4434 | } | |
4435 | /* If we didn't do anything then go ahead and dump the hint. */ | |
4436 | if ((dirhint != NULL) && | |
4437 | (dirhint != &localhint) && | |
4438 | (uio_offset(uio) == startoffset)) { | |
4439 | hfs_reldirhint(cp, dirhint); | |
4440 | eofflag = 1; | |
4441 | } | |
4442 | if (ap->a_eofflag) { | |
4443 | *ap->a_eofflag = eofflag; | |
4444 | } | |
4445 | if (dirhint == &localhint) { | |
4446 | cat_releasedesc(&localhint.dh_desc); | |
4447 | } | |
4448 | hfs_unlock(cp); | |
4449 | return (error); | |
4450 | } | |
4451 | ||
4452 | ||
4453 | /* | |
4454 | * Read contents of a symbolic link. | |
4455 | */ | |
4456 | int | |
4457 | hfs_vnop_readlink(ap) | |
4458 | struct vnop_readlink_args /* { | |
4459 | struct vnode *a_vp; | |
4460 | struct uio *a_uio; | |
4461 | vfs_context_t a_context; | |
4462 | } */ *ap; | |
4463 | { | |
4464 | struct vnode *vp = ap->a_vp; | |
4465 | struct cnode *cp; | |
4466 | struct filefork *fp; | |
4467 | int error; | |
4468 | ||
4469 | if (!vnode_islnk(vp)) | |
4470 | return (EINVAL); | |
4471 | ||
4472 | if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) | |
4473 | return (error); | |
4474 | cp = VTOC(vp); | |
4475 | fp = VTOF(vp); | |
4476 | ||
4477 | /* Zero-length or overly long symlinks are not allowed */ | |
4478 | if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) { | |
4479 | error = EINVAL; | |
4480 | goto exit; | |
4481 | } | |
4482 | ||
4483 | /* Cache the path so we don't waste buffer cache resources */ | |
4484 | if (fp->ff_symlinkptr == NULL) { | |
4485 | struct buf *bp = NULL; | |
4486 | ||
4487 | MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK); | |
4488 | if (fp->ff_symlinkptr == NULL) { | |
4489 | error = ENOMEM; | |
4490 | goto exit; | |
4491 | } | |
4492 | error = (int)buf_meta_bread(vp, (daddr64_t)0, | |
4493 | roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size), | |
4494 | vfs_context_ucred(ap->a_context), &bp); | |
4495 | if (error) { | |
4496 | if (bp) | |
4497 | buf_brelse(bp); | |
4498 | if (fp->ff_symlinkptr) { | |
4499 | FREE(fp->ff_symlinkptr, M_TEMP); | |
4500 | fp->ff_symlinkptr = NULL; | |
4501 | } | |
4502 | goto exit; | |
4503 | } | |
4504 | bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size); | |
4505 | ||
4506 | if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) { | |
4507 | buf_markinvalid(bp); /* data no longer needed */ | |
4508 | } | |
4509 | buf_brelse(bp); | |
4510 | } | |
4511 | error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio); | |
4512 | ||
4513 | /* | |
4514 | * Keep track of the blocks read. | 
4515 | */ | |
4516 | if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) { | |
4517 | ||
4518 | /* | |
4519 | * If this file hasn't been seen since the start of | |
4520 | * the current sampling period then start over. | |
4521 | */ | |
4522 | if (cp->c_atime < VTOHFS(vp)->hfc_timebase) | |
4523 | VTOF(vp)->ff_bytesread = fp->ff_size; | |
4524 | else | |
4525 | VTOF(vp)->ff_bytesread += fp->ff_size; | |
4526 | ||
4527 | // if (VTOF(vp)->ff_bytesread > fp->ff_size) | |
4528 | // cp->c_touch_acctime = TRUE; | |
4529 | } | |
4530 | ||
4531 | exit: | |
4532 | hfs_unlock(cp); | |
4533 | return (error); | |
4534 | } | |
4535 | ||
4536 | ||
4537 | /* | |
4538 | * Get configurable pathname variables. | |
4539 | */ | |
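/*
 * Illustrative only (the limits come from the cases below): from user space
 * these values surface through pathconf(2)/fpathconf(2), e.g.
 * pathconf(path, _PC_NAME_MAX) returns 255 on an HFS+ volume and 31 on an
 * HFS Standard volume.
 */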
4540 | int | |
4541 | hfs_vnop_pathconf(ap) | |
4542 | struct vnop_pathconf_args /* { | |
4543 | struct vnode *a_vp; | |
4544 | int a_name; | |
4545 | int *a_retval; | |
4546 | vfs_context_t a_context; | |
4547 | } */ *ap; | |
4548 | { | |
4549 | switch (ap->a_name) { | |
4550 | case _PC_LINK_MAX: | |
4551 | if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD) | |
4552 | *ap->a_retval = 1; | |
4553 | else | |
4554 | *ap->a_retval = HFS_LINK_MAX; | |
4555 | break; | |
4556 | case _PC_NAME_MAX: | |
4557 | if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD) | |
4558 | *ap->a_retval = kHFSMaxFileNameChars; /* 31 */ | |
4559 | else | |
4560 | *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */ | |
4561 | break; | |
4562 | case _PC_PATH_MAX: | |
4563 | *ap->a_retval = PATH_MAX; /* 1024 */ | |
4564 | break; | |
4565 | case _PC_PIPE_BUF: | |
4566 | *ap->a_retval = PIPE_BUF; | |
4567 | break; | |
4568 | case _PC_CHOWN_RESTRICTED: | |
4569 | *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ | |
4570 | break; | |
4571 | case _PC_NO_TRUNC: | |
4572 | *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */ | |
4573 | break; | |
4574 | case _PC_NAME_CHARS_MAX: | |
4575 | if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD) | |
4576 | *ap->a_retval = kHFSMaxFileNameChars; /* 31 */ | |
4577 | else | |
4578 | *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */ | |
4579 | break; | |
4580 | case _PC_CASE_SENSITIVE: | |
4581 | if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE) | |
4582 | *ap->a_retval = 1; | |
4583 | else | |
4584 | *ap->a_retval = 0; | |
4585 | break; | |
4586 | case _PC_CASE_PRESERVING: | |
4587 | *ap->a_retval = 1; | |
4588 | break; | |
4589 | case _PC_FILESIZEBITS: | |
4590 | if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD) | |
4591 | *ap->a_retval = 32; | |
4592 | else | |
4593 | *ap->a_retval = 64; /* number of bits to store max file size */ | |
4594 | break; | |
4595 | case _PC_XATTR_SIZE_BITS: | |
4596 | /* Number of bits to store maximum extended attribute size */ | |
4597 | *ap->a_retval = HFS_XATTR_SIZE_BITS; | |
4598 | break; | |
4599 | default: | |
4600 | return (EINVAL); | |
4601 | } | |
4602 | ||
4603 | return (0); | |
4604 | } | |
4605 | ||
4606 | ||
4607 | /* | |
4608 | * Update a cnode's on-disk metadata. | |
4609 | * | |
4610 | * If waitfor is set, then wait for the disk write of | |
4611 | * the node to complete. | |
4612 | * | |
4613 | * The cnode must be locked exclusive | |
4614 | */ | |
4615 | int | |
4616 | hfs_update(struct vnode *vp, __unused int waitfor) | |
4617 | { | |
4618 | struct cnode *cp = VTOC(vp); | |
4619 | struct proc *p; | |
4620 | struct cat_fork *dataforkp = NULL; | |
4621 | struct cat_fork *rsrcforkp = NULL; | |
4622 | struct cat_fork datafork; | |
4623 | struct cat_fork rsrcfork; | |
4624 | struct hfsmount *hfsmp; | |
4625 | int lockflags; | |
4626 | int error; | |
4627 | ||
4628 | p = current_proc(); | |
4629 | hfsmp = VTOHFS(vp); | |
4630 | ||
4631 | if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) || | |
4632 | hfsmp->hfs_catalog_vp == NULL){ | |
4633 | return (0); | |
4634 | } | |
4635 | if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) { | |
4636 | cp->c_flag &= ~C_MODIFIED; | |
4637 | cp->c_touch_acctime = 0; | |
4638 | cp->c_touch_chgtime = 0; | |
4639 | cp->c_touch_modtime = 0; | |
4640 | return (0); | |
4641 | } | |
4642 | ||
4643 | hfs_touchtimes(hfsmp, cp); | |
4644 | ||
4645 | /* Nothing to update. */ | |
4646 | if ((cp->c_flag & (C_MODIFIED | C_FORCEUPDATE)) == 0) { | |
4647 | return (0); | |
4648 | } | |
4649 | ||
4650 | if (cp->c_datafork) | |
4651 | dataforkp = &cp->c_datafork->ff_data; | |
4652 | if (cp->c_rsrcfork) | |
4653 | rsrcforkp = &cp->c_rsrcfork->ff_data; | |
4654 | ||
4655 | /* | |
4656 | * For delayed allocations, updates are | 
4657 | * postponed until an fsync or until the file | 
4658 | * gets written to disk. | 
4659 | * | 
4660 | * Deleted files can defer metadata updates until inactive. | 
4661 | * | |
4662 | * If we're ever called with the C_FORCEUPDATE flag though | |
4663 | * we have to do the update. | |
4664 | */ | |
4665 | if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 && | |
4666 | (ISSET(cp->c_flag, C_DELETED) || | |
4667 | (dataforkp && cp->c_datafork->ff_unallocblocks) || | |
4668 | (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) { | |
4669 | // cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE); | |
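/* Keep C_MODIFIED set so the deferred catalog update still happens later
 * (at fsync time, when the data is written, or at inactive for deleted files). */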
4670 | cp->c_flag |= C_MODIFIED; | |
4671 | ||
4672 | return (0); | |
4673 | } | |
4674 | ||
4675 | if ((error = hfs_start_transaction(hfsmp)) != 0) { | |
4676 | return error; | |
4677 | } | |
4678 | ||
4679 | /* | |
4680 | * Modify the values passed to cat_update based on whether or not | |
4681 | * the file has invalid ranges or borrowed blocks. | |
4682 | */ | |
4683 | if (dataforkp) { | |
4684 | off_t numbytes = 0; | |
4685 | ||
4686 | /* copy the datafork into a temporary so we don't pollute the cnode's own copy */ | 
4687 | bcopy(dataforkp, &datafork, sizeof(datafork)); | |
4688 | dataforkp = &datafork; | |
4689 | ||
4690 | /* | |
4691 | * If there are borrowed blocks, ensure that they are subtracted | |
4692 | * from the total block count before writing the cnode entry to disk. | |
4693 | * Only extents that have actually been marked allocated in the bitmap | |
4694 | * should be reflected in the total block count for this fork. | |
4695 | */ | |
4696 | if (cp->c_datafork->ff_unallocblocks != 0) { | |
4697 | // make sure that we don't assign a negative block count | |
4698 | if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) { | |
4699 | panic("hfs: ff_blocks %d is less than unalloc blocks %d\n", | |
4700 | cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks); | |
4701 | } | |
4702 | ||
4703 | /* Also cap the LEOF to the total number of bytes that are allocated. */ | |
4704 | datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks); | |
4705 | datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize; | |
4706 | } | |
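/*
 * Worked example with made-up numbers: if the fork has ff_blocks = 10 with
 * ff_unallocblocks = 3 borrowed, and the volume allocation block size is
 * 4096 bytes, the catalog record is written with cf_blocks = 7 and
 * cf_size = 7 * 4096 = 28672 bytes.
 */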
4707 | ||
4708 | /* | |
4709 | * For files with invalid ranges (holes) the on-disk | |
4710 | * field representing the size of the file (cf_size) | |
4711 | * must be no larger than the start of the first hole. | |
4712 | * However, note that if the first invalid range exists | |
4713 | * solely within borrowed blocks, then our LEOF and block | |
4714 | * count should both be zero. As a result, set it to the | |
4715 | * min of the current cf_size and the start of the first | |
4716 | * invalid range, because it may have already been reduced | |
4717 | * to zero by the borrowed blocks check above. | |
4718 | */ | |
4719 | if (!TAILQ_EMPTY(&cp->c_datafork->ff_invalidranges)) { | |
4720 | numbytes = TAILQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start; | |
4721 | datafork.cf_size = MIN((numbytes), (datafork.cf_size)); | |
4722 | } | |
4723 | } | |
4724 | ||
4725 | /* | |
4726 | * For resource forks with delayed allocations, make sure | |
4727 | * the block count and file size match the number of blocks | |
4728 | * actually allocated to the file on disk. | |
4729 | */ | |
4730 | if (rsrcforkp && (cp->c_rsrcfork->ff_unallocblocks != 0)) { | |
4731 | bcopy(rsrcforkp, &rsrcfork, sizeof(rsrcfork)); | |
4732 | rsrcfork.cf_blocks = (cp->c_rsrcfork->ff_blocks - cp->c_rsrcfork->ff_unallocblocks); | |
4733 | rsrcfork.cf_size = rsrcfork.cf_blocks * HFSTOVCB(hfsmp)->blockSize; | |
4734 | rsrcforkp = &rsrcfork; | |
4735 | } | |
4736 | ||
4737 | /* | |
4738 | * Lock the Catalog b-tree file. | |
4739 | */ | |
4740 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK); | |
4741 | ||
4742 | /* XXX - waitfor is not enforced */ | |
4743 | error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp); | |
4744 | ||
4745 | hfs_systemfile_unlock(hfsmp, lockflags); | |
4746 | ||
4747 | /* After the updates are finished, clear the flags */ | |
4748 | cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE); | |
4749 | ||
4750 | hfs_end_transaction(hfsmp); | |
4751 | ||
4752 | return (error); | |
4753 | } | |
4754 | ||
4755 | /* | |
4756 | * Allocate a new node | |
4757 | * Note: this function does not create and return a vnode for whiteout creation. | 
4758 | */ | |
4759 | int | |
4760 | hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, | |
4761 | struct vnode_attr *vap, vfs_context_t ctx) | |
4762 | { | |
4763 | struct cnode *cp = NULL; | |
4764 | struct cnode *dcp = NULL; | |
4765 | struct vnode *tvp; | |
4766 | struct hfsmount *hfsmp; | |
4767 | struct cat_desc in_desc, out_desc; | |
4768 | struct cat_attr attr; | |
4769 | struct timeval tv; | |
4770 | int lockflags; | |
4771 | int error, started_tr = 0; | |
4772 | enum vtype vnodetype; | |
4773 | int mode; | |
4774 | int newvnode_flags = 0; | |
4775 | int nocache = 0; | |
4776 | u_int32_t gnv_flags = 0; | |
4777 | ||
4778 | if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK))) | |
4779 | return (error); | |
4780 | ||
4781 | /* set the cnode pointer only after successfully acquiring lock */ | |
4782 | dcp = VTOC(dvp); | |
4783 | ||
4784 | /* Don't allow creation of new entries in open-unlinked directories */ | |
4785 | if ((error = hfs_checkdeleted(dcp))) { | |
4786 | hfs_unlock(dcp); | |
4787 | return error; | |
4788 | } | |
4789 | ||
4790 | dcp->c_flag |= C_DIR_MODIFICATION; | |
4791 | ||
4792 | hfsmp = VTOHFS(dvp); | |
4793 | *vpp = NULL; | |
4794 | tvp = NULL; | |
4795 | out_desc.cd_flags = 0; | |
4796 | out_desc.cd_nameptr = NULL; | |
4797 | ||
4798 | vnodetype = vap->va_type; | |
4799 | if (vnodetype == VNON) | |
4800 | vnodetype = VREG; | |
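/*
 * MAKEIMODE combines the vnode type with the caller-supplied permission
 * bits -- roughly (VTTOIF(vnodetype) | va_mode) -- so the S_ISDIR()/S_ISWHT()
 * checks on 'mode' below work as expected (assumption; see the macro's
 * definition for the exact form).
 */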
4801 | mode = MAKEIMODE(vnodetype, vap->va_mode); | |
4802 | ||
4803 | #if CONFIG_PROTECT | |
4804 | /* If we're creating a regular file on a CP filesystem, then delay caching */ | |
4805 | if ((vnodetype == VREG ) && (cp_fs_protected (VTOVFS(dvp)))) { | |
4806 | nocache = 1; | |
4807 | } | |
4808 | #endif | |
4809 | ||
4810 | /* Check if we're out of usable disk space. */ | 
4811 | if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) { | |
4812 | error = ENOSPC; | |
4813 | goto exit; | |
4814 | } | |
4815 | ||
4816 | microtime(&tv); | |
4817 | ||
4818 | /* Setup the default attributes */ | |
4819 | bzero(&attr, sizeof(attr)); | |
4820 | attr.ca_mode = mode; | |
4821 | attr.ca_linkcount = 1; | |
4822 | if (VATTR_IS_ACTIVE(vap, va_rdev)) { | |
4823 | attr.ca_rdev = vap->va_rdev; | |
4824 | } | |
4825 | if (VATTR_IS_ACTIVE(vap, va_create_time)) { | |
4826 | VATTR_SET_SUPPORTED(vap, va_create_time); | |
4827 | attr.ca_itime = vap->va_create_time.tv_sec; | |
4828 | } else { | |
4829 | attr.ca_itime = tv.tv_sec; | |
4830 | } | |
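/*
 * HFS Standard presumably stores dates in local time rather than UTC, so an
 * extra hour is added below when daylight-saving time is in effect (matching
 * the adjustment hfs_update makes); HFS+ item times are left alone.
 */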
4831 | if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) { | |
4832 | attr.ca_itime += 3600; /* Same as what hfs_update does */ | |
4833 | } | |
4834 | attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime; | |
4835 | attr.ca_atimeondisk = attr.ca_atime; | |
4836 | if (VATTR_IS_ACTIVE(vap, va_flags)) { | |
4837 | VATTR_SET_SUPPORTED(vap, va_flags); | |
4838 | attr.ca_flags = vap->va_flags; | |
4839 | } | |
4840 | ||
4841 | /* | |
4842 | * HFS+ only: all files get ThreadExists | |
4843 | * HFSX only: dirs get HasFolderCount | |
4844 | */ | |
4845 | if (!(hfsmp->hfs_flags & HFS_STANDARD)) { | |
4846 | if (vnodetype == VDIR) { | |
4847 | if (hfsmp->hfs_flags & HFS_FOLDERCOUNT) | |
4848 | attr.ca_recflags = kHFSHasFolderCountMask; | |
4849 | } else { | |
4850 | attr.ca_recflags = kHFSThreadExistsMask; | |
4851 | } | |
4852 | } | |
4853 | ||
4854 | /* Add the date added to the item */ | |
4855 | hfs_write_dateadded (&attr, attr.ca_atime); | |
4856 | ||
4857 | attr.ca_uid = vap->va_uid; | |
4858 | attr.ca_gid = vap->va_gid; | |
4859 | VATTR_SET_SUPPORTED(vap, va_mode); | |
4860 | VATTR_SET_SUPPORTED(vap, va_uid); | |
4861 | VATTR_SET_SUPPORTED(vap, va_gid); | |
4862 | ||
4863 | #if QUOTA | |
4864 | /* check to see if this node's creation would cause us to go over | |
4865 | * quota. If so, abort this operation. | |
4866 | */ | |
4867 | if (hfsmp->hfs_flags & HFS_QUOTAS) { | |
4868 | if ((error = hfs_quotacheck(hfsmp, 1, attr.ca_uid, attr.ca_gid, | |
4869 | vfs_context_ucred(ctx)))) { | |
4870 | goto exit; | |
4871 | } | |
4872 | } | |
4873 | #endif | |
4874 | ||
4875 | ||
4876 | /* Tag symlinks with a type and creator. */ | |
4877 | if (vnodetype == VLNK) { | |
4878 | struct FndrFileInfo *fip; | |
4879 | ||
4880 | fip = (struct FndrFileInfo *)&attr.ca_finderinfo; | |
4881 | fip->fdType = SWAP_BE32(kSymLinkFileType); | |
4882 | fip->fdCreator = SWAP_BE32(kSymLinkCreator); | |
4883 | } | |
4884 | if (cnp->cn_flags & ISWHITEOUT) | |
4885 | attr.ca_flags |= UF_OPAQUE; | |
4886 | ||
4887 | /* Setup the descriptor */ | |
4888 | in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr; | |
4889 | in_desc.cd_namelen = cnp->cn_namelen; | |
4890 | in_desc.cd_parentcnid = dcp->c_fileid; | |
4891 | in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0; | |
4892 | in_desc.cd_hint = dcp->c_childhint; | |
4893 | in_desc.cd_encoding = 0; | |
4894 | ||
4895 | if ((error = hfs_start_transaction(hfsmp)) != 0) { | |
4896 | goto exit; | |
4897 | } | |
4898 | started_tr = 1; | |
4899 | ||
4900 | // have to also lock the attribute file because cat_create() needs | |
4901 | // to check that any fileID it wants to use does not have orphaned | |
4902 | // attributes in it. | |
4903 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK); | |
4904 | ||
4905 | /* Reserve some space in the Catalog file. */ | |
4906 | if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) { | |
4907 | hfs_systemfile_unlock(hfsmp, lockflags); | |
4908 | goto exit; | |
4909 | } | |
4910 | error = cat_create(hfsmp, &in_desc, &attr, &out_desc); | |
4911 | if (error == 0) { | |
4912 | /* Update the parent directory */ | |
4913 | dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */ | |
4914 | dcp->c_entries++; | |
4915 | if (vnodetype == VDIR) { | |
4916 | INC_FOLDERCOUNT(hfsmp, dcp->c_attr); | |
4917 | } | |
4918 | dcp->c_dirchangecnt++; | |
4919 | dcp->c_ctime = tv.tv_sec; | |
4920 | dcp->c_mtime = tv.tv_sec; | |
4921 | (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL); | |
4922 | } | |
4923 | hfs_systemfile_unlock(hfsmp, lockflags); | |
4924 | if (error) | |
4925 | goto exit; | |
4926 | ||
4927 | /* Invalidate negative cache entries in the directory */ | |
4928 | if (dcp->c_flag & C_NEG_ENTRIES) { | |
4929 | cache_purge_negatives(dvp); | |
4930 | dcp->c_flag &= ~C_NEG_ENTRIES; | |
4931 | } | |
4932 | ||
4933 | hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE, | |
4934 | (dcp->c_cnid == kHFSRootFolderID)); | |
4935 | ||
4936 | // XXXdbg | |
4937 | // have to end the transaction here before we call hfs_getnewvnode() | |
4938 | // because that can cause us to try and reclaim a vnode on a different | |
4939 | // file system which could cause us to start a transaction which can | |
4940 | // deadlock with someone on that other file system (since we could be | |
4941 | // holding two transaction locks as well as various vnodes and we did | |
4942 | // not obtain the locks on them in the proper order). | |
4943 | // | |
4944 | // NOTE: this means that if the quota check fails or we have to update | |
4945 | // the change time on a block-special device that those changes | |
4946 | // will happen as part of independent transactions. | |
4947 | // | |
4948 | if (started_tr) { | |
4949 | hfs_end_transaction(hfsmp); | |
4950 | started_tr = 0; | |
4951 | } | |
4952 | ||
4953 | /* Do not create vnode for whiteouts */ | |
4954 | if (S_ISWHT(mode)) { | |
4955 | goto exit; | |
4956 | } | |
4957 | ||
4958 | gnv_flags |= GNV_CREATE; | |
4959 | if (nocache) { | |
4960 | gnv_flags |= GNV_NOCACHE; | |
4961 | } | |
4962 | ||
4963 | /* | |
4964 | * Create a vnode for the object just created. | |
4965 | * | |
4966 | * NOTE: Maintaining the cnode lock on the parent directory is important, | |
4967 | * as it prevents race conditions where other threads want to look up entries | |
4968 | * in the directory and/or add things as we are in the process of creating | |
4969 | * the vnode below. However, this has the potential for causing a | |
4970 | * double lock panic when dealing with shadow files on a HFS boot partition. | |
4971 | * The panic could occur if we are not cleaning up after ourselves properly | |
4972 | * when done with a shadow file or in the error cases. The error would occur if we | |
4973 | * try to create a new vnode, and then end up reclaiming another shadow vnode to | |
4974 | * create the new one. However, if everything is working properly, this should | |
4975 | * be a non-issue as we would never enter that reclaim codepath. | |
4976 | * | |
4977 | * The cnode is locked on successful return. | |
4978 | */ | |
4979 | error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr, | |
4980 | NULL, &tvp, &newvnode_flags); | |
4981 | if (error) | |
4982 | goto exit; | |
4983 | ||
4984 | cp = VTOC(tvp); | |
4985 | *vpp = tvp; | |
4986 | ||
4987 | #if CONFIG_PROTECT | |
4988 | error = cp_entry_create_keys(cp); | |
4989 | /* | |
4990 | * If we fail to create keys, then do NOT allow this vnode to percolate out into the | |
4991 | * namespace. Delete it and return the errno that cp_entry_create_keys generated. | |
4992 | * Luckily, we can do this without issues because the entry was newly created | |
4993 | * and we're still holding the directory cnode lock. Because we prevented it from | |
4994 | * getting inserted into the namecache upon vnode creation, all access to this file | 
4995 | * would have to go through the directory, whose lock we are still holding. | |
4996 | */ | |
4997 | if (error) { | |
4998 | /* | |
4999 | * If we fail to remove/recycle the item here, we can't do much about it. Log | |
5000 | * a message to the console so the failure can be tracked down later. The ultimate error | 
5001 | * that will get emitted to userland will be from the failure to create the EA blob. | |
5002 | */ | |
5003 | int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 0, NULL, 0); | |
5004 | if (err) { | |
5005 | printf("hfs_makenode: removefile failed (%d) for CP file %p\n", err, tvp); | |
5006 | } | |
5007 | hfs_unlock (cp); | |
5008 | err = vnode_recycle (tvp); | |
5009 | if (err) { | |
5010 | printf("hfs_makenode: vnode_recycle failed (%d) for CP file %p\n", err, tvp); | |
5011 | } | |
5012 | /* Drop the iocount on the new vnode to force reclamation/recycling */ | |
5013 | vnode_put (tvp); | |
5014 | cp = NULL; | |
5015 | *vpp = NULL; | |
5016 | } | |
5017 | else { | |
5018 | /* Insert the item into the name cache if it wasn't already inserted. */ | 
5019 | if (nocache) { | |
5020 | cache_enter (dvp, tvp, cnp); | |
5021 | } | |
5022 | } | |
5023 | ||
5024 | #endif | |
5025 | /* | |
5026 | * If CONFIG_PROTECT is not enabled, then all items will get automatically added into | |
5027 | * the namecache, as nocache will be set to 0. | |
5028 | */ | |
5029 | ||
5030 | #if QUOTA | |
5031 | /* | |
5032 | * Once we create this vnode, we need to initialize its quota data | |
5033 | * structures, if necessary. We know that it is OK to just go ahead and | |
5034 | * initialize because we've already validated earlier (through the hfs_quotacheck | |
5035 | * function) to see if creating this cnode/vnode would cause us to go over quota. | |
5036 | */ | |
5037 | if (hfsmp->hfs_flags & HFS_QUOTAS) { | |
5038 | (void) hfs_getinoquota(cp); | |
5039 | } | |
5040 | #endif | |
5041 | ||
5042 | exit: | |
5043 | cat_releasedesc(&out_desc); | |
5044 | ||
5045 | /* | |
5046 | * Make sure we release cnode lock on dcp. | |
5047 | */ | |
5048 | if (dcp) { | |
5049 | dcp->c_flag &= ~C_DIR_MODIFICATION; | |
5050 | wakeup((caddr_t)&dcp->c_flag); | |
5051 | ||
5052 | hfs_unlock(dcp); | |
5053 | } | |
5054 | if (error == 0 && cp != NULL) { | |
5055 | hfs_unlock(cp); | |
5056 | } | |
5057 | if (started_tr) { | |
5058 | hfs_end_transaction(hfsmp); | |
5059 | started_tr = 0; | |
5060 | } | |
5061 | ||
5062 | return (error); | |
5063 | } | |
5064 | ||
5065 | ||
5066 | /* | |
5067 | * hfs_vgetrsrc acquires a resource fork vnode corresponding to the cnode that is | |
5068 | * found in 'vp'. The rsrc fork vnode is returned with the cnode locked and an iocount | 
5069 | * held on the rsrc vnode. | 
5070 | * | |
5071 | * *rvpp is an output argument for returning the pointer to the resource fork vnode. | |
5072 | * In most cases, the resource fork vnode will not be set if we return an error. | |
5073 | * However, if error_on_unlinked is set, we may have already acquired the resource fork vnode | |
5074 | * before we discover the error (the file has gone open-unlinked). In this case only, | |
5075 | * we may return a vnode in the output argument despite an error. | |
5076 | * | |
5077 | * If can_drop_lock is set, then it is safe for this function to temporarily drop | |
5078 | * and then re-acquire the cnode lock. We may need to do this, for example, in order to | |
5079 | * acquire an iocount or promote our lock. | |
5080 | * | |
5081 | * error_on_unlinked is an argument which indicates that we are to return an error if we | |
5082 | * discover that the cnode has gone into an open-unlinked state (C_DELETED or C_NOEXISTS | 
5083 | * is set in the cnode flags). This is only necessary if can_drop_lock is true; otherwise | 
5084 | * there's really no reason to double-check for errors on the cnode. | |
5085 | */ | |
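/*
 * Rough calling pattern implied by the above (not a verbatim example from any
 * caller): the caller already holds the cnode lock on 'vp', invokes
 * hfs_vgetrsrc(hfsmp, vp, &rvp, can_drop_lock, error_on_unlinked), and on
 * success is responsible for eventually dropping the iocount on 'rvp' with
 * vnode_put() and releasing the cnode lock.
 */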
5086 | ||
5087 | int | |
5088 | hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, | |
5089 | int can_drop_lock, int error_on_unlinked) | |
5090 | { | |
5091 | struct vnode *rvp; | |
5092 | struct vnode *dvp = NULLVP; | |
5093 | struct cnode *cp = VTOC(vp); | |
5094 | int error; | |
5095 | int vid; | |
5096 | int delete_status = 0; | |
5097 | ||
5098 | if (vnode_vtype(vp) == VDIR) { | |
5099 | return EINVAL; | |
5100 | } | |
5101 | ||
5102 | /* | |
5103 | * Need to check the status of the cnode to validate it hasn't gone | |
5104 | * open-unlinked on us before we can actually do work with it. | |
5105 | */ | |
5106 | delete_status = hfs_checkdeleted(cp); | |
5107 | if ((delete_status) && (error_on_unlinked)) { | |
5108 | return delete_status; | |
5109 | } | |
5110 | ||
5111 | restart: | |
5112 | /* Attempt to use existing vnode */ | |
5113 | if ((rvp = cp->c_rsrc_vp)) { | |
5114 | vid = vnode_vid(rvp); | |
5115 | ||
5116 | /* | |
5117 | * It is not safe to hold the cnode lock when calling vnode_getwithvid() | |
5118 | * for the alternate fork -- vnode_getwithvid() could deadlock waiting | |
5119 | * for a VL_WANTTERM while another thread has an iocount on the alternate | |
5120 | * fork vnode and is attempting to acquire the common cnode lock. | |
5121 | * | |
5122 | * But it's also not safe to drop the cnode lock when we're holding | |
5123 | * multiple cnode locks, like during an hfs_removefile() operation, | 
5124 | * since we could lock out of order when re-acquiring the cnode lock. | 
5125 | * | 
5126 | * So we can only drop the lock here if it's safe to drop it -- which is | 
5127 | * most of the time, with the exception being hfs_removefile(). | 
5128 | */ | |
5129 | if (can_drop_lock) | |
5130 | hfs_unlock(cp); | |
5131 | ||
5132 | error = vnode_getwithvid(rvp, vid); | |
5133 | ||
5134 | if (can_drop_lock) { | |
5135 | (void) hfs_lock(cp, HFS_FORCE_LOCK); | |
5136 | ||
5137 | /* | |
5138 | * When we relinquished our cnode lock, the cnode could have raced | |
5139 | * with a delete and gotten deleted. If the caller did not want | |
5140 | * us to ignore open-unlinked files, then re-check the C_DELETED | |
5141 | * state and see if we need to return an ENOENT here because the item | |
5142 | * got deleted in the intervening time. | |
5143 | */ | |
5144 | if (error_on_unlinked) { | |
5145 | if ((delete_status = hfs_checkdeleted(cp))) { | |
5146 | /* | |
5147 | * If error == 0, this means that we succeeded in acquiring an iocount on the | |
5148 | * rsrc fork vnode. However, if we're in this block of code, that means that we noticed | |
5149 | * that the cnode has gone open-unlinked. In this case, the caller requested that we | |
5150 | * not do any other work and return an errno. The caller will be responsible for | |
5151 | * dropping the iocount we just acquired because we can't do it until we've released | |
5152 | * the cnode lock. | |
5153 | */ | |
5154 | if (error == 0) { | |
5155 | *rvpp = rvp; | |
5156 | } | |
5157 | return delete_status; | |
5158 | } | |
5159 | } | |
5160 | ||
5161 | /* | |
5162 | * When our lock was relinquished, the resource fork | |
5163 | * could have been recycled. Check for this and try | |
5164 | * again. | |
5165 | */ | |
5166 | if (error == ENOENT) | |
5167 | goto restart; | |
5168 | } | |
5169 | if (error) { | |
5170 | const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr; | |
5171 | ||
5172 | if (name) | |
5173 | printf("hfs_vgetrsrc: couldn't get resource" | |
5174 | " fork for %s, err %d\n", name, error); | |
5175 | return (error); | |
5176 | } | |
5177 | } else { | |
5178 | struct cat_fork rsrcfork; | |
5179 | struct componentname cn; | |
5180 | struct cat_desc *descptr = NULL; | |
5181 | struct cat_desc to_desc; | |
5182 | char delname[32]; | |
5183 | int lockflags; | |
5184 | int newvnode_flags = 0; | |
5185 | ||
5186 | /* | |
5187 | * Make sure cnode lock is exclusive, if not upgrade it. | |
5188 | * | |
5189 | * We assume that we were called from a read-only VNOP (getattr) | |
5190 | * and that it's safe to have the cnode lock dropped and reacquired. | 
5191 | */ | |
5192 | if (cp->c_lockowner != current_thread()) { | |
5193 | if (!can_drop_lock) { | |
5194 | return (EINVAL); | |
5195 | } | |
5196 | /* | |
5197 | * If the upgrade fails we lose the lock and | |
5198 | * have to take the exclusive lock on our own. | |
5199 | */ | |
5200 | if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE) | |
5201 | lck_rw_lock_exclusive(&cp->c_rwlock); | |
5202 | cp->c_lockowner = current_thread(); | |
5203 | } | |
5204 | ||
5205 | /* | |
5206 | * hfs_vgetrsrc may be invoked for a cnode that has already been marked | 
5207 | * C_DELETED. This is because we need to continue to provide rsrc | |
5208 | * fork access to open-unlinked files. In this case, build a fake descriptor | |
5209 | * like in hfs_removefile. If we don't do this, buildkey will fail in | |
5210 | * cat_lookup because this cnode has no name in its descriptor. However, | |
5211 | * only do this if the caller did not specify that they wanted us to | |
5212 | * error out upon encountering open-unlinked files. | |
5213 | */ | |
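/*
 * A sketch of what that fake descriptor looks like (see the block below):
 * the name is a synthetic one generated by MAKE_DELETED_NAME from the file
 * ID, and the parent is the private "file hardlinks" metadata directory, so
 * a catalog key can still be built for the open-unlinked cnode.
 */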
5214 | ||
5215 | if ((error_on_unlinked) && (can_drop_lock)) { | |
5216 | if ((error = hfs_checkdeleted(cp))) { | |
5217 | return error; | |
5218 | } | |
5219 | } | |
5220 | ||
5221 | if ((cp->c_flag & C_DELETED ) && (cp->c_desc.cd_namelen == 0)) { | |
5222 | bzero (&to_desc, sizeof(to_desc)); | |
5223 | bzero (delname, 32); | |
5224 | MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid); | |
5225 | to_desc.cd_nameptr = (const u_int8_t*) delname; | |
5226 | to_desc.cd_namelen = strlen(delname); | |
5227 | to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid; | |
5228 | to_desc.cd_flags = 0; | |
5229 | to_desc.cd_cnid = cp->c_cnid; | |
5230 | ||
5231 | descptr = &to_desc; | |
5232 | } | |
5233 | else { | |
5234 | descptr = &cp->c_desc; | |
5235 | } | |
5236 | ||
5237 | ||
5238 | lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK); | |
5239 | ||
5240 | /* Get resource fork data */ | |
5241 | error = cat_lookup(hfsmp, descptr, 1, (struct cat_desc *)0, | |
5242 | (struct cat_attr *)0, &rsrcfork, NULL); | |
5243 | ||
5244 | hfs_systemfile_unlock(hfsmp, lockflags); | |
5245 | if (error) { | |
5246 | return (error); | |
5247 | } | |
5248 | /* | |
5249 | * Supply hfs_getnewvnode with a component name. | |
5250 | */ | |
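/*
 * The name handed to hfs_getnewvnode is the file's name with
 * _PATH_RSRCFORKSPEC ("/..namedfork/rsrc", from <sys/paths.h>) appended,
 * presumably so the resource fork vnode gets its own distinct identity in
 * the name cache.
 */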
5251 | cn.cn_pnbuf = NULL; | |
5252 | if (descptr->cd_nameptr) { | |
5253 | MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); | |
5254 | cn.cn_nameiop = LOOKUP; | |
5255 | cn.cn_flags = ISLASTCN | HASBUF; | |
5256 | cn.cn_context = NULL; | |
5257 | cn.cn_pnlen = MAXPATHLEN; | |
5258 | cn.cn_nameptr = cn.cn_pnbuf; | |
5259 | cn.cn_hash = 0; | |
5260 | cn.cn_consume = 0; | |
5261 | cn.cn_namelen = snprintf(cn.cn_nameptr, MAXPATHLEN, | |
5262 | "%s%s", descptr->cd_nameptr, | |
5263 | _PATH_RSRCFORKSPEC); | |
5264 | } | |
5265 | dvp = vnode_getparent(vp); | |
5266 | error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL, | |
5267 | descptr, GNV_WANTRSRC | GNV_SKIPLOCK, &cp->c_attr, | |
5268 | &rsrcfork, &rvp, &newvnode_flags); | |
5269 | if (dvp) | |
5270 | vnode_put(dvp); | |
5271 | if (cn.cn_pnbuf) | |
5272 | FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI); | |
5273 | if (error) | |
5274 | return (error); | |
5275 | } | |
5276 | ||
5277 | *rvpp = rvp; | |
5278 | return (0); | |
5279 | } | |
5280 | ||
5281 | /* | |
5282 | * Wrapper for special device reads | |
5283 | */ | |
5284 | int | |
5285 | hfsspec_read(ap) | |
5286 | struct vnop_read_args /* { | |
5287 | struct vnode *a_vp; | |
5288 | struct uio *a_uio; | |
5289 | int a_ioflag; | |
5290 | vfs_context_t a_context; | |
5291 | } */ *ap; | |
5292 | { | |
5293 | /* | |
5294 | * Set access flag. | |
5295 | */ | |
5296 | VTOC(ap->a_vp)->c_touch_acctime = TRUE; | |
5297 | return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap)); | |
5298 | } | |
5299 | ||
5300 | /* | |
5301 | * Wrapper for special device writes | |
5302 | */ | |
5303 | int | |
5304 | hfsspec_write(ap) | |
5305 | struct vnop_write_args /* { | |
5306 | struct vnode *a_vp; | |
5307 | struct uio *a_uio; | |
5308 | int a_ioflag; | |
5309 | vfs_context_t a_context; | |
5310 | } */ *ap; | |
5311 | { | |
5312 | /* | |
5313 | * Set update and change flags. | |
5314 | */ | |
5315 | VTOC(ap->a_vp)->c_touch_chgtime = TRUE; | |
5316 | VTOC(ap->a_vp)->c_touch_modtime = TRUE; | |
5317 | return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap)); | |
5318 | } | |
5319 | ||
5320 | /* | |
5321 | * Wrapper for special device close | |
5322 | * | |
5323 | * Update the times on the cnode then do device close. | |
5324 | */ | |
5325 | int | |
5326 | hfsspec_close(ap) | |
5327 | struct vnop_close_args /* { | |
5328 | struct vnode *a_vp; | |
5329 | int a_fflag; | |
5330 | vfs_context_t a_context; | |
5331 | } */ *ap; | |
5332 | { | |
5333 | struct vnode *vp = ap->a_vp; | |
5334 | struct cnode *cp; | |
5335 | ||
5336 | if (vnode_isinuse(ap->a_vp, 0)) { | |
5337 | if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) { | |
5338 | cp = VTOC(vp); | |
5339 | hfs_touchtimes(VTOHFS(vp), cp); | |
5340 | hfs_unlock(cp); | |
5341 | } | |
5342 | } | |
5343 | return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap)); | |
5344 | } | |
5345 | ||
5346 | #if FIFO | |
5347 | /* | |
5348 | * Wrapper for fifo reads | |
5349 | */ | |
5350 | static int | |
5351 | hfsfifo_read(ap) | |
5352 | struct vnop_read_args /* { | |
5353 | struct vnode *a_vp; | |
5354 | struct uio *a_uio; | |
5355 | int a_ioflag; | |
5356 | vfs_context_t a_context; | |
5357 | } */ *ap; | |
5358 | { | |
5359 | /* | |
5360 | * Set access flag. | |
5361 | */ | |
5362 | VTOC(ap->a_vp)->c_touch_acctime = TRUE; | |
5363 | return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_read), ap)); | |
5364 | } | |
5365 | ||
5366 | /* | |
5367 | * Wrapper for fifo writes | |
5368 | */ | |
5369 | static int | |
5370 | hfsfifo_write(ap) | |
5371 | struct vnop_write_args /* { | |
5372 | struct vnode *a_vp; | |
5373 | struct uio *a_uio; | |
5374 | int a_ioflag; | |
5375 | vfs_context_t a_context; | |
5376 | } */ *ap; | |
5377 | { | |
5378 | /* | |
5379 | * Set update and change flags. | |
5380 | */ | |
5381 | VTOC(ap->a_vp)->c_touch_chgtime = TRUE; | |
5382 | VTOC(ap->a_vp)->c_touch_modtime = TRUE; | |
5383 | return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_write), ap)); | |
5384 | } | |
5385 | ||
5386 | /* | |
5387 | * Wrapper for fifo close | |
5388 | * | |
5389 | * Update the times on the cnode then do device close. | |
5390 | */ | |
5391 | static int | |
5392 | hfsfifo_close(ap) | |
5393 | struct vnop_close_args /* { | |
5394 | struct vnode *a_vp; | |
5395 | int a_fflag; | |
5396 | vfs_context_t a_context; | |
5397 | } */ *ap; | |
5398 | { | |
5399 | struct vnode *vp = ap->a_vp; | |
5400 | struct cnode *cp; | |
5401 | ||
5402 | if (vnode_isinuse(ap->a_vp, 1)) { | |
5403 | if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) { | |
5404 | cp = VTOC(vp); | |
5405 | hfs_touchtimes(VTOHFS(vp), cp); | |
5406 | hfs_unlock(cp); | |
5407 | } | |
5408 | } | |
5409 | return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_close), ap)); | |
5410 | } | |
5411 | ||
5412 | ||
5413 | #endif /* FIFO */ | |
5414 | ||
5415 | /* | |
5416 | * Synchronize a file's in-core state with that on disk. | |
5417 | */ | |
5418 | int | |
5419 | hfs_vnop_fsync(ap) | |
5420 | struct vnop_fsync_args /* { | |
5421 | struct vnode *a_vp; | |
5422 | int a_waitfor; | |
5423 | vfs_context_t a_context; | |
5424 | } */ *ap; | |
5425 | { | |
5426 | struct vnode* vp = ap->a_vp; | |
5427 | int error; | |
5428 | ||
5429 | /* Note: We check the hfs flags instead of the vfs mount flag because during a | 
5430 | * read-write update, hfs marks itself read-write much earlier than the vfs | 
5431 | * does, and hence we won't skip certain writes, such as zeroing out unused | 
5432 | * nodes or creating the hotfiles btree. | 
5433 | */ | |
5434 | if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) { | |
5435 | return 0; | |
5436 | } | |
5437 | ||
5438 | #if CONFIG_PROTECT | |
5439 | if ((error = cp_handle_vnop(VTOC(vp), CP_WRITE_ACCESS)) != 0) { | |
5440 | return (error); | |
5441 | } | |
5442 | #endif /* CONFIG_PROTECT */ | |
5443 | ||
5444 | /* | |
5445 | * We need to allow ENOENT lock errors since the unlink | 
5446 | * system call can call VNOP_FSYNC during vclean. | 
5447 | */ | |
5448 | error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK); | |
5449 | if (error) | |
5450 | return (0); | |
5451 | ||
5452 | error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context)); | |
5453 | ||
5454 | hfs_unlock(VTOC(vp)); | |
5455 | return (error); | |
5456 | } | |
5457 | ||
5458 | ||
5459 | int | |
5460 | hfs_vnop_whiteout(ap) | |
5461 | struct vnop_whiteout_args /* { | |
5462 | struct vnode *a_dvp; | |
5463 | struct componentname *a_cnp; | |
5464 | int a_flags; | |
5465 | vfs_context_t a_context; | |
5466 | } */ *ap; | |
5467 | { | |
5468 | int error = 0; | |
5469 | struct vnode *vp = NULL; | |
5470 | struct vnode_attr va; | |
5471 | struct vnop_lookup_args lookup_args; | |
5472 | struct vnop_remove_args remove_args; | |
5473 | struct hfsmount *hfsmp; | |
5474 | ||
5475 | hfsmp = VTOHFS(ap->a_dvp); | |
5476 | if (hfsmp->hfs_flags & HFS_STANDARD) { | |
5477 | error = ENOTSUP; | |
5478 | goto exit; | |
5479 | } | |
5480 | ||
5481 | switch (ap->a_flags) { | |
5482 | case LOOKUP: | |
5483 | error = 0; | |
5484 | break; | |
5485 | ||
5486 | case CREATE: | |
5487 | VATTR_INIT(&va); | |
5488 | VATTR_SET(&va, va_type, VREG); | |
5489 | VATTR_SET(&va, va_mode, S_IFWHT); | |
5490 | VATTR_SET(&va, va_uid, 0); | |
5491 | VATTR_SET(&va, va_gid, 0); | |
5492 | ||
5493 | error = hfs_makenode(ap->a_dvp, &vp, ap->a_cnp, &va, ap->a_context); | |
5494 | /* No need to release the vnode as no vnode is created for whiteouts */ | |
5495 | break; | |
5496 | ||
5497 | case DELETE: | |
5498 | lookup_args.a_dvp = ap->a_dvp; | |
5499 | lookup_args.a_vpp = &vp; | |
5500 | lookup_args.a_cnp = ap->a_cnp; | |
5501 | lookup_args.a_context = ap->a_context; | |
5502 | ||
5503 | error = hfs_vnop_lookup(&lookup_args); | |
5504 | if (error) { | |
5505 | break; | |
5506 | } | |
5507 | ||
5508 | remove_args.a_dvp = ap->a_dvp; | |
5509 | remove_args.a_vp = vp; | |
5510 | remove_args.a_cnp = ap->a_cnp; | |
5511 | remove_args.a_flags = 0; | |
5512 | remove_args.a_context = ap->a_context; | |
5513 | ||
5514 | error = hfs_vnop_remove(&remove_args); | |
5515 | vnode_put(vp); | |
5516 | break; | |
5517 | ||
5518 | default: | |
5519 | panic("hfs_vnop_whiteout: unknown operation (flag = %x)\n", ap->a_flags); | |
5520 | }; | |
5521 | ||
5522 | exit: | |
5523 | return (error); | |
5524 | } | |
5525 | ||
5526 | int (**hfs_vnodeop_p)(void *); | |
5527 | int (**hfs_std_vnodeop_p) (void *); | |
5528 | ||
5529 | #define VOPFUNC int (*)(void *) | |
5530 | ||
5531 | static int hfs_readonly_op (__unused void* ap) { return (EROFS); } | |
5532 | ||
5533 | /* | |
5534 | * In 10.6 and forward, HFS Standard is read-only and deprecated. The vnop table below | |
5535 | * is for use with HFS Standard to block out operations that would modify the file system. | 
5536 | */ | |
5537 | ||
5538 | struct vnodeopv_entry_desc hfs_standard_vnodeop_entries[] = { | |
5539 | { &vnop_default_desc, (VOPFUNC)vn_default_error }, | |
5540 | { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */ | |
5541 | { &vnop_create_desc, (VOPFUNC)hfs_readonly_op }, /* create (READONLY) */ | |
5542 | { &vnop_mknod_desc, (VOPFUNC)hfs_readonly_op }, /* mknod (READONLY) */ | |
5543 | { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */ | |
5544 | { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */ | |
5545 | { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */ | |
5546 | { &vnop_setattr_desc, (VOPFUNC)hfs_readonly_op }, /* setattr */ | |
5547 | { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */ | |
5548 | { &vnop_write_desc, (VOPFUNC)hfs_readonly_op }, /* write (READONLY) */ | |
5549 | { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */ | |
5550 | { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */ | |
5551 | { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */ | |
5552 | { &vnop_exchange_desc, (VOPFUNC)hfs_readonly_op }, /* exchange (READONLY)*/ | |
5553 | { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ | |
5554 | { &vnop_fsync_desc, (VOPFUNC)hfs_readonly_op}, /* fsync (READONLY) */ | |
5555 | { &vnop_remove_desc, (VOPFUNC)hfs_readonly_op }, /* remove (READONLY) */ | |
5556 | { &vnop_link_desc, (VOPFUNC)hfs_readonly_op }, /* link (READONLY) */ | 
5557 | { &vnop_rename_desc, (VOPFUNC)hfs_readonly_op }, /* rename (READONLY)*/ | |
5558 | { &vnop_mkdir_desc, (VOPFUNC)hfs_readonly_op }, /* mkdir (READONLY) */ | |
5559 | { &vnop_rmdir_desc, (VOPFUNC)hfs_readonly_op }, /* rmdir (READONLY) */ | |
5560 | { &vnop_symlink_desc, (VOPFUNC)hfs_readonly_op }, /* symlink (READONLY) */ | |
5561 | { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */ | |
5562 | { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */ | |
5563 | { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */ | |
5564 | { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */ | |
5565 | { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */ | |
5566 | { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */ | |
5567 | { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */ | |
5568 | { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ | |
5569 | { &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */ | |
5570 | { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */ | |
5571 | { &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */ | |
5572 | { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */ | |
5573 | { &vnop_pageout_desc,(VOPFUNC) hfs_readonly_op }, /* pageout (READONLY) */ | |
5574 | { &vnop_copyfile_desc, (VOPFUNC)hfs_readonly_op }, /* copyfile (READONLY)*/ | |
5575 | { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */ | |
5576 | { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */ | |
5577 | { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */ | |
5578 | { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr}, | |
5579 | { &vnop_setxattr_desc, (VOPFUNC)hfs_readonly_op}, /* set xattr (READONLY) */ | |
5580 | { &vnop_removexattr_desc, (VOPFUNC)hfs_readonly_op}, /* remove xattr (READONLY) */ | |
5581 | { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr}, | |
5582 | { &vnop_whiteout_desc, (VOPFUNC)hfs_readonly_op}, /* whiteout (READONLY) */ | |
5583 | #if NAMEDSTREAMS | |
5584 | { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream }, | |
5585 | { &vnop_makenamedstream_desc, (VOPFUNC)hfs_readonly_op }, | |
5586 | { &vnop_removenamedstream_desc, (VOPFUNC)hfs_readonly_op }, | |
5587 | #endif | |
5588 | { NULL, (VOPFUNC)NULL } | |
5589 | }; | |
5590 | ||
5591 | struct vnodeopv_desc hfs_std_vnodeop_opv_desc = | |
5592 | { &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries }; | |
5593 | ||
5594 | ||
5595 | /* VNOP table for HFS+ */ | |
5596 | struct vnodeopv_entry_desc hfs_vnodeop_entries[] = { | |
5597 | { &vnop_default_desc, (VOPFUNC)vn_default_error }, | |
5598 | { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */ | |
5599 | { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */ | |
5600 | { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */ | |
5601 | { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */ | |
5602 | { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */ | |
5603 | { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */ | |
5604 | { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */ | |
5605 | { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */ | |
5606 | { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */ | |
5607 | { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */ | |
5608 | { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */ | |
5609 | { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */ | |
5610 | { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */ | |
5611 | { &vnop_mmap_desc, (VOPFUNC)hfs_vnop_mmap }, /* mmap */ | |
5612 | { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */ | |
5613 | { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */ | |
5614 | { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */ | |
5615 | { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */ | |
5616 | { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */ | |
5617 | { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */ | |
5618 | { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */ | |
5619 | { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */ | |
5620 | { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */ | |
5621 | { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */ | |
5622 | { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */ | |
5623 | { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */ | |
5624 | { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */ | |
5625 | { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */ | |
5626 | { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ | |
5627 | { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */ | |
5628 | { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */ | |
5629 | { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */ | |
5630 | { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */ | |
5631 | { &vnop_pageout_desc,(VOPFUNC) hfs_vnop_pageout }, /* pageout */ | |
5632 | { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */ | |
5633 | { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */ | |
5634 | { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */ | |
5635 | { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */ | |
5636 | { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr}, | |
5637 | { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr}, | |
5638 | { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr}, | |
5639 | { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr}, | |
5640 | { &vnop_whiteout_desc, (VOPFUNC)hfs_vnop_whiteout}, | |
5641 | #if NAMEDSTREAMS | |
5642 | { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream }, | |
5643 | { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream }, | |
5644 | { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream }, | |
5645 | #endif | |
5646 | { NULL, (VOPFUNC)NULL } | |
5647 | }; | |
5648 | ||
5649 | struct vnodeopv_desc hfs_vnodeop_opv_desc = | |
5650 | { &hfs_vnodeop_p, hfs_vnodeop_entries }; | |
5651 | ||
5652 | ||
5653 | /* Spec Op vnop table for HFS+ */ | |
5654 | int (**hfs_specop_p)(void *); | |
5655 | struct vnodeopv_entry_desc hfs_specop_entries[] = { | |
5656 | { &vnop_default_desc, (VOPFUNC)vn_default_error }, | |
5657 | { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ | |
5658 | { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */ | |
5659 | { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */ | |
5660 | { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */ | |
5661 | { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */ | |
5662 | { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */ | |
5663 | { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */ | |
5664 | { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */ | |
5665 | { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */ | |
5666 | { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ | |
5667 | { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */ | |
5668 | { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */ | |
5669 | { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */ | |
5670 | { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */ | |
5671 | { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */ | |
5672 | { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */ | |
5673 | { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */ | |
5674 | { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */ | |
5675 | { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */ | |
5676 | { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */ | |
5677 | { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */ | |
5678 | { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */ | |
5679 | { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */ | |
5680 | { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */ | |
5681 | { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ | |
5682 | { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ | |
5683 | { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ | |
5684 | { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, | |
5685 | { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */ | |
5686 | { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */ | |
5687 | { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */ | |
5688 | { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */ | |
5689 | { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */ | |
5690 | { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } | |
5691 | }; | |
5692 | struct vnodeopv_desc hfs_specop_opv_desc = | |
5693 | { &hfs_specop_p, hfs_specop_entries }; | |
5694 | ||
5695 | #if FIFO | |
5696 | /* HFS+ FIFO VNOP table */ | |
5697 | int (**hfs_fifoop_p)(void *); | |
5698 | struct vnodeopv_entry_desc hfs_fifoop_entries[] = { | |
5699 | { &vnop_default_desc, (VOPFUNC)vn_default_error }, | |
5700 | { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */ | |
5701 | { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */ | |
5702 | { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */ | |
5703 | { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */ | |
5704 | { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */ | |
5705 | { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */ | |
5706 | { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */ | |
5707 | { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */ | |
5708 | { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */ | |
5709 | { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */ | |
5710 | { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */ | |
5711 | { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */ | |
5712 | { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */ | |
5713 | { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */ | |
5714 | { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */ | |
5715 | { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */ | |
5716 | { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */ | |
5717 | { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */ | |
5718 | { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */ | |
5719 | { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */ | |
5720 | { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */ | |
5721 | { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */ | |
5722 | { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */ | |
5723 | { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */ | |
5724 | { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */ | |
5725 | { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */ | |
5726 | { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ | |
5727 | { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, | |
5728 | { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */ | |
5729 | { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */ | |
5730 | { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */ | |
5731 | { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */ | |
5732 | { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */ | |
5733 | { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */ | |
5734 | { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } | |
5735 | }; | |
5736 | struct vnodeopv_desc hfs_fifoop_opv_desc = | |
5737 | { &hfs_fifoop_p, hfs_fifoop_entries }; | |
5738 | #endif /* FIFO */ | |
5739 | ||
5740 | ||
5741 |