/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
25 | ||
26 | #include <sys/systm.h> | |
27 | #include <sys/kernel.h> | |
28 | #include <sys/file.h> | |
29 | #include <sys/dirent.h> | |
30 | #include <sys/stat.h> | |
31 | #include <sys/buf.h> | |
32 | #include <sys/mount.h> | |
33 | #include <sys/vnode.h> | |
34 | #include <sys/malloc.h> | |
35 | #include <sys/namei.h> | |
36 | #include <sys/ubc.h> | |
37 | #include <sys/quota.h> | |
38 | #include <sys/time.h> | |
39 | #include <sys/disk.h> | |
40 | ||
41 | #include <miscfs/specfs/specdev.h> | |
42 | #include <miscfs/fifofs/fifo.h> | |
43 | #include <vfs/vfs_support.h> | |
44 | #include <machine/spl.h> | |
45 | ||
46 | #include <sys/kdebug.h> | |
47 | ||
48 | #include "hfs.h" | |
49 | #include "hfs_catalog.h" | |
50 | #include "hfs_cnode.h" | |
51 | #include "hfs_lockf.h" | |
52 | #include "hfs_dbg.h" | |
53 | #include "hfs_mount.h" | |
54 | #include "hfs_quota.h" | |
55 | #include "hfs_endian.h" | |
56 | ||
57 | #include "hfscommon/headers/BTreesInternal.h" | |
58 | #include "hfscommon/headers/FileMgrInternal.h" | |
59 | ||
#define MAKE_DELETED_NAME(NAME,FID) \
	(void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID))
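/*
 * Builds the temporary name (the delete prefix followed by the decimal
 * file ID) given to files that are hidden away rather than removed
 * outright while still busy.
 */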
62 | ||
63 | #define KNDETACH_VNLOCKED 0x00000001 | |
64 | ||
65 | #define CARBON_TEMP_DIR_NAME "Cleanup At Startup" | |
66 | ||
67 | ||
68 | /* Global vfs data structures for hfs */ | |
69 | ||
70 | ||
71 | extern unsigned long strtoul(const char *, char **, int); | |
72 | ||
73 | extern int groupmember(gid_t gid, struct ucred *cred); | |
74 | ||
75 | static int hfs_makenode(int mode, struct vnode *dvp, struct vnode **vpp, | |
76 | struct componentname *cnp); | |
77 | ||
78 | static int hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, | |
79 | struct vnode **rvpp, struct proc *p); | |
80 | ||
81 | static int hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p); | |
82 | ||
83 | static int hfs_removedir(struct vnode *, struct vnode *, struct componentname *, | |
84 | int); | |
85 | ||
86 | static int hfs_removefile(struct vnode *, struct vnode *, struct componentname *, | |
87 | int); | |
88 | ||
89 | /* Options for hfs_removedir and hfs_removefile */ | |
90 | #define HFSRM_PARENT_LOCKED 0x01 | |
91 | #define HFSRM_SKIP_RESERVE 0x02 | |
92 | #define HFSRM_SAVE_NAME 0x04 | |
93 | #define HFSRM_RENAMEOPTS 0x07 | |
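/* HFSRM_RENAMEOPTS is simply the union (0x01 | 0x02 | 0x04) of the three flags above. */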
94 | ||
95 | ||
96 | int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags); | |
97 | ||
98 | int hfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred, | |
99 | struct proc *p); | |
100 | int hfs_chmod(struct vnode *vp, int mode, struct ucred *cred, | |
101 | struct proc *p); | |
102 | int hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, | |
103 | struct ucred *cred, struct proc *p); | |
104 | ||
105 | /***************************************************************************** | |
106 | * | |
107 | * Common Operations on vnodes | |
108 | * | |
109 | *****************************************************************************/ | |
110 | ||
111 | /* | |
112 | * Create a regular file | |
113 | #% create dvp L U U | |
114 | #% create vpp - L - | |
115 | # | |
116 | vop_create { | |
117 | IN WILLRELE struct vnode *dvp; | |
118 | OUT struct vnode **vpp; | |
119 | IN struct componentname *cnp; | |
120 | IN struct vattr *vap; | |
121 | ||
122 | We are responsible for freeing the namei buffer, | |
123 | it is done in hfs_makenode() | |
124 | */ | |
125 | ||
126 | static int | |
127 | hfs_create(ap) | |
128 | struct vop_create_args /* { | |
129 | struct vnode *a_dvp; | |
130 | struct vnode **a_vpp; | |
131 | struct componentname *a_cnp; | |
132 | struct vattr *a_vap; | |
133 | } */ *ap; | |
134 | { | |
135 | struct vattr *vap = ap->a_vap; | |
136 | ||
137 | return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode), | |
138 | ap->a_dvp, ap->a_vpp, ap->a_cnp)); | |
139 | } | |
140 | ||
141 | ||
142 | /* | |
143 | * Mknod vnode call | |
144 | ||
145 | #% mknod dvp L U U | |
146 | #% mknod vpp - X - | |
147 | # | |
148 | vop_mknod { | |
149 | IN WILLRELE struct vnode *dvp; | |
150 | OUT WILLRELE struct vnode **vpp; | |
151 | IN struct componentname *cnp; | |
152 | IN struct vattr *vap; | |
153 | */ | |
154 | /* ARGSUSED */ | |
155 | ||
156 | static int | |
157 | hfs_mknod(ap) | |
158 | struct vop_mknod_args /* { | |
159 | struct vnode *a_dvp; | |
160 | struct vnode **a_vpp; | |
161 | struct componentname *a_cnp; | |
162 | struct vattr *a_vap; | |
163 | } */ *ap; | |
164 | { | |
165 | struct vattr *vap = ap->a_vap; | |
166 | struct vnode **vpp = ap->a_vpp; | |
167 | struct cnode *cp; | |
168 | int error; | |
169 | ||
170 | if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) { | |
171 | VOP_ABORTOP(ap->a_dvp, ap->a_cnp); | |
172 | vput(ap->a_dvp); | |
173 | return (EOPNOTSUPP); | |
174 | } | |
175 | ||
176 | /* Create the vnode */ | |
177 | error = hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode), | |
178 | ap->a_dvp, vpp, ap->a_cnp); | |
179 | if (error) | |
180 | return (error); | |
181 | cp = VTOC(*vpp); | |
182 | cp->c_flag |= C_ACCESS | C_CHANGE | C_UPDATE; | |
183 | if ((vap->va_rdev != VNOVAL) && | |
184 | (vap->va_type == VBLK || vap->va_type == VCHR)) | |
185 | cp->c_rdev = vap->va_rdev; | |
186 | /* | |
187 | * Remove cnode so that it will be reloaded by lookup and | |
188 | * checked to see if it is an alias of an existing vnode. | |
189 | * Note: unlike UFS, we don't bash v_type here. | |
190 | */ | |
191 | vput(*vpp); | |
192 | vgone(*vpp); | |
193 | *vpp = 0; | |
194 | return (0); | |
195 | } | |
196 | ||
197 | ||
198 | /* | |
199 | * Open called. | |
200 | #% open vp L L L | |
201 | # | |
202 | vop_open { | |
203 | IN struct vnode *vp; | |
204 | IN int mode; | |
205 | IN struct ucred *cred; | |
206 | IN struct proc *p; | |
207 | */ | |
208 | ||
209 | ||
210 | static int | |
211 | hfs_open(ap) | |
212 | struct vop_open_args /* { | |
213 | struct vnode *a_vp; | |
214 | int a_mode; | |
215 | struct ucred *a_cred; | |
216 | struct proc *a_p; | |
217 | } */ *ap; | |
218 | { | |
219 | struct vnode *vp = ap->a_vp; | |
220 | struct filefork *fp = VTOF(vp); | |
221 | struct timeval tv; | |
222 | ||
223 | /* | |
224 | * Files marked append-only must be opened for appending. | |
225 | */ | |
226 | if ((vp->v_type != VDIR) && (VTOC(vp)->c_flags & APPEND) && | |
227 | (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE) | |
228 | return (EPERM); | |
229 | ||
230 | if (ap->a_mode & O_EVTONLY) { | |
231 | if (vp->v_type == VREG) { | |
232 | ++VTOF(vp)->ff_evtonly_refs; | |
233 | } else { | |
234 | ++VTOC(vp)->c_evtonly_refs; | |
235 | }; | |
236 | }; | |
237 | ||
238 | /* | |
239 | * On the first (non-busy) open of a fragmented | |
240 | * file attempt to de-frag it (if its less than 20MB). | |
241 | */ | |
242 | if ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) || | |
243 | !UBCISVALID(vp) || ubc_isinuse(vp, 1)) { | |
244 | return (0); | |
245 | } | |
246 | fp = VTOF(vp); | |
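	/*
	 * The catalog record holds eight resident extents; a non-zero block
	 * count in the last slot means all of them are in use, so the fork
	 * is fragmented enough to be worth relocating.
	 */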
	if (fp->ff_blocks &&
	    fp->ff_extents[7].blockCount != 0 &&
	    fp->ff_size <= (20 * 1024 * 1024)) {
		/*
		 * Wait until system bootup is done (3 min).
		 */
		microuptime(&tv);
		if (tv.tv_sec < (60 * 3)) {
			return (0);
		}
		(void) hfs_relocate(vp, VTOVCB(vp)->nextAllocation + 4096, ap->a_cred, ap->a_p);
	}

	return (0);
}
262 | ||
263 | /* | |
264 | * Close called. | |
265 | * | |
266 | * Update the times on the cnode. | |
267 | #% close vp U U U | |
268 | # | |
269 | vop_close { | |
270 | IN struct vnode *vp; | |
271 | IN int fflag; | |
272 | IN struct ucred *cred; | |
273 | IN struct proc *p; | |
274 | */ | |
275 | ||
276 | ||
277 | static int | |
278 | hfs_close(ap) | |
279 | struct vop_close_args /* { | |
280 | struct vnode *a_vp; | |
281 | int a_fflag; | |
282 | struct ucred *a_cred; | |
283 | struct proc *a_p; | |
284 | } */ *ap; | |
285 | { | |
286 | register struct vnode *vp = ap->a_vp; | |
287 | register struct cnode *cp = VTOC(vp); | |
288 | register struct filefork *fp = VTOF(vp); | |
289 | struct proc *p = ap->a_p; | |
290 | struct timeval tv; | |
291 | off_t leof; | |
292 | u_long blks, blocksize; | |
293 | int devBlockSize; | |
294 | int error; | |
295 | ||
	simple_lock(&vp->v_interlock);
	if ((!UBCISVALID(vp) && vp->v_usecount > 1)
	    || (UBCISVALID(vp) && ubc_isinuse(vp, 1))) {
		tv = time;
		CTIMES(cp, &tv, &tv);
	}
	simple_unlock(&vp->v_interlock);

	if (ap->a_fflag & O_EVTONLY) {
		if (vp->v_type == VREG) {
			--VTOF(vp)->ff_evtonly_refs;
		} else {
			--VTOC(vp)->c_evtonly_refs;
		};
	};

	/*
	 * VOP_CLOSE can be called with vp locked (from vclean).
	 * We check for this case using VOP_ISLOCKED and bail.
	 *
	 * XXX During a force unmount we won't do the cleanup below!
	 */
	if (vp->v_type == VDIR || VOP_ISLOCKED(vp))
		return (0);

	leof = fp->ff_size;

	if ((fp->ff_blocks > 0) &&
	    !ISSET(cp->c_flag, C_DELETED) &&
	    ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) == 0)) {
		enum vtype our_type = vp->v_type;
		u_long our_id = vp->v_id;
		int was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);

		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		if (error)
			return (0);
		/*
		 * Since we can context switch in vn_lock our vnode
		 * could get recycled (e.g. umount -f). Double check
		 * that it's still ours.
		 */
		if (vp->v_type != our_type || vp->v_id != our_id
		    || cp != VTOC(vp) || !UBCINFOEXISTS(vp)) {
			VOP_UNLOCK(vp, 0, p);
			return (0);
		}

		/*
		 * Last chance to explicitly zero out the areas
		 * that are currently marked invalid:
		 */
		VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
		(void) cluster_push(vp);
		SET(vp->v_flag, VNOCACHE_DATA);	/* Don't cache zeros */
		while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
			struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
			off_t start = invalid_range->rl_start;
			off_t end = invalid_range->rl_end;

			/* The range about to be written must be validated
			 * first, so that VOP_CMAP() will return the
			 * appropriate mapping for the cluster code:
			 */
			rl_remove(start, end, &fp->ff_invalidranges);

			(void) cluster_write(vp, (struct uio *) 0, leof,
					invalid_range->rl_end + 1, invalid_range->rl_start,
					(off_t)0, devBlockSize, IO_HEADZEROFILL | IO_NOZERODIRTY);

			if (ISSET(vp->v_flag, VHASDIRTY))
				(void) cluster_push(vp);

			cp->c_flag |= C_MODIFIED;
		}
		cp->c_flag &= ~C_ZFWANTSYNC;
		cp->c_zftimeout = 0;
		blocksize = VTOVCB(vp)->blockSize;
		blks = leof / blocksize;
		if (((off_t)blks * (off_t)blocksize) != leof)
			blks++;
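		/*
		 * blks is now the logical EOF rounded up to a whole number of
		 * allocation blocks.
		 */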
		/*
		 * Shrink the peof to the smallest size necessary to contain the leof.
		 */
		if (blks < fp->ff_blocks)
			(void) VOP_TRUNCATE(vp, leof, IO_NDELAY, ap->a_cred, p);
		(void) cluster_push(vp);

		if (!was_nocache)
			CLR(vp->v_flag, VNOCACHE_DATA);

		/*
		 * If the VOP_TRUNCATE didn't happen to flush the vnode's
		 * information out to disk, force it to be updated now that
		 * all invalid ranges have been zero-filled and validated:
		 */
		if (cp->c_flag & C_MODIFIED) {
			tv = time;
			VOP_UPDATE(vp, &tv, &tv, 0);
		}
		VOP_UNLOCK(vp, 0, p);
	}
	if ((vp->v_flag & VSYSTEM) && (vp->v_usecount == 1))
		vgone(vp);
	return (0);
}
402 | ||
403 | /* | |
404 | #% access vp L L L | |
405 | # | |
406 | vop_access { | |
407 | IN struct vnode *vp; | |
408 | IN int mode; | |
409 | IN struct ucred *cred; | |
410 | IN struct proc *p; | |
411 | ||
412 | */ | |
413 | ||
414 | static int | |
415 | hfs_access(ap) | |
416 | struct vop_access_args /* { | |
417 | struct vnode *a_vp; | |
418 | int a_mode; | |
419 | struct ucred *a_cred; | |
420 | struct proc *a_p; | |
421 | } */ *ap; | |
422 | { | |
423 | struct vnode *vp = ap->a_vp; | |
424 | struct cnode *cp = VTOC(vp); | |
425 | struct ucred *cred = ap->a_cred; | |
426 | register gid_t *gp; | |
427 | mode_t mode = ap->a_mode; | |
428 | mode_t mask = 0; | |
429 | int i; | |
430 | int error; | |
431 | ||
432 | /* | |
433 | * Disallow write attempts on read-only file systems; | |
434 | * unless the file is a socket, fifo, or a block or | |
435 | * character device resident on the file system. | |
436 | */ | |
437 | if (mode & VWRITE) { | |
438 | switch (vp->v_type) { | |
439 | case VDIR: | |
440 | case VLNK: | |
441 | case VREG: | |
442 | if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) | |
443 | return (EROFS); | |
444 | #if QUOTA | |
445 | if ((error = hfs_getinoquota(cp))) | |
446 | return (error); | |
447 | #endif /* QUOTA */ | |
448 | break; | |
449 | } | |
450 | /* If immutable bit set, nobody gets to write it. */ | |
451 | if (cp->c_flags & IMMUTABLE) | |
452 | return (EPERM); | |
453 | } | |
454 | ||
455 | ||
456 | /* Otherwise, user id 0 always gets access. */ | |
457 | if (cred->cr_uid == 0) | |
458 | return (0); | |
459 | ||
460 | mask = 0; | |
461 | ||
462 | /* Otherwise, check the owner. */ | |
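	/* Files owned by UNKNOWNUID are treated as being owned by the caller. */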
	if ( (cp->c_uid == cred->cr_uid) || (cp->c_uid == UNKNOWNUID) ) {
		if (mode & VEXEC)
			mask |= S_IXUSR;
		if (mode & VREAD)
			mask |= S_IRUSR;
		if (mode & VWRITE)
			mask |= S_IWUSR;
		return ((cp->c_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	if (! (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)) {
		for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
			if (cp->c_gid == *gp) {
				if (mode & VEXEC)
					mask |= S_IXGRP;
				if (mode & VREAD)
					mask |= S_IRGRP;
				if (mode & VWRITE)
					mask |= S_IWGRP;
				return ((cp->c_mode & mask) == mask ? 0 : EACCES);
			}
	}

	/* Otherwise, check everyone else. */
	if (mode & VEXEC)
		mask |= S_IXOTH;
	if (mode & VREAD)
		mask |= S_IROTH;
	if (mode & VWRITE)
		mask |= S_IWOTH;
	return ((cp->c_mode & mask) == mask ? 0 : EACCES);
}
496 | ||
497 | ||
498 | ||
499 | /* | |
500 | #% getattr vp = = = | |
501 | # | |
502 | vop_getattr { | |
503 | IN struct vnode *vp; | |
504 | IN struct vattr *vap; | |
505 | IN struct ucred *cred; | |
506 | IN struct proc *p; | |
507 | ||
508 | */ | |
509 | ||
510 | ||
511 | /* ARGSUSED */ | |
512 | static int | |
513 | hfs_getattr(ap) | |
514 | struct vop_getattr_args /* { | |
515 | struct vnode *a_vp; | |
516 | struct vattr *a_vap; | |
517 | struct ucred *a_cred; | |
518 | struct proc *a_p; | |
519 | } */ *ap; | |
520 | { | |
521 | struct vnode *vp = ap->a_vp; | |
522 | struct cnode *cp = VTOC(vp); | |
523 | struct vattr *vap = ap->a_vap; | |
524 | struct timeval tv; | |
525 | ||
526 | tv = time; | |
527 | CTIMES(cp, &tv, &tv); | |
528 | ||
529 | vap->va_type = vp->v_type; | |
530 | vap->va_mode = cp->c_mode; | |
531 | vap->va_nlink = cp->c_nlink; | |
532 | /* | |
533 | * [2856576] Since we are dynamically changing the owner, also | |
534 | * effectively turn off the set-user-id and set-group-id bits, | |
535 | * just like chmod(2) would when changing ownership. This prevents | |
536 | * a security hole where set-user-id programs run as whoever is | |
537 | * logged on (or root if nobody is logged in yet!) | |
538 | */ | |
539 | if (cp->c_uid == UNKNOWNUID) { | |
540 | vap->va_mode &= ~(S_ISUID | S_ISGID); | |
541 | vap->va_uid = ap->a_cred->cr_uid; | |
542 | } else { | |
543 | vap->va_uid = cp->c_uid; | |
544 | } | |
545 | vap->va_gid = cp->c_gid; | |
546 | vap->va_fsid = cp->c_dev; | |
547 | /* | |
548 | * Exporting file IDs from HFS Plus: | |
549 | * | |
550 | * For "normal" files the c_fileid is the same value as the | |
551 | * c_cnid. But for hard link files, they are different - the | |
552 | * c_cnid belongs to the active directory entry (ie the link) | |
553 | * and the c_fileid is for the actual inode (ie the data file). | |
554 | * | |
555 | * The stat call (getattr) will always return the c_fileid | |
556 | * and Carbon APIs, which are hardlink-ignorant, will always | |
557 | * receive the c_cnid (from getattrlist). | |
558 | */ | |
559 | vap->va_fileid = cp->c_fileid; | |
560 | vap->va_atime.tv_sec = cp->c_atime; | |
561 | vap->va_atime.tv_nsec = 0; | |
562 | vap->va_mtime.tv_sec = cp->c_mtime; | |
563 | vap->va_mtime.tv_nsec = cp->c_mtime_nsec; | |
564 | vap->va_ctime.tv_sec = cp->c_ctime; | |
565 | vap->va_ctime.tv_nsec = 0; | |
566 | vap->va_gen = 0; | |
567 | vap->va_flags = cp->c_flags; | |
568 | vap->va_rdev = 0; | |
569 | vap->va_blocksize = VTOVFS(vp)->mnt_stat.f_iosize; | |
570 | vap->va_filerev = 0; | |
571 | if (vp->v_type == VDIR) { | |
572 | vap->va_size = cp->c_nlink * AVERAGE_HFSDIRENTRY_SIZE; | |
573 | vap->va_bytes = 0; | |
574 | } else { | |
575 | vap->va_size = VTOF(vp)->ff_size; | |
576 | vap->va_bytes = (u_quad_t)cp->c_blocks * | |
577 | (u_quad_t)VTOVCB(vp)->blockSize; | |
578 | if (vp->v_type == VBLK || vp->v_type == VCHR) | |
579 | vap->va_rdev = cp->c_rdev; | |
580 | } | |
581 | return (0); | |
582 | } | |
583 | ||
584 | /* | |
585 | * Set attribute vnode op. called from several syscalls | |
586 | #% setattr vp L L L | |
587 | # | |
588 | vop_setattr { | |
589 | IN struct vnode *vp; | |
590 | IN struct vattr *vap; | |
591 | IN struct ucred *cred; | |
592 | IN struct proc *p; | |
593 | ||
594 | */ | |
595 | ||
596 | static int | |
597 | hfs_setattr(ap) | |
598 | struct vop_setattr_args /* { | |
599 | struct vnode *a_vp; | |
600 | struct vattr *a_vap; | |
601 | struct ucred *a_cred; | |
602 | struct proc *a_p; | |
603 | } */ *ap; | |
604 | { | |
605 | struct vattr *vap = ap->a_vap; | |
606 | struct vnode *vp = ap->a_vp; | |
607 | struct cnode *cp = VTOC(vp); | |
608 | struct ucred *cred = ap->a_cred; | |
609 | struct proc *p = ap->a_p; | |
610 | struct timeval atimeval, mtimeval; | |
611 | int error; | |
612 | ||
613 | /* | |
614 | * Check for unsettable attributes. | |
615 | */ | |
616 | if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) || | |
617 | (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) || | |
618 | (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) || | |
619 | ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) { | |
620 | return (EINVAL); | |
621 | } | |
622 | ||
623 | // XXXdbg | |
624 | // don't allow people to set the attributes of symlinks | |
625 | // (nfs has a bad habit of doing ths and it can cause | |
626 | // problems for journaling). | |
627 | // | |
628 | if (vp->v_type == VLNK) { | |
629 | return 0; | |
630 | } | |
631 | ||
632 | ||
633 | ||
634 | if (vap->va_flags != VNOVAL) { | |
635 | if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) | |
636 | return (EROFS); | |
637 | if ((error = hfs_chflags(vp, vap->va_flags, cred, p))) | |
638 | return (error); | |
639 | if (vap->va_flags & (IMMUTABLE | APPEND)) | |
640 | return (0); | |
641 | } | |
642 | ||
643 | if (cp->c_flags & (IMMUTABLE | APPEND)) | |
644 | return (EPERM); | |
645 | ||
646 | // XXXdbg - don't allow modification of the journal or journal_info_block | |
647 | if (VTOHFS(vp)->jnl && cp->c_datafork) { | |
648 | struct HFSPlusExtentDescriptor *extd; | |
649 | ||
650 | extd = &cp->c_datafork->ff_extents[0]; | |
651 | if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) { | |
652 | return EPERM; | |
653 | } | |
654 | } | |
655 | ||
656 | /* | |
657 | * Go through the fields and update iff not VNOVAL. | |
658 | */ | |
659 | if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { | |
660 | if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) | |
661 | return (EROFS); | |
662 | if ((error = hfs_chown(vp, vap->va_uid, vap->va_gid, cred, p))) | |
663 | return (error); | |
664 | } | |
665 | if (vap->va_size != VNOVAL) { | |
666 | /* | |
667 | * Disallow write attempts on read-only file systems; | |
668 | * unless the file is a socket, fifo, or a block or | |
669 | * character device resident on the file system. | |
670 | */ | |
671 | switch (vp->v_type) { | |
672 | case VDIR: | |
673 | return (EISDIR); | |
674 | case VLNK: | |
675 | case VREG: | |
676 | if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) | |
677 | return (EROFS); | |
678 | break; | |
679 | default: | |
680 | break; | |
681 | } | |
682 | if ((error = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p))) | |
683 | return (error); | |
684 | } | |
685 | cp = VTOC(vp); | |
686 | if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { | |
687 | if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) | |
688 | return (EROFS); | |
689 | if (((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) && | |
690 | ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || | |
691 | (error = VOP_ACCESS(vp, VWRITE, cred, p)))) { | |
692 | return (error); | |
693 | } | |
694 | if (vap->va_atime.tv_sec != VNOVAL) | |
695 | cp->c_flag |= C_ACCESS; | |
696 | if (vap->va_mtime.tv_sec != VNOVAL) { | |
697 | cp->c_flag |= C_CHANGE | C_UPDATE; | |
698 | /* | |
699 | * The utimes system call can reset the modification | |
700 | * time but it doesn't know about HFS create times. | |
701 | * So we need to insure that the creation time is | |
702 | * always at least as old as the modification time. | |
703 | */ | |
704 | if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) && | |
705 | (cp->c_cnid != kRootDirID) && | |
706 | (vap->va_mtime.tv_sec < cp->c_itime)) { | |
707 | cp->c_itime = vap->va_mtime.tv_sec; | |
708 | } | |
709 | } | |
710 | atimeval.tv_sec = vap->va_atime.tv_sec; | |
711 | atimeval.tv_usec = 0; | |
712 | mtimeval.tv_sec = vap->va_mtime.tv_sec; | |
713 | mtimeval.tv_usec = 0; | |
714 | if ((error = VOP_UPDATE(vp, &atimeval, &mtimeval, 1))) | |
715 | return (error); | |
716 | } | |
717 | error = 0; | |
718 | if (vap->va_mode != (mode_t)VNOVAL) { | |
719 | if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) | |
720 | return (EROFS); | |
721 | error = hfs_chmod(vp, (int)vap->va_mode, cred, p); | |
722 | } | |
723 | HFS_KNOTE(vp, NOTE_ATTRIB); | |
724 | return (error); | |
725 | } | |
726 | ||
727 | ||
728 | /* | |
729 | * Change the mode on a file. | |
730 | * cnode must be locked before calling. | |
731 | */ | |
732 | __private_extern__ | |
733 | int | |
734 | hfs_chmod(vp, mode, cred, p) | |
735 | register struct vnode *vp; | |
736 | register int mode; | |
737 | register struct ucred *cred; | |
738 | struct proc *p; | |
739 | { | |
740 | register struct cnode *cp = VTOC(vp); | |
741 | int error; | |
742 | ||
743 | if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord) | |
744 | return (0); | |
745 | ||
746 | // XXXdbg - don't allow modification of the journal or journal_info_block | |
747 | if (VTOHFS(vp)->jnl && cp && cp->c_datafork) { | |
748 | struct HFSPlusExtentDescriptor *extd; | |
749 | ||
750 | extd = &cp->c_datafork->ff_extents[0]; | |
751 | if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) { | |
752 | return EPERM; | |
753 | } | |
754 | } | |
755 | ||
756 | #if OVERRIDE_UNKNOWN_PERMISSIONS | |
757 | if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { | |
758 | return (0); | |
759 | }; | |
760 | #endif | |
761 | if ((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) | |
762 | return (error); | |
763 | if (cred->cr_uid) { | |
764 | if (vp->v_type != VDIR && (mode & S_ISTXT)) | |
765 | return (EFTYPE); | |
766 | if (!groupmember(cp->c_gid, cred) && (mode & S_ISGID)) | |
767 | return (EPERM); | |
768 | } | |
769 | cp->c_mode &= ~ALLPERMS; | |
770 | cp->c_mode |= (mode & ALLPERMS); | |
771 | cp->c_flag |= C_CHANGE; | |
772 | return (0); | |
773 | } | |
774 | ||
775 | ||
776 | __private_extern__ | |
777 | int | |
778 | hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags) | |
779 | { | |
780 | struct cnode *cp = VTOC(vp); | |
781 | gid_t *gp; | |
782 | int retval = 0; | |
783 | int i; | |
784 | ||
785 | /* | |
786 | * Disallow write attempts on read-only file systems; | |
787 | * unless the file is a socket, fifo, or a block or | |
788 | * character device resident on the file system. | |
789 | */ | |
790 | switch (vp->v_type) { | |
791 | case VDIR: | |
792 | case VLNK: | |
793 | case VREG: | |
794 | if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) | |
795 | return (EROFS); | |
796 | break; | |
797 | default: | |
798 | break; | |
799 | } | |
800 | ||
801 | /* If immutable bit set, nobody gets to write it. */ | |
802 | if (considerFlags && (cp->c_flags & IMMUTABLE)) | |
803 | return (EPERM); | |
804 | ||
805 | /* Otherwise, user id 0 always gets access. */ | |
806 | if (cred->cr_uid == 0) | |
807 | return (0); | |
808 | ||
809 | /* Otherwise, check the owner. */ | |
810 | if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0) | |
811 | return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES); | |
812 | ||
813 | /* Otherwise, check the groups. */ | |
814 | for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) { | |
815 | if (cp->c_gid == *gp) | |
816 | return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES); | |
817 | } | |
818 | ||
819 | /* Otherwise, check everyone else. */ | |
820 | return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES); | |
821 | } | |
822 | ||
823 | ||
824 | ||
825 | /* | |
826 | * Change the flags on a file or directory. | |
827 | * cnode must be locked before calling. | |
828 | */ | |
829 | __private_extern__ | |
830 | int | |
831 | hfs_chflags(vp, flags, cred, p) | |
832 | register struct vnode *vp; | |
833 | register u_long flags; | |
834 | register struct ucred *cred; | |
835 | struct proc *p; | |
836 | { | |
837 | register struct cnode *cp = VTOC(vp); | |
838 | int retval; | |
839 | ||
840 | if (VTOVCB(vp)->vcbSigWord == kHFSSigWord) { | |
841 | if ((retval = hfs_write_access(vp, cred, p, false)) != 0) { | |
842 | return retval; | |
843 | }; | |
844 | } else if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) { | |
845 | return retval; | |
846 | }; | |
847 | ||
848 | if (cred->cr_uid == 0) { | |
849 | if ((cp->c_flags & (SF_IMMUTABLE | SF_APPEND)) && | |
850 | securelevel > 0) { | |
851 | return EPERM; | |
852 | }; | |
853 | cp->c_flags = flags; | |
854 | } else { | |
855 | if (cp->c_flags & (SF_IMMUTABLE | SF_APPEND) || | |
856 | (flags & UF_SETTABLE) != flags) { | |
857 | return EPERM; | |
858 | }; | |
859 | cp->c_flags &= SF_SETTABLE; | |
860 | cp->c_flags |= (flags & UF_SETTABLE); | |
861 | } | |
862 | cp->c_flag |= C_CHANGE; | |
863 | ||
864 | return (0); | |
865 | } | |
866 | ||
867 | ||
868 | /* | |
869 | * Perform chown operation on cnode cp; | |
870 | * code must be locked prior to call. | |
871 | */ | |
872 | __private_extern__ | |
873 | int | |
874 | hfs_chown(vp, uid, gid, cred, p) | |
875 | register struct vnode *vp; | |
876 | uid_t uid; | |
877 | gid_t gid; | |
878 | struct ucred *cred; | |
879 | struct proc *p; | |
880 | { | |
881 | register struct cnode *cp = VTOC(vp); | |
882 | uid_t ouid; | |
883 | gid_t ogid; | |
884 | int error = 0; | |
885 | #if QUOTA | |
886 | register int i; | |
887 | int64_t change; | |
888 | #endif /* QUOTA */ | |
889 | ||
890 | if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord) | |
891 | return (EOPNOTSUPP); | |
892 | ||
893 | if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) | |
894 | return (0); | |
895 | ||
896 | if (uid == (uid_t)VNOVAL) | |
897 | uid = cp->c_uid; | |
898 | if (gid == (gid_t)VNOVAL) | |
899 | gid = cp->c_gid; | |
900 | /* | |
901 | * If we don't own the file, are trying to change the owner | |
902 | * of the file, or are not a member of the target group, | |
903 | * the caller must be superuser or the call fails. | |
904 | */ | |
905 | if ((cred->cr_uid != cp->c_uid || uid != cp->c_uid || | |
906 | (gid != cp->c_gid && !groupmember((gid_t)gid, cred))) && | |
907 | (error = suser(cred, &p->p_acflag))) | |
908 | return (error); | |
909 | ||
910 | ogid = cp->c_gid; | |
911 | ouid = cp->c_uid; | |
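	/*
	 * The quota code below drops the block and inode charges held under
	 * the old IDs, switches the cnode to the new IDs, and re-applies the
	 * charges; if charging the new owner fails, ownership is rolled back
	 * to the original IDs.
	 */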
#if QUOTA
	if ((error = hfs_getinoquota(cp)))
		return (error);
	if (ouid == uid) {
		dqrele(vp, cp->c_dquot[USRQUOTA]);
		cp->c_dquot[USRQUOTA] = NODQUOT;
	}
	if (ogid == gid) {
		dqrele(vp, cp->c_dquot[GRPQUOTA]);
		cp->c_dquot[GRPQUOTA] = NODQUOT;
	}

	/*
	 * Eventually need to account for (fake) a block per directory
	 * if (vp->v_type == VDIR)
	 *     change = VTOVCB(vp)->blockSize;
	 * else
	 */

	change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
	(void) hfs_chkdq(cp, -change, cred, CHOWN);
	(void) hfs_chkiq(cp, -1, cred, CHOWN);
	for (i = 0; i < MAXQUOTAS; i++) {
		dqrele(vp, cp->c_dquot[i]);
		cp->c_dquot[i] = NODQUOT;
	}
#endif /* QUOTA */
	cp->c_gid = gid;
	cp->c_uid = uid;
#if QUOTA
	if ((error = hfs_getinoquota(cp)) == 0) {
		if (ouid == uid) {
			dqrele(vp, cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(vp, cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
			if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
				goto good;
			else
				(void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dqrele(vp, cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
	cp->c_gid = ogid;
	cp->c_uid = ouid;
	if (hfs_getinoquota(cp) == 0) {
		if (ouid == uid) {
			dqrele(vp, cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(vp, cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		(void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
		(void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
		(void) hfs_getinoquota(cp);
	}
	return (error);
good:
	if (hfs_getinoquota(cp))
		panic("hfs_chown: lost quota");
#endif /* QUOTA */

	if (ouid != uid || ogid != gid)
		cp->c_flag |= C_CHANGE;
	if (ouid != uid && cred->cr_uid != 0)
		cp->c_mode &= ~S_ISUID;
	if (ogid != gid && cred->cr_uid != 0)
		cp->c_mode &= ~S_ISGID;
	return (0);
}
991 | ||
992 | ||
993 | /* | |
994 | # | |
995 | #% exchange fvp L L L | |
996 | #% exchange tvp L L L | |
997 | # | |
998 | */ | |
999 | /* | |
1000 | * The hfs_exchange routine swaps the fork data in two files by | |
1001 | * exchanging some of the information in the cnode. It is used | |
1002 | * to preserve the file ID when updating an existing file, in | |
1003 | * case the file is being tracked through its file ID. Typically | |
1004 | * its used after creating a new file during a safe-save. | |
1005 | */ | |
1006 | ||
1007 | static int | |
1008 | hfs_exchange(ap) | |
1009 | struct vop_exchange_args /* { | |
1010 | struct vnode *a_fvp; | |
1011 | struct vnode *a_tvp; | |
1012 | struct ucred *a_cred; | |
1013 | struct proc *a_p; | |
1014 | } */ *ap; | |
1015 | { | |
1016 | struct vnode *from_vp = ap->a_fvp; | |
1017 | struct vnode *to_vp = ap->a_tvp; | |
1018 | struct cnode *from_cp = VTOC(from_vp); | |
1019 | struct cnode *to_cp = VTOC(to_vp); | |
1020 | struct hfsmount *hfsmp = VTOHFS(from_vp); | |
1021 | struct cat_desc tempdesc; | |
1022 | struct cat_attr tempattr; | |
1023 | int error = 0, started_tr = 0, grabbed_lock = 0; | |
1024 | cat_cookie_t cookie = {0}; | |
1025 | ||
1026 | /* The files must be on the same volume. */ | |
1027 | if (from_vp->v_mount != to_vp->v_mount) | |
1028 | return (EXDEV); | |
1029 | ||
1030 | /* Only normal files can be exchanged. */ | |
1031 | if ((from_vp->v_type != VREG) || (to_vp->v_type != VREG) || | |
1032 | (from_cp->c_flag & C_HARDLINK) || (to_cp->c_flag & C_HARDLINK) || | |
1033 | VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) | |
1034 | return (EINVAL); | |
1035 | ||
1036 | // XXXdbg - don't allow modification of the journal or journal_info_block | |
1037 | if (hfsmp->jnl) { | |
1038 | struct HFSPlusExtentDescriptor *extd; | |
1039 | ||
1040 | if (from_cp->c_datafork) { | |
1041 | extd = &from_cp->c_datafork->ff_extents[0]; | |
1042 | if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) { | |
1043 | return EPERM; | |
1044 | } | |
1045 | } | |
1046 | ||
1047 | if (to_cp->c_datafork) { | |
1048 | extd = &to_cp->c_datafork->ff_extents[0]; | |
1049 | if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) { | |
1050 | return EPERM; | |
1051 | } | |
1052 | } | |
1053 | } | |
1054 | ||
1055 | // XXXdbg | |
1056 | hfs_global_shared_lock_acquire(hfsmp); | |
1057 | grabbed_lock = 1; | |
1058 | if (hfsmp->jnl) { | |
1059 | if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { | |
1060 | goto Err_Exit; | |
1061 | } | |
1062 | started_tr = 1; | |
1063 | } | |
1064 | ||
1065 | /* | |
1066 | * Reserve some space in the Catalog file. | |
1067 | */ | |
1068 | if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, ap->a_p))) { | |
1069 | goto Err_Exit; | |
1070 | } | |
1071 | ||
1072 | /* Lock catalog b-tree */ | |
1073 | error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p); | |
1074 | if (error) goto Err_Exit; | |
1075 | ||
1076 | /* The backend code always tries to delete the virtual | |
1077 | * extent id for exchanging files so we neeed to lock | |
1078 | * the extents b-tree. | |
1079 | */ | |
1080 | error = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p); | |
1081 | if (error) { | |
1082 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p); | |
1083 | goto Err_Exit; | |
1084 | } | |
1085 | ||
1086 | /* Do the exchange */ | |
1087 | error = MacToVFSError(ExchangeFileIDs(HFSTOVCB(hfsmp), | |
1088 | from_cp->c_desc.cd_nameptr, to_cp->c_desc.cd_nameptr, | |
1089 | from_cp->c_parentcnid, to_cp->c_parentcnid, | |
1090 | from_cp->c_hint, to_cp->c_hint)); | |
1091 | ||
1092 | (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, ap->a_p); | |
1093 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p); | |
1094 | ||
1095 | if (error != E_NONE) { | |
1096 | goto Err_Exit; | |
1097 | } | |
1098 | ||
1099 | /* Purge the vnodes from the name cache */ | |
1100 | if (from_vp) | |
1101 | cache_purge(from_vp); | |
1102 | if (to_vp) | |
1103 | cache_purge(to_vp); | |
1104 | ||
1105 | /* Save a copy of from attributes before swapping. */ | |
1106 | bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc)); | |
1107 | bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr)); | |
1108 | ||
1109 | /* | |
1110 | * Swap the descriptors and all non-fork related attributes. | |
1111 | * (except the modify date) | |
1112 | */ | |
1113 | bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc)); | |
1114 | ||
1115 | from_cp->c_hint = 0; | |
1116 | from_cp->c_fileid = from_cp->c_cnid; | |
1117 | from_cp->c_itime = to_cp->c_itime; | |
1118 | from_cp->c_btime = to_cp->c_btime; | |
1119 | from_cp->c_atime = to_cp->c_atime; | |
1120 | from_cp->c_ctime = to_cp->c_ctime; | |
1121 | from_cp->c_gid = to_cp->c_gid; | |
1122 | from_cp->c_uid = to_cp->c_uid; | |
1123 | from_cp->c_flags = to_cp->c_flags; | |
1124 | from_cp->c_mode = to_cp->c_mode; | |
1125 | bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32); | |
1126 | ||
1127 | bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc)); | |
1128 | to_cp->c_hint = 0; | |
1129 | to_cp->c_fileid = to_cp->c_cnid; | |
1130 | to_cp->c_itime = tempattr.ca_itime; | |
1131 | to_cp->c_btime = tempattr.ca_btime; | |
1132 | to_cp->c_atime = tempattr.ca_atime; | |
1133 | to_cp->c_ctime = tempattr.ca_ctime; | |
1134 | to_cp->c_gid = tempattr.ca_gid; | |
1135 | to_cp->c_uid = tempattr.ca_uid; | |
1136 | to_cp->c_flags = tempattr.ca_flags; | |
1137 | to_cp->c_mode = tempattr.ca_mode; | |
1138 | bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32); | |
1139 | ||
1140 | /* Reinsert into the cnode hash under new file IDs*/ | |
1141 | hfs_chashremove(from_cp); | |
1142 | hfs_chashremove(to_cp); | |
1143 | ||
1144 | hfs_chashinsert(from_cp); | |
1145 | hfs_chashinsert(to_cp); | |
1146 | ||
1147 | /* | |
1148 | * When a file moves out of "Cleanup At Startup" | |
1149 | * we can drop its NODUMP status. | |
1150 | */ | |
1151 | if ((from_cp->c_flags & UF_NODUMP) && | |
1152 | (from_cp->c_parentcnid != to_cp->c_parentcnid)) { | |
1153 | from_cp->c_flags &= ~UF_NODUMP; | |
1154 | from_cp->c_flag |= C_CHANGE; | |
1155 | } | |
1156 | if ((to_cp->c_flags & UF_NODUMP) && | |
1157 | (to_cp->c_parentcnid != from_cp->c_parentcnid)) { | |
1158 | to_cp->c_flags &= ~UF_NODUMP; | |
1159 | to_cp->c_flag |= C_CHANGE; | |
1160 | } | |
1161 | ||
1162 | HFS_KNOTE(from_vp, NOTE_ATTRIB); | |
1163 | HFS_KNOTE(to_vp, NOTE_ATTRIB); | |
1164 | ||
1165 | Err_Exit: | |
1166 | cat_postflight(hfsmp, &cookie, ap->a_p); | |
1167 | ||
1168 | // XXXdbg | |
1169 | if (started_tr) { | |
1170 | journal_end_transaction(hfsmp->jnl); | |
1171 | } | |
1172 | if (grabbed_lock) { | |
1173 | hfs_global_shared_lock_release(hfsmp); | |
1174 | } | |
1175 | ||
1176 | return (error); | |
1177 | } | |
1178 | ||
1179 | ||
1180 | /* | |
1181 | ||
1182 | #% fsync vp L L L | |
1183 | # | |
1184 | vop_fsync { | |
1185 | IN struct vnode *vp; | |
1186 | IN struct ucred *cred; | |
1187 | IN int waitfor; | |
1188 | IN struct proc *p; | |
1189 | ||
1190 | */ | |
1191 | static int | |
1192 | hfs_fsync(ap) | |
1193 | struct vop_fsync_args /* { | |
1194 | struct vnode *a_vp; | |
1195 | struct ucred *a_cred; | |
1196 | int a_waitfor; | |
1197 | struct proc *a_p; | |
1198 | } */ *ap; | |
1199 | { | |
1200 | struct vnode *vp = ap->a_vp; | |
1201 | struct cnode *cp = VTOC(vp); | |
1202 | struct filefork *fp = NULL; | |
1203 | int retval = 0; | |
1204 | register struct buf *bp; | |
1205 | struct timeval tv; | |
1206 | struct buf *nbp; | |
1207 | struct hfsmount *hfsmp = VTOHFS(ap->a_vp); | |
1208 | int s; | |
1209 | int wait; | |
1210 | int retry = 0; | |
1211 | ||
1212 | wait = (ap->a_waitfor == MNT_WAIT); | |
1213 | ||
1214 | /* HFS directories don't have any data blocks. */ | |
1215 | if (vp->v_type == VDIR) | |
1216 | goto metasync; | |
1217 | ||
1218 | /* | |
1219 | * For system files flush the B-tree header and | |
1220 | * for regular files write out any clusters | |
1221 | */ | |
1222 | if (vp->v_flag & VSYSTEM) { | |
1223 | if (VTOF(vp)->fcbBTCBPtr != NULL) { | |
1224 | // XXXdbg | |
1225 | if (hfsmp->jnl == NULL) { | |
1226 | BTFlushPath(VTOF(vp)); | |
1227 | } | |
1228 | } | |
1229 | } else if (UBCINFOEXISTS(vp)) | |
1230 | (void) cluster_push(vp); | |
1231 | ||
1232 | /* | |
1233 | * When MNT_WAIT is requested and the zero fill timeout | |
1234 | * has expired then we must explicitly zero out any areas | |
1235 | * that are currently marked invalid (holes). | |
1236 | * | |
1237 | * Files with NODUMP can bypass zero filling here. | |
1238 | */ | |
1239 | if ((wait || (cp->c_flag & C_ZFWANTSYNC)) && | |
1240 | ((cp->c_flags & UF_NODUMP) == 0) && | |
1241 | UBCINFOEXISTS(vp) && (fp = VTOF(vp)) && | |
1242 | cp->c_zftimeout != 0) { | |
1243 | int devblksize; | |
1244 | int was_nocache; | |
1245 | ||
1246 | if (time.tv_sec < cp->c_zftimeout) { | |
1247 | /* Remember that a force sync was requested. */ | |
1248 | cp->c_flag |= C_ZFWANTSYNC; | |
1249 | goto loop; | |
1250 | } | |
1251 | VOP_DEVBLOCKSIZE(cp->c_devvp, &devblksize); | |
1252 | was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA); | |
1253 | SET(vp->v_flag, VNOCACHE_DATA); /* Don't cache zeros */ | |
1254 | ||
1255 | while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) { | |
1256 | struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges); | |
1257 | off_t start = invalid_range->rl_start; | |
1258 | off_t end = invalid_range->rl_end; | |
1259 | ||
1260 | /* The range about to be written must be validated | |
1261 | * first, so that VOP_CMAP() will return the | |
1262 | * appropriate mapping for the cluster code: | |
1263 | */ | |
1264 | rl_remove(start, end, &fp->ff_invalidranges); | |
1265 | ||
1266 | (void) cluster_write(vp, (struct uio *) 0, | |
1267 | fp->ff_size, | |
1268 | invalid_range->rl_end + 1, | |
1269 | invalid_range->rl_start, | |
1270 | (off_t)0, devblksize, | |
1271 | IO_HEADZEROFILL | IO_NOZERODIRTY); | |
1272 | cp->c_flag |= C_MODIFIED; | |
1273 | } | |
1274 | (void) cluster_push(vp); | |
1275 | if (!was_nocache) | |
1276 | CLR(vp->v_flag, VNOCACHE_DATA); | |
1277 | cp->c_flag &= ~C_ZFWANTSYNC; | |
1278 | cp->c_zftimeout = 0; | |
1279 | } | |
1280 | ||
1281 | /* | |
1282 | * Flush all dirty buffers associated with a vnode. | |
1283 | */ | |
1284 | loop: | |
1285 | s = splbio(); | |
1286 | for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { | |
1287 | nbp = bp->b_vnbufs.le_next; | |
1288 | if ((bp->b_flags & B_BUSY)) | |
1289 | continue; | |
1290 | if ((bp->b_flags & B_DELWRI) == 0) | |
1291 | panic("hfs_fsync: bp 0x% not dirty (hfsmp 0x%x)", bp, hfsmp); | |
1292 | // XXXdbg | |
1293 | if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) { | |
1294 | if ((bp->b_flags & B_META) == 0) { | |
1295 | panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n", | |
1296 | bp, hfsmp->jnl); | |
1297 | } | |
1298 | // if journal_active() returns >= 0 then the journal is ok and we | |
1299 | // shouldn't do anything to this locked block (because it is part | |
1300 | // of a transaction). otherwise we'll just go through the normal | |
1301 | // code path and flush the buffer. | |
1302 | if (journal_active(hfsmp->jnl) >= 0) { | |
1303 | continue; | |
1304 | } | |
1305 | } | |
1306 | ||
1307 | bremfree(bp); | |
1308 | bp->b_flags |= B_BUSY; | |
1309 | /* Clear B_LOCKED, should only be set on meta files */ | |
1310 | bp->b_flags &= ~B_LOCKED; | |
1311 | ||
1312 | splx(s); | |
1313 | /* | |
1314 | * Wait for I/O associated with indirect blocks to complete, | |
1315 | * since there is no way to quickly wait for them below. | |
1316 | */ | |
1317 | if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT) | |
1318 | (void) bawrite(bp); | |
1319 | else | |
1320 | (void) VOP_BWRITE(bp); | |
1321 | goto loop; | |
1322 | } | |
1323 | ||
1324 | if (wait) { | |
1325 | while (vp->v_numoutput) { | |
1326 | vp->v_flag |= VBWAIT; | |
1327 | tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "hfs_fsync", 0); | |
1328 | } | |
1329 | ||
1330 | // XXXdbg -- is checking for hfsmp->jnl == NULL the right | |
1331 | // thing to do? | |
1332 | if (hfsmp->jnl == NULL && vp->v_dirtyblkhd.lh_first) { | |
1333 | /* still have some dirty buffers */ | |
1334 | if (retry++ > 10) { | |
1335 | vprint("hfs_fsync: dirty", vp); | |
1336 | splx(s); | |
1337 | /* | |
1338 | * Looks like the requests are not | |
1339 | * getting queued to the driver. | |
1340 | * Retrying here causes a cpu bound loop. | |
1341 | * Yield to the other threads and hope | |
1342 | * for the best. | |
1343 | */ | |
1344 | (void)tsleep((caddr_t)&vp->v_numoutput, | |
1345 | PRIBIO + 1, "hfs_fsync", hz/10); | |
1346 | retry = 0; | |
1347 | } else { | |
1348 | splx(s); | |
1349 | } | |
1350 | /* try again */ | |
1351 | goto loop; | |
1352 | } | |
1353 | } | |
1354 | splx(s); | |
1355 | ||
1356 | metasync: | |
1357 | tv = time; | |
1358 | if (vp->v_flag & VSYSTEM) { | |
1359 | if (VTOF(vp)->fcbBTCBPtr != NULL) | |
1360 | BTSetLastSync(VTOF(vp), tv.tv_sec); | |
1361 | cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE); | |
1362 | } else /* User file */ { | |
1363 | retval = VOP_UPDATE(ap->a_vp, &tv, &tv, wait); | |
1364 | ||
1365 | /* When MNT_WAIT is requested push out any delayed meta data */ | |
1366 | if ((retval == 0) && wait && cp->c_hint && | |
1367 | !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) { | |
1368 | hfs_metasync(VTOHFS(vp), cp->c_hint, ap->a_p); | |
1369 | } | |
1370 | ||
1371 | // make sure that we've really been called from the user | |
1372 | // fsync() and if so push out any pending transactions | |
1373 | // that this file might is a part of (and get them on | |
1374 | // stable storage). | |
1375 | if (vp->v_flag & VFULLFSYNC) { | |
1376 | if (hfsmp->jnl) { | |
1377 | journal_flush(hfsmp->jnl); | |
1378 | } else { | |
1379 | VOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NOCRED, ap->a_p); | |
1380 | } | |
1381 | } | |
1382 | } | |
1383 | ||
1384 | return (retval); | |
1385 | } | |
1386 | ||
1387 | /* Sync an hfs catalog b-tree node */ | |
1388 | static int | |
1389 | hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p) | |
1390 | { | |
1391 | struct vnode *vp; | |
1392 | struct buf *bp; | |
1393 | struct buf *nbp; | |
1394 | int s; | |
1395 | ||
1396 | vp = HFSTOVCB(hfsmp)->catalogRefNum; | |
1397 | ||
1398 | // XXXdbg - don't need to do this on a journaled volume | |
1399 | if (hfsmp->jnl) { | |
1400 | return 0; | |
1401 | } | |
1402 | ||
1403 | if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p) != 0) | |
1404 | return (0); | |
1405 | ||
1406 | /* | |
1407 | * Look for a matching node that has been delayed | |
1408 | * but is not part of a set (B_LOCKED). | |
1409 | */ | |
1410 | s = splbio(); | |
1411 | for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { | |
1412 | nbp = bp->b_vnbufs.le_next; | |
1413 | if (bp->b_flags & B_BUSY) | |
1414 | continue; | |
1415 | if (bp->b_lblkno == node) { | |
1416 | if (bp->b_flags & B_LOCKED) | |
1417 | break; | |
1418 | ||
1419 | bremfree(bp); | |
1420 | bp->b_flags |= B_BUSY; | |
1421 | splx(s); | |
1422 | (void) VOP_BWRITE(bp); | |
1423 | goto exit; | |
1424 | } | |
1425 | } | |
1426 | splx(s); | |
1427 | exit: | |
1428 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); | |
1429 | ||
1430 | return (0); | |
1431 | } | |
1432 | ||
1433 | __private_extern__ | |
1434 | int | |
1435 | hfs_btsync(struct vnode *vp, int sync_transaction) | |
1436 | { | |
1437 | struct cnode *cp = VTOC(vp); | |
1438 | register struct buf *bp; | |
1439 | struct timeval tv; | |
1440 | struct buf *nbp; | |
1441 | struct hfsmount *hfsmp = VTOHFS(vp); | |
1442 | int s; | |
1443 | ||
1444 | /* | |
1445 | * Flush all dirty buffers associated with b-tree. | |
1446 | */ | |
1447 | loop: | |
1448 | s = splbio(); | |
1449 | ||
1450 | for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { | |
1451 | nbp = bp->b_vnbufs.le_next; | |
1452 | if ((bp->b_flags & B_BUSY)) | |
1453 | continue; | |
1454 | if ((bp->b_flags & B_DELWRI) == 0) | |
1455 | panic("hfs_btsync: not dirty (bp 0x%x hfsmp 0x%x)", bp, hfsmp); | |
1456 | ||
1457 | // XXXdbg | |
1458 | if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) { | |
1459 | if ((bp->b_flags & B_META) == 0) { | |
1460 | panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n", | |
1461 | bp, hfsmp->jnl); | |
1462 | } | |
1463 | // if journal_active() returns >= 0 then the journal is ok and we | |
1464 | // shouldn't do anything to this locked block (because it is part | |
1465 | // of a transaction). otherwise we'll just go through the normal | |
1466 | // code path and flush the buffer. | |
1467 | if (journal_active(hfsmp->jnl) >= 0) { | |
1468 | continue; | |
1469 | } | |
1470 | } | |
1471 | ||
1472 | if (sync_transaction && !(bp->b_flags & B_LOCKED)) | |
1473 | continue; | |
1474 | ||
1475 | bremfree(bp); | |
1476 | bp->b_flags |= B_BUSY; | |
1477 | bp->b_flags &= ~B_LOCKED; | |
1478 | ||
1479 | splx(s); | |
1480 | ||
1481 | (void) bawrite(bp); | |
1482 | ||
1483 | goto loop; | |
1484 | } | |
1485 | splx(s); | |
1486 | ||
1487 | tv = time; | |
1488 | if ((vp->v_flag & VSYSTEM) && (VTOF(vp)->fcbBTCBPtr != NULL)) | |
1489 | (void) BTSetLastSync(VTOF(vp), tv.tv_sec); | |
1490 | cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE); | |
1491 | ||
1492 | return 0; | |
1493 | } | |
1494 | ||
1495 | /* | |
1496 | * Rmdir system call. | |
1497 | #% rmdir dvp L U U | |
1498 | #% rmdir vp L U U | |
1499 | # | |
1500 | vop_rmdir { | |
1501 | IN WILLRELE struct vnode *dvp; | |
1502 | IN WILLRELE struct vnode *vp; | |
1503 | IN struct componentname *cnp; | |
1504 | ||
1505 | */ | |
1506 | static int | |
1507 | hfs_rmdir(ap) | |
1508 | struct vop_rmdir_args /* { | |
1509 | struct vnode *a_dvp; | |
1510 | struct vnode *a_vp; | |
1511 | struct componentname *a_cnp; | |
1512 | } */ *ap; | |
1513 | { | |
1514 | return (hfs_removedir(ap->a_dvp, ap->a_vp, ap->a_cnp, 0)); | |
1515 | } | |
1516 | ||
1517 | /* | |
1518 | * hfs_removedir | |
1519 | */ | |
1520 | static int | |
1521 | hfs_removedir(dvp, vp, cnp, options) | |
1522 | struct vnode *dvp; | |
1523 | struct vnode *vp; | |
1524 | struct componentname *cnp; | |
1525 | int options; | |
1526 | { | |
1527 | struct proc *p = cnp->cn_proc; | |
1528 | struct cnode *cp; | |
1529 | struct cnode *dcp; | |
1530 | struct hfsmount * hfsmp; | |
1531 | struct timeval tv; | |
1532 | cat_cookie_t cookie = {0}; | |
1533 | int error = 0, started_tr = 0, grabbed_lock = 0; | |
1534 | ||
1535 | cp = VTOC(vp); | |
1536 | dcp = VTOC(dvp); | |
1537 | hfsmp = VTOHFS(vp); | |
1538 | ||
1539 | if (dcp == cp) { | |
1540 | vrele(dvp); | |
1541 | vput(vp); | |
1542 | return (EINVAL); /* cannot remove "." */ | |
1543 | } | |
1544 | ||
1545 | #if QUOTA | |
1546 | (void)hfs_getinoquota(cp); | |
1547 | #endif | |
1548 | // XXXdbg | |
1549 | hfs_global_shared_lock_acquire(hfsmp); | |
1550 | grabbed_lock = 1; | |
1551 | if (hfsmp->jnl) { | |
1552 | if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { | |
1553 | goto out; | |
1554 | } | |
1555 | started_tr = 1; | |
1556 | } | |
1557 | ||
1558 | if (!(options & HFSRM_SKIP_RESERVE)) { | |
1559 | /* | |
1560 | * Reserve some space in the Catalog file. | |
1561 | */ | |
1562 | if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) { | |
1563 | goto out; | |
1564 | } | |
1565 | } | |
1566 | ||
1567 | /* | |
1568 | * Verify the directory is empty (and valid). | |
1569 | * (Rmdir ".." won't be valid since | |
1570 | * ".." will contain a reference to | |
1571 | * the current directory and thus be | |
1572 | * non-empty.) | |
1573 | */ | |
1574 | if (cp->c_entries != 0) { | |
1575 | error = ENOTEMPTY; | |
1576 | goto out; | |
1577 | } | |
1578 | if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) { | |
1579 | error = EPERM; | |
1580 | goto out; | |
1581 | } | |
1582 | ||
1583 | /* Remove the entry from the namei cache: */ | |
1584 | cache_purge(vp); | |
1585 | ||
1586 | /* Lock catalog b-tree */ | |
1587 | error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); | |
1588 | if (error) goto out; | |
1589 | ||
1590 | if (cp->c_entries > 0) | |
1591 | panic("hfs_rmdir: attempting to delete a non-empty directory!"); | |
1592 | /* Remove entry from catalog */ | |
1593 | error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr); | |
1594 | ||
1595 | /* Unlock catalog b-tree */ | |
1596 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); | |
1597 | if (error) goto out; | |
1598 | ||
1599 | #if QUOTA | |
1600 | (void)hfs_chkiq(cp, -1, NOCRED, 0); | |
1601 | #endif /* QUOTA */ | |
1602 | ||
1603 | /* The parent lost a child */ | |
1604 | if (dcp->c_entries > 0) | |
1605 | dcp->c_entries--; | |
1606 | if (dcp->c_nlink > 0) | |
1607 | dcp->c_nlink--; | |
1608 | dcp->c_flag |= C_CHANGE | C_UPDATE; | |
1609 | tv = time; | |
1610 | (void) VOP_UPDATE(dvp, &tv, &tv, 0); | |
1611 | HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); | |
1612 | ||
1613 | hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID)); | |
1614 | ||
1615 | cp->c_mode = 0; /* Makes the vnode go away...see inactive */ | |
1616 | cp->c_flag |= C_NOEXISTS; | |
1617 | out: | |
1618 | if (!(options & HFSRM_PARENT_LOCKED)) { | |
1619 | vput(dvp); | |
1620 | } | |
1621 | HFS_KNOTE(vp, NOTE_DELETE); | |
1622 | vput(vp); | |
1623 | ||
1624 | if (!(options & HFSRM_SKIP_RESERVE)) { | |
1625 | cat_postflight(hfsmp, &cookie, p); | |
1626 | } | |
1627 | // XXXdbg | |
1628 | if (started_tr) { | |
1629 | journal_end_transaction(hfsmp->jnl); | |
1630 | } | |
1631 | if (grabbed_lock) { | |
1632 | hfs_global_shared_lock_release(hfsmp); | |
1633 | } | |
1634 | ||
1635 | return (error); | |
1636 | } | |
1637 | ||
1638 | /* | |
1639 | ||
1640 | #% remove dvp L U U | |
1641 | #% remove vp L U U | |
1642 | # | |
1643 | vop_remove { | |
1644 | IN WILLRELE struct vnode *dvp; | |
1645 | IN WILLRELE struct vnode *vp; | |
1646 | IN struct componentname *cnp; | |
1647 | ||
1648 | */ | |
1649 | ||
1650 | static int | |
1651 | hfs_remove(ap) | |
1652 | struct vop_remove_args /* { | |
1653 | struct vnode *a_dvp; | |
1654 | struct vnode *a_vp; | |
1655 | struct componentname *a_cnp; | |
1656 | } */ *ap; | |
1657 | { | |
1658 | return (hfs_removefile(ap->a_dvp, ap->a_vp, ap->a_cnp, 0)); | |
1659 | } | |
1660 | ||
1661 | ||
1662 | ||
1663 | /* | |
1664 | * hfs_removefile | |
1665 | * | |
1666 | * Similar to hfs_remove except there are additional options. | |
1667 | */ | |
1668 | static int | |
1669 | hfs_removefile(dvp, vp, cnp, options) | |
1670 | struct vnode *dvp; | |
1671 | struct vnode *vp; | |
1672 | struct componentname *cnp; | |
1673 | int options; | |
1674 | { | |
1675 | struct vnode *rvp = NULL; | |
1676 | struct cnode *cp; | |
1677 | struct cnode *dcp; | |
1678 | struct hfsmount *hfsmp; | |
1679 | struct proc *p = cnp->cn_proc; | |
1680 | int dataforkbusy = 0; | |
1681 | int rsrcforkbusy = 0; | |
1682 | int truncated = 0; | |
1683 | struct timeval tv; | |
1684 | cat_cookie_t cookie = {0}; | |
1685 | int error = 0; | |
1686 | int started_tr = 0, grabbed_lock = 0; | |
1687 | int refcount, isbigfile = 0; | |
1688 | ||
1689 | /* Directories should call hfs_rmdir! */ | |
1690 | if (vp->v_type == VDIR) { | |
1691 | error = EISDIR; | |
1692 | goto out; | |
1693 | } | |
1694 | ||
1695 | cp = VTOC(vp); | |
1696 | dcp = VTOC(dvp); | |
1697 | hfsmp = VTOHFS(vp); | |
1698 | ||
1699 | if (cp->c_parentcnid != dcp->c_cnid) { | |
1700 | error = EINVAL; | |
1701 | goto out; | |
1702 | } | |
1703 | ||
1704 | /* Make sure a remove is permitted */ | |
1705 | if ((cp->c_flags & (IMMUTABLE | APPEND)) || | |
1706 | (VTOC(dvp)->c_flags & APPEND) || | |
1707 | VNODE_IS_RSRC(vp)) { | |
1708 | error = EPERM; | |
1709 | goto out; | |
1710 | } | |
1711 | ||
1712 | /* | |
1713 | * Aquire a vnode for a non-empty resource fork. | |
1714 | * (needed for VOP_TRUNCATE) | |
1715 | */ | |
1716 | if (cp->c_blocks - VTOF(vp)->ff_blocks) { | |
1717 | error = hfs_vgetrsrc(hfsmp, vp, &rvp, p); | |
1718 | if (error) | |
1719 | goto out; | |
1720 | } | |
1721 | ||
1722 | // XXXdbg - don't allow deleting the journal or journal_info_block | |
1723 | if (hfsmp->jnl && cp->c_datafork) { | |
1724 | struct HFSPlusExtentDescriptor *extd; | |
1725 | ||
1726 | extd = &cp->c_datafork->ff_extents[0]; | |
1727 | if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) { | |
1728 | error = EPERM; | |
1729 | goto out; | |
1730 | } | |
1731 | } | |
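| /* | |
|  * (A hedged reading of the check above: it only looks at the first | |
|  *  data extent, and only on journaled volumes.  If that extent starts | |
|  *  at the journal info block or at the journal's start block the file | |
|  *  is presumably one of the volume's own journal files, and deleting | |
|  *  it would invalidate the active journal, hence the EPERM.) | |
|  */ | |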
1732 | ||
1733 | /* | |
1734 | * Check if this file is being used. | |
1735 | * | |
1736 | * The namei done for the remove took a reference on vp (plus we may | |
1737 | * hold one more if C_VPREFHELD), and we took a ref on rvp. Pass the | |
1738 | * count of references we hold to ubc_isinuse() when checking busy. | |
1739 | */ | |
1740 | if (VTOC(vp)->c_flag & C_VPREFHELD) { | |
1741 | refcount = 2; | |
1742 | } else { | |
1743 | refcount = 1; | |
1744 | } | |
1745 | if (UBCISVALID(vp) && ubc_isinuse(vp, refcount)) | |
1746 | dataforkbusy = 1; | |
1747 | if (rvp && UBCISVALID(rvp) && ubc_isinuse(rvp, 1)) | |
1748 | rsrcforkbusy = 1; | |
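| /* | |
|  * Illustrative case (not from the original source): if another | |
|  * process still has this file open or mapped, ubc_isinuse() sees | |
|  * more references than the "refcount" we account for above, so | |
|  * dataforkbusy is set and the file is orphaned into the private | |
|  * directory below instead of being deleted outright. | |
|  */ | |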
1749 | ||
1750 | // need this to check if we have to break the deletion | |
1751 | // into multiple pieces | |
1752 | isbigfile = (VTOC(vp)->c_datafork->ff_size >= HFS_BIGFILE_SIZE); | |
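| /* | |
|  * Large files (ff_size >= HFS_BIGFILE_SIZE) are handled like busy | |
|  * files below: the truncate here is skipped and the file is renamed | |
|  * into the private directory, presumably so that a single transaction | |
|  * never has to free a very large extent chain all at once. | |
|  */ | |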
1753 | ||
1754 | /* | |
1755 | * Carbon semantics prohibit deleting busy files. | |
1756 | * (enforced when NODELETEBUSY is requested) | |
1757 | */ | |
1758 | if ((dataforkbusy || rsrcforkbusy) && | |
1759 | ((cnp->cn_flags & NODELETEBUSY) || | |
1760 | (hfsmp->hfs_privdir_desc.cd_cnid == 0))) { | |
1761 | error = EBUSY; | |
1762 | goto out; | |
1763 | } | |
1764 | ||
1765 | #if QUOTA | |
1766 | (void)hfs_getinoquota(cp); | |
1767 | #endif /* QUOTA */ | |
1768 | ||
1769 | // XXXdbg | |
1770 | hfs_global_shared_lock_acquire(hfsmp); | |
1771 | grabbed_lock = 1; | |
1772 | if (hfsmp->jnl) { | |
1773 | if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { | |
1774 | goto out; | |
1775 | } | |
1776 | started_tr = 1; | |
1777 | } | |
1778 | ||
1779 | if (!(options & HFSRM_SKIP_RESERVE)) { | |
1780 | /* | |
1781 | * Reserve some space in the Catalog file. | |
1782 | */ | |
1783 | if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) { | |
1784 | goto out; | |
1785 | } | |
1786 | } | |
1787 | ||
1788 | /* Remove our entry from the namei cache. */ | |
1789 | cache_purge(vp); | |
1790 | ||
1791 | // XXXdbg - if we're journaled, kill any dirty symlink buffers | |
1792 | if (hfsmp->jnl && vp->v_type == VLNK && vp->v_dirtyblkhd.lh_first) { | |
1793 | struct buf *bp, *nbp; | |
1794 | ||
1795 | recheck: | |
1796 | for (bp=vp->v_dirtyblkhd.lh_first; bp; bp=nbp) { | |
1797 | nbp = bp->b_vnbufs.le_next; | |
1798 | ||
1799 | if ((bp->b_flags & B_BUSY)) { | |
1800 | // if it was busy, someone else must be dealing | |
1801 | // with it so just move on. | |
1802 | continue; | |
1803 | } | |
1804 | ||
1805 | if (!(bp->b_flags & B_META)) { | |
1806 | panic("hfs: symlink bp @ 0x%x is not marked meta-data!\n", bp); | |
1807 | } | |
1808 | ||
1809 | // if it's part of the current transaction, kill it. | |
1810 | if (bp->b_flags & B_LOCKED) { | |
1811 | bremfree(bp); | |
1812 | bp->b_flags |= B_BUSY; | |
1813 | journal_kill_block(hfsmp->jnl, bp); | |
1814 | goto recheck; | |
1815 | } | |
1816 | } | |
1817 | } | |
1818 | // XXXdbg | |
1819 | ||
1820 | /* | |
1821 | * Truncate any non-busy forks. Busy forks will | |
1822 | * get truncated when their vnode goes inactive. | |
1823 | * | |
1824 | * (Note: hard links are truncated in VOP_INACTIVE) | |
1825 | */ | |
1826 | if ((cp->c_flag & C_HARDLINK) == 0) { | |
1827 | int mode = cp->c_mode; | |
1828 | ||
1829 | if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) { | |
1830 | cp->c_mode = 0; /* Suppress VOP_UPDATES */ | |
1831 | error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p); | |
1832 | cp->c_mode = mode; | |
1833 | if (error) | |
1834 | goto out; | |
1835 | truncated = 1; | |
1836 | } | |
1837 | if (!rsrcforkbusy && rvp) { | |
1838 | cp->c_mode = 0; /* Suppress VOP_UPDATES */ | |
1839 | error = VOP_TRUNCATE(rvp, (off_t)0, IO_NDELAY, NOCRED, p); | |
1840 | cp->c_mode = mode; | |
1841 | if (error) | |
1842 | goto out; | |
1843 | truncated = 1; | |
1844 | } | |
1845 | } | |
1846 | /* | |
1847 | * There are 3 remove cases to consider: | |
1848 | * 1. File is a hardlink ==> remove the link | |
1849 | * 2. File is busy (in use) ==> move/rename the file | |
1850 | * 3. File is not in use ==> remove the file | |
1851 | */ | |
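| /* | |
|  * Roughly, the three cases map onto the branches below as follows: | |
|  *   1. hard link  ==> cat_delete() of just this link's record, plus | |
|  *      an orphan-rename (MAKE_DELETED_NAME) if this was the last link | |
|  *   2. busy or big file ==> cat_rename() into the private (hidden) | |
|  *      directory under a MAKE_DELETED_NAME() name | |
|  *   3. otherwise  ==> cat_delete() of the file's own catalog record | |
|  */ | |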
1852 | ||
1853 | if (cp->c_flag & C_HARDLINK) { | |
1854 | struct cat_desc desc; | |
1855 | ||
1856 | if ((cnp->cn_flags & HASBUF) == 0 || | |
1857 | cnp->cn_nameptr[0] == '\0') { | |
1858 | error = ENOENT; /* name missing! */ | |
1859 | goto out; | |
1860 | } | |
1861 | ||
1862 | /* Setup a descriptor for the link */ | |
1863 | bzero(&desc, sizeof(desc)); | |
1864 | desc.cd_nameptr = cnp->cn_nameptr; | |
1865 | desc.cd_namelen = cnp->cn_namelen; | |
1866 | desc.cd_parentcnid = dcp->c_cnid; | |
1867 | /* XXX - if cnid is out of sync then the wrong thread rec will get deleted. */ | |
1868 | desc.cd_cnid = cp->c_cnid; | |
1869 | ||
1870 | /* Lock catalog b-tree */ | |
1871 | error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); | |
1872 | if (error) | |
1873 | goto out; | |
1874 | ||
1875 | /* Delete the link record */ | |
1876 | error = cat_delete(hfsmp, &desc, &cp->c_attr); | |
1877 | ||
1878 | if ((error == 0) && (--cp->c_nlink < 1)) { | |
1879 | char inodename[32]; | |
1880 | char delname[32]; | |
1881 | struct cat_desc to_desc; | |
1882 | struct cat_desc from_desc; | |
1883 | ||
1884 | /* | |
1885 | * This is now essentially an open deleted file. | |
1886 | * Rename it to reflect this state which makes | |
1887 | * orphan file cleanup easier (see hfs_remove_orphans). | |
1888 | * Note: a rename failure here is not fatal. | |
1889 | */ | |
1890 | MAKE_INODE_NAME(inodename, cp->c_rdev); | |
1891 | bzero(&from_desc, sizeof(from_desc)); | |
1892 | from_desc.cd_nameptr = inodename; | |
1893 | from_desc.cd_namelen = strlen(inodename); | |
1894 | from_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid; | |
1895 | from_desc.cd_flags = 0; | |
1896 | from_desc.cd_cnid = cp->c_fileid; | |
1897 | ||
1898 | MAKE_DELETED_NAME(delname, cp->c_fileid); | |
1899 | bzero(&to_desc, sizeof(to_desc)); | |
1900 | to_desc.cd_nameptr = delname; | |
1901 | to_desc.cd_namelen = strlen(delname); | |
1902 | to_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid; | |
1903 | to_desc.cd_flags = 0; | |
1904 | to_desc.cd_cnid = cp->c_fileid; | |
1905 | ||
1906 | (void) cat_rename(hfsmp, &from_desc, &hfsmp->hfs_privdir_desc, | |
1907 | &to_desc, (struct cat_desc *)NULL); | |
1908 | cp->c_flag |= C_DELETED; | |
1909 | } | |
1910 | ||
1911 | /* Unlock the Catalog */ | |
1912 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); | |
1913 | ||
1914 | if (error != 0) | |
1915 | goto out; | |
1916 | ||
1917 | cp->c_flag |= C_CHANGE; | |
1918 | tv = time; | |
1919 | (void) VOP_UPDATE(vp, &tv, &tv, 0); | |
1920 | ||
1921 | hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID)); | |
1922 | ||
1923 | } else if (dataforkbusy || rsrcforkbusy || isbigfile) { | |
1924 | char delname[32]; | |
1925 | struct cat_desc to_desc; | |
1926 | struct cat_desc todir_desc; | |
1927 | ||
1928 | /* | |
1929 | * Orphan this file (move to hidden directory). | |
1930 | */ | |
1931 | bzero(&todir_desc, sizeof(todir_desc)); | |
1932 | todir_desc.cd_parentcnid = 2; | |
1933 | ||
1934 | MAKE_DELETED_NAME(delname, cp->c_fileid); | |
1935 | bzero(&to_desc, sizeof(to_desc)); | |
1936 | to_desc.cd_nameptr = delname; | |
1937 | to_desc.cd_namelen = strlen(delname); | |
1938 | to_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid; | |
1939 | to_desc.cd_flags = 0; | |
1940 | to_desc.cd_cnid = cp->c_cnid; | |
1941 | ||
1942 | /* Lock catalog b-tree */ | |
1943 | error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); | |
1944 | if (error) | |
1945 | goto out; | |
1946 | ||
1947 | error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, | |
1948 | &to_desc, (struct cat_desc *)NULL); | |
1949 | ||
1950 | // XXXdbg - only bump this count if we were successful | |
1951 | if (error == 0) { | |
1952 | hfsmp->hfs_privdir_attr.ca_entries++; | |
1953 | } | |
1954 | (void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc, | |
1955 | &hfsmp->hfs_privdir_attr, NULL, NULL); | |
1956 | ||
1957 | /* Unlock the Catalog */ | |
1958 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); | |
1959 | if (error) goto out; | |
1960 | ||
1961 | cp->c_flag |= C_CHANGE | C_DELETED | C_NOEXISTS; | |
1962 | --cp->c_nlink; | |
1963 | tv = time; | |
1964 | (void) VOP_UPDATE(vp, &tv, &tv, 0); | |
1965 | ||
1966 | } else /* Not busy */ { | |
1967 | ||
1968 | if (cp->c_blocks > 0) { | |
1969 | #if 0 | |
1970 | panic("hfs_remove: attempting to delete a non-empty file!"); | |
1971 | #else | |
1972 | printf("hfs_remove: attempting to delete a non-empty file %s\n", | |
1973 | cp->c_desc.cd_nameptr); | |
1974 | error = EBUSY; | |
1975 | goto out; | |
1976 | #endif | |
1977 | } | |
1978 | ||
1979 | /* Lock catalog b-tree */ | |
1980 | error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); | |
1981 | if (error) | |
1982 | goto out; | |
1983 | ||
1984 | error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr); | |
1985 | ||
1986 | if (error && error != ENXIO && error != ENOENT && truncated) { | |
1987 | if ((cp->c_datafork && cp->c_datafork->ff_size != 0) || | |
1988 | (cp->c_rsrcfork && cp->c_rsrcfork->ff_size != 0)) { | |
1989 | panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)", | |
1990 | error, cp->c_datafork->ff_size, cp->c_rsrcfork->ff_size); | |
1991 | } else { | |
1992 | printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n", | |
1993 | cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error); | |
1994 | } | |
1995 | } | |
1996 | ||
1997 | /* Unlock the Catalog */ | |
1998 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); | |
1999 | if (error) goto out; | |
2000 | ||
2001 | #if QUOTA | |
2002 | (void)hfs_chkiq(cp, -1, NOCRED, 0); | |
2003 | #endif /* QUOTA */ | |
2004 | ||
2005 | cp->c_mode = 0; | |
2006 | truncated = 0; // because the catalog entry is gone | |
2007 | cp->c_flag |= C_CHANGE | C_NOEXISTS; | |
2008 | --cp->c_nlink; | |
2009 | hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID)); | |
2010 | } | |
2011 | ||
2012 | /* | |
2013 | * All done with this cnode's descriptor... | |
2014 | * | |
2015 | * Note: all future catalog calls for this cnode must be | |
2016 | * by fileid only. This is OK for HFS (which doesn't have | |
2017 | * file thread records) since HFS doesn't support hard | |
2018 | * links or the removal of busy files. | |
2019 | */ | |
2020 | cat_releasedesc(&cp->c_desc); | |
2021 | ||
2022 | /* In all three cases the parent lost a child */ | |
2023 | if (dcp->c_entries > 0) | |
2024 | dcp->c_entries--; | |
2025 | if (dcp->c_nlink > 0) | |
2026 | dcp->c_nlink--; | |
2027 | dcp->c_flag |= C_CHANGE | C_UPDATE; | |
2028 | tv = time; | |
2029 | (void) VOP_UPDATE(dvp, &tv, &tv, 0); | |
2030 | HFS_KNOTE(dvp, NOTE_WRITE); | |
2031 | ||
2032 | out: | |
2033 | /* All done with component name... */ | |
2034 | if ((options & HFSRM_SAVE_NAME) == 0 && | |
2035 | (cnp != 0) && | |
2036 | (cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME)) { | |
2037 | char *tmp = cnp->cn_pnbuf; | |
2038 | cnp->cn_pnbuf = NULL; | |
2039 | cnp->cn_flags &= ~HASBUF; | |
2040 | FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); | |
2041 | } | |
2042 | ||
2043 | if (!(options & HFSRM_SKIP_RESERVE)) { | |
2044 | cat_postflight(hfsmp, &cookie, p); | |
2045 | } | |
2046 | ||
2047 | /* Commit the truncation to the catalog record */ | |
2048 | if (truncated) { | |
2049 | cp->c_flag |= C_CHANGE | C_UPDATE | C_FORCEUPDATE; | |
2050 | tv = time; | |
2051 | (void) VOP_UPDATE(vp, &tv, &tv, 0); | |
2052 | } | |
2053 | ||
2054 | // XXXdbg | |
2055 | if (started_tr) { | |
2056 | journal_end_transaction(hfsmp->jnl); | |
2057 | } | |
2058 | if (grabbed_lock) { | |
2059 | hfs_global_shared_lock_release(hfsmp); | |
2060 | } | |
2061 | ||
2062 | HFS_KNOTE(vp, NOTE_DELETE); | |
2063 | if (rvp) { | |
2064 | HFS_KNOTE(rvp, NOTE_DELETE); | |
2065 | vrele(rvp); | |
2066 | }; | |
2067 | ||
2068 | if (error) { | |
2069 | vput(vp); | |
2070 | } else { | |
2071 | VOP_UNLOCK(vp, 0, p); | |
2072 | // XXXdbg - try to prevent the lost ubc_info panic | |
2073 | if ((cp->c_flag & C_HARDLINK) == 0 || cp->c_nlink == 0) { | |
2074 | (void) ubc_uncache(vp); | |
2075 | } | |
2076 | vrele(vp); | |
2077 | } | |
2078 | if (!(options & HFSRM_PARENT_LOCKED)) { | |
2079 | vput(dvp); | |
2080 | } | |
2081 | ||
2082 | return (error); | |
2083 | } | |
2084 | ||
2085 | ||
2086 | __private_extern__ void | |
2087 | replace_desc(struct cnode *cp, struct cat_desc *cdp) | |
2088 | { | |
2089 | /* First release allocated name buffer */ | |
2090 | if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) { | |
2091 | char *name = cp->c_desc.cd_nameptr; | |
2092 | ||
2093 | cp->c_desc.cd_nameptr = 0; | |
2094 | cp->c_desc.cd_namelen = 0; | |
2095 | cp->c_desc.cd_flags &= ~CD_HASBUF; | |
2096 | remove_name(name); | |
2097 | } | |
2098 | bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc)); | |
2099 | ||
2100 | /* Cnode now owns the name buffer */ | |
2101 | cdp->cd_nameptr = 0; | |
2102 | cdp->cd_namelen = 0; | |
2103 | cdp->cd_flags &= ~CD_HASBUF; | |
2104 | } | |
2105 | ||
2106 | ||
2107 | /* | |
2108 | # | |
2109 | #% rename fdvp U U U | |
2110 | #% rename fvp U U U | |
2111 | #% rename tdvp L U U | |
2112 | #% rename tvp X U U | |
2113 | # | |
2114 | */ | |
2115 | /* | |
2116 | * Rename a cnode. | |
2117 | * | |
2118 | * The VFS layer guarantees that source and destination will | |
2119 | * either both be directories, or both not be directories. | |
2120 | * | |
2121 | * When the target is a directory, hfs_rename must ensure | |
2122 | * that it is empty. | |
2123 | * | |
2124 | * The rename system call is responsible for freeing | |
2125 | * the pathname buffers (i.e. no need to call VOP_ABORTOP). | |
2126 | */ | |
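| /* | |
|  * A note on the exit protocol used below: whatever the outcome, any | |
|  * vnode locks taken are dropped at "out:", all four vnodes are | |
|  * vrele'd, and once the target (tvp) has actually been deleted any | |
|  * later failure is reported as EIO rather than the raw error. | |
|  */ | |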
2127 | ||
2128 | static int | |
2129 | hfs_rename(ap) | |
2130 | struct vop_rename_args /* { | |
2131 | struct vnode *a_fdvp; | |
2132 | struct vnode *a_fvp; | |
2133 | struct componentname *a_fcnp; | |
2134 | struct vnode *a_tdvp; | |
2135 | struct vnode *a_tvp; | |
2136 | struct componentname *a_tcnp; | |
2137 | } */ *ap; | |
2138 | { | |
2139 | struct vnode *tvp = ap->a_tvp; | |
2140 | struct vnode *tdvp = ap->a_tdvp; | |
2141 | struct vnode *fvp = ap->a_fvp; | |
2142 | struct vnode *fdvp = ap->a_fdvp; | |
2143 | struct componentname *tcnp = ap->a_tcnp; | |
2144 | struct componentname *fcnp = ap->a_fcnp; | |
2145 | struct proc *p = fcnp->cn_proc; | |
2146 | struct cnode *fcp = NULL; | |
2147 | struct cnode *fdcp = NULL; | |
2148 | struct cnode *tdcp = VTOC(tdvp); | |
2149 | struct cat_desc from_desc; | |
2150 | struct cat_desc to_desc; | |
2151 | struct cat_desc out_desc; | |
2152 | struct hfsmount *hfsmp = NULL; | |
2153 | struct timeval tv; | |
2154 | cat_cookie_t cookie = {0}; | |
2155 | int fdvp_locked, fvp_locked, tdvp_locked, tvp_locked; | |
2156 | int tvp_deleted; | |
2157 | int started_tr = 0, grabbed_lock = 0; | |
2158 | int error = 0; | |
2159 | ||
2160 | ||
2161 | /* Establish our vnode lock state. */ | |
2162 | tdvp_locked = 1; | |
2163 | tvp_locked = (tvp != 0); | |
2164 | fdvp_locked = 0; | |
2165 | fvp_locked = 0; | |
2166 | tvp_deleted = 0; | |
2167 | ||
2168 | /* | |
2169 | * Check for cross-device rename. | |
2170 | */ | |
2171 | if ((fvp->v_mount != tdvp->v_mount) || | |
2172 | (tvp && (fvp->v_mount != tvp->v_mount))) { | |
2173 | error = EXDEV; | |
2174 | goto out; | |
2175 | } | |
2176 | ||
2177 | /* | |
2178 | * When fvp matches tvp they must be case variants | |
2179 | * or hard links. | |
2180 | * | |
2181 | * In some cases tvp will be locked; in other cases | |
2182 | * it will be unlocked with no reference. Normalize the | |
2183 | * state here (unlocked with a reference) so that | |
2184 | * we can exit in a known state. | |
2185 | */ | |
2186 | if (fvp == tvp) { | |
2187 | if (VOP_ISLOCKED(tvp) && | |
2188 | (VTOC(tvp)->c_lock.lk_lockholder == p->p_pid) && | |
2189 | (VTOC(tvp)->c_lock.lk_lockthread == current_thread())) { | |
2190 | vput(tvp); | |
2191 | } | |
2192 | tvp = NULL; | |
2193 | tvp_locked = 0; | |
2194 | ||
2195 | /* | |
2196 | * If this is a hard link with different parents | |
2197 | * and it's not a case variant, then keep tvp | |
2198 | * around for removal. | |
2199 | */ | |
2200 | if ((VTOC(fvp)->c_flag & C_HARDLINK) && | |
2201 | ((fdvp != tdvp) || | |
2202 | (hfs_namecmp(fcnp->cn_nameptr, fcnp->cn_namelen, | |
2203 | tcnp->cn_nameptr, tcnp->cn_namelen) != 0))) { | |
2204 | tvp = fvp; | |
2205 | vref(tvp); | |
2206 | } | |
2207 | } | |
2208 | ||
2209 | /* | |
2210 | * The following edge case is caught here: | |
2211 | * (to cannot be a descendant of from) | |
2212 | * | |
2213 | * o fdvp | |
2214 | * / | |
2215 | * / | |
2216 | * o fvp | |
2217 | * \ | |
2218 | * \ | |
2219 | * o tdvp | |
2220 | * / | |
2221 | * / | |
2222 | * o tvp | |
2223 | */ | |
2224 | if (tdcp->c_parentcnid == VTOC(fvp)->c_cnid) { | |
2225 | error = EINVAL; | |
2226 | goto out; | |
2227 | } | |
2228 | ||
2229 | /* | |
2230 | * The following two edge cases are caught here: | |
2231 | * (note tvp is not empty) | |
2232 | * | |
2233 | * o tdvp o tdvp | |
2234 | * / / | |
2235 | * / / | |
2236 | * o tvp tvp o fdvp | |
2237 | * \ \ | |
2238 | * \ \ | |
2239 | * o fdvp o fvp | |
2240 | * / | |
2241 | * / | |
2242 | * o fvp | |
2243 | */ | |
2244 | if (tvp && (tvp->v_type == VDIR) && (VTOC(tvp)->c_entries != 0)) { | |
2245 | error = ENOTEMPTY; | |
2246 | goto out; | |
2247 | } | |
2248 | ||
2249 | /* | |
2250 | * The following edge case is caught here: | |
2251 | * (the from child and parent are the same) | |
2252 | * | |
2253 | * o tdvp | |
2254 | * / | |
2255 | * / | |
2256 | * fdvp o fvp | |
2257 | */ | |
2258 | if (fdvp == fvp) { | |
2259 | error = EINVAL; | |
2260 | goto out; | |
2261 | } | |
2262 | ||
2263 | /* | |
2264 | * Make sure "from" vnode and its parent are changeable. | |
2265 | */ | |
2266 | if ((VTOC(fvp)->c_flags & (IMMUTABLE | APPEND)) || | |
2267 | (VTOC(fdvp)->c_flags & APPEND)) { | |
2268 | error = EPERM; | |
2269 | goto out; | |
2270 | } | |
2271 | ||
2272 | hfsmp = VTOHFS(tdvp); | |
2273 | ||
2274 | /* | |
2275 | * If the destination parent directory is "sticky", then the | |
2276 | * user must own the parent directory, or the destination of | |
2277 | * the rename, otherwise the destination may not be changed | |
2278 | * (except by root). This implements append-only directories. | |
2279 | * | |
2280 | * Note that checks for immutable and write access are done | |
2281 | * by the call to VOP_REMOVE. | |
2282 | */ | |
2283 | if (tvp && (tdcp->c_mode & S_ISTXT) && | |
2284 | (tcnp->cn_cred->cr_uid != 0) && | |
2285 | (tcnp->cn_cred->cr_uid != tdcp->c_uid) && | |
2286 | (hfs_owner_rights(hfsmp, VTOC(tvp)->c_uid, tcnp->cn_cred, p, false)) ) { | |
2287 | error = EPERM; | |
2288 | goto out; | |
2289 | } | |
2290 | ||
2291 | #if QUOTA | |
2292 | if (tvp) | |
2293 | (void)hfs_getinoquota(VTOC(tvp)); | |
2294 | #endif | |
2295 | ||
2296 | /* | |
2297 | * Lock all the vnodes before starting a journal transaction. | |
2298 | */ | |
2299 | ||
2300 | /* | |
2301 | * Simple case (same parent) - just lock child (fvp). | |
2302 | */ | |
2303 | if (fdvp == tdvp) { | |
2304 | if ((error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))) | |
2305 | goto out; | |
2306 | fvp_locked = 1; | |
2307 | goto vnlocked; | |
2308 | } | |
2309 | ||
2310 | /* | |
2311 | * If fdvp is the parent of tdvp then we'll need to | |
2312 | * drop tdvp's lock before acquiring a lock on fdvp. | |
2313 | * | |
2314 | * fdvp | |
2315 | * o | |
2316 | * / \ | |
2317 | * / \ | |
2318 | * tdvp o o fvp | |
2319 | * \ | |
2320 | * \ | |
2321 | * o tvp | |
2322 | * | |
2323 | * | |
2324 | * If the parent directories are unrelated then we'll | |
2325 | * need to acquire their vnode locks in vnode address | |
2326 | * order. Otherwise we can race with another rename | |
2327 | * call that involves the same vnodes except that to | |
2328 | * and from are switched and potentially deadlock. | |
2329 | * [i.e. rename("a/b", "c/d") vs rename("c/d", "a/b")] | |
2330 | * | |
2331 | * If it's not either of the two above cases then we | |
2332 | * can safely lock fdvp and fvp. | |
2333 | */ | |
2334 | if ((VTOC(fdvp)->c_cnid == VTOC(tdvp)->c_parentcnid) || | |
2335 | ((VTOC(tdvp)->c_cnid != VTOC(fdvp)->c_parentcnid) && | |
2336 | (fdvp < tdvp))) { | |
2337 | ||
2338 | /* Drop locks on tvp and tdvp */ | |
2339 | if (tvp_locked) { | |
2340 | VOP_UNLOCK(tvp, 0, p); | |
2341 | tvp_locked = 0; | |
2342 | } | |
2343 | VOP_UNLOCK(tdvp, 0, p); | |
2344 | tdvp_locked = 0; | |
2345 | ||
2346 | /* Acquire locks in correct order */ | |
2347 | if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p))) | |
2348 | goto out; | |
2349 | fdvp_locked = 1; | |
2350 | if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p))) | |
2351 | goto out; | |
2352 | tdvp_locked = 1; | |
2353 | ||
2354 | /* | |
2355 | * Now that the parents are locked only one thread | |
2356 | * can continue. So the lock order of the children | |
2357 | * doesn't really matter. | |
2358 | */ | |
2359 | if (tvp == fvp) { | |
2360 | if ((error = vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p))) | |
2361 | goto out; | |
2362 | tvp_locked = 1; | |
2363 | } else { | |
2364 | if (tvp) { | |
2365 | if ((error = vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p))) | |
2366 | goto out; | |
2367 | tvp_locked = 1; | |
2368 | } | |
2369 | if ((error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))) | |
2370 | goto out; | |
2371 | fvp_locked = 1; | |
2372 | } | |
2373 | ||
2374 | } else /* OK to lock fdvp and fvp */ { | |
2375 | if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p))) | |
2376 | goto out; | |
2377 | fdvp_locked = 1; | |
2378 | if ((error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))) | |
2379 | goto out; | |
2380 | if (tvp == fvp) | |
2381 | tvp_locked = 1; | |
2382 | else | |
2383 | fvp_locked = 1; | |
2384 | } | |
2385 | ||
2386 | vnlocked: | |
2387 | fdcp = VTOC(fdvp); | |
2388 | fcp = VTOC(fvp); | |
2389 | ||
2390 | /* | |
2391 | * While fvp is still locked, purge it from the name cache and | |
2392 | * grab its c_cnid value. Note that the removal of tvp (below) | |
2393 | * can drop fvp's lock when fvp == tvp. | |
2394 | */ | |
2395 | cache_purge(fvp); | |
2396 | ||
2397 | /* | |
2398 | * When a file moves out of "Cleanup At Startup" | |
2399 | * we can drop its NODUMP status. | |
2400 | */ | |
2401 | if ((fcp->c_flags & UF_NODUMP) && | |
2402 | (fvp->v_type == VREG) && | |
2403 | (fdvp != tdvp) && | |
2404 | (fdcp->c_desc.cd_nameptr != NULL) && | |
2405 | (strcmp(fdcp->c_desc.cd_nameptr, CARBON_TEMP_DIR_NAME) == 0)) { | |
2406 | fcp->c_flags &= ~UF_NODUMP; | |
2407 | fcp->c_flag |= C_CHANGE; | |
2408 | tv = time; | |
2409 | (void) VOP_UPDATE(fvp, &tv, &tv, 0); | |
2410 | } | |
2411 | ||
2412 | bzero(&from_desc, sizeof(from_desc)); | |
2413 | from_desc.cd_nameptr = fcnp->cn_nameptr; | |
2414 | from_desc.cd_namelen = fcnp->cn_namelen; | |
2415 | from_desc.cd_parentcnid = fdcp->c_cnid; | |
2416 | from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED); | |
2417 | from_desc.cd_cnid = fcp->c_cnid; | |
2418 | ||
2419 | bzero(&to_desc, sizeof(to_desc)); | |
2420 | to_desc.cd_nameptr = tcnp->cn_nameptr; | |
2421 | to_desc.cd_namelen = tcnp->cn_namelen; | |
2422 | to_desc.cd_parentcnid = tdcp->c_cnid; | |
2423 | to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED); | |
2424 | to_desc.cd_cnid = fcp->c_cnid; | |
2425 | ||
2426 | hfs_global_shared_lock_acquire(hfsmp); | |
2427 | grabbed_lock = 1; | |
2428 | if (hfsmp->jnl) { | |
2429 | if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { | |
2430 | goto out; | |
2431 | } | |
2432 | started_tr = 1; | |
2433 | } | |
2434 | ||
2435 | /* | |
2436 | * Reserve some space in the Catalog file. | |
2437 | */ | |
2438 | if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) { | |
2439 | goto out; | |
2440 | } | |
2441 | ||
2442 | /* | |
2443 | * If the destination exists then it needs to be removed. | |
2444 | */ | |
2445 | ||
2446 | if (tvp) { | |
2447 | if (tvp != fvp) | |
2448 | cache_purge(tvp); | |
2449 | /* | |
2450 | * Note that hfs_removedir and hfs_removefile | |
2451 | * will keep tdvp locked with a reference. | |
2452 | * But tvp will lose its lock and reference. | |
2453 | */ | |
2454 | if (tvp->v_type == VDIR) | |
2455 | error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_RENAMEOPTS); | |
2456 | else | |
2457 | error = hfs_removefile(tdvp, tvp, tcnp, HFSRM_RENAMEOPTS); | |
2458 | ||
2459 | if (tvp == fvp) | |
2460 | fvp_locked = 0; | |
2461 | tvp = NULL; | |
2462 | tvp_locked = 0; | |
2463 | tvp_deleted = 1; | |
2464 | if (error) | |
2465 | goto out; | |
2466 | } | |
2467 | ||
2468 | /* | |
2469 | * All done with tvp and fvp | |
2470 | */ | |
2471 | ||
2472 | /* Lock catalog b-tree */ | |
2473 | error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); | |
2474 | if (error) | |
2475 | goto out; | |
2476 | ||
2477 | error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc); | |
2478 | ||
2479 | /* Unlock catalog b-tree */ | |
2480 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); | |
2481 | ||
2482 | if (error) { | |
2483 | goto out; | |
2484 | } | |
2485 | ||
2486 | /* Update cnode's catalog descriptor */ | |
2487 | if (fvp_locked) { | |
2488 | replace_desc(fcp, &out_desc); | |
2489 | fcp->c_parentcnid = tdcp->c_cnid; | |
2490 | fcp->c_hint = 0; | |
2491 | } | |
2492 | ||
2493 | hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_RMDIR : VOL_RMFILE, | |
2494 | (fdcp->c_cnid == kHFSRootFolderID)); | |
2495 | hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_MKDIR : VOL_MKFILE, | |
2496 | (tdcp->c_cnid == kHFSRootFolderID)); | |
2497 | ||
2498 | /* Update both parent directories. */ | |
2499 | tv = time; | |
2500 | if (fdvp != tdvp) { | |
2501 | tdcp->c_nlink++; | |
2502 | tdcp->c_entries++; | |
2503 | if (fdcp->c_nlink > 0) | |
2504 | fdcp->c_nlink--; | |
2505 | if (fdcp->c_entries > 0) | |
2506 | fdcp->c_entries--; | |
2507 | fdcp->c_flag |= C_CHANGE | C_UPDATE; | |
2508 | (void) VOP_UPDATE(fdvp, &tv, &tv, 0); | |
2509 | } | |
2510 | tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */ | |
2511 | tdcp->c_flag |= C_CHANGE | C_UPDATE; | |
2512 | (void) VOP_UPDATE(tdvp, &tv, &tv, 0); | |
2513 | ||
2514 | out: | |
2515 | if (hfsmp) { | |
2516 | cat_postflight(hfsmp, &cookie, p); | |
2517 | } | |
2518 | if (started_tr) { | |
2519 | journal_end_transaction(hfsmp->jnl); | |
2520 | } | |
2521 | if (grabbed_lock) { | |
2522 | hfs_global_shared_lock_release(hfsmp); | |
2523 | } | |
2524 | ||
2525 | /* Note that if hfs_removedir or hfs_removefile was invoked above they will already have | |
2526 | generated a NOTE_WRITE for tdvp and a NOTE_DELETE for tvp. | |
2527 | */ | |
2528 | if (error == 0) { | |
2529 | HFS_KNOTE(fvp, NOTE_RENAME); | |
2530 | HFS_KNOTE(fdvp, NOTE_WRITE); | |
2531 | if (tdvp != fdvp) HFS_KNOTE(tdvp, NOTE_WRITE); | |
2532 | }; | |
2533 | if (fvp_locked) { | |
2534 | VOP_UNLOCK(fvp, 0, p); | |
2535 | } | |
2536 | if (fdvp_locked) { | |
2537 | VOP_UNLOCK(fdvp, 0, p); | |
2538 | } | |
2539 | if (tdvp_locked) { | |
2540 | VOP_UNLOCK(tdvp, 0, p); | |
2541 | } | |
2542 | if (tvp_locked) { | |
2543 | VOP_UNLOCK(tvp, 0, p); | |
2544 | } | |
2545 | ||
2546 | vrele(fvp); | |
2547 | vrele(fdvp); | |
2548 | if (tvp) | |
2549 | vrele(tvp); | |
2550 | vrele(tdvp); | |
2551 | ||
2552 | /* After tvp is removed the only acceptable error is EIO */ | |
2553 | if (error && tvp_deleted) | |
2554 | error = EIO; | |
2555 | ||
2556 | return (error); | |
2557 | } | |
2558 | ||
2559 | ||
2560 | ||
2561 | /* | |
2562 | * Mkdir system call | |
2563 | #% mkdir dvp L U U | |
2564 | #% mkdir vpp - L - | |
2565 | # | |
2566 | vop_mkdir { | |
2567 | IN WILLRELE struct vnode *dvp; | |
2568 | OUT struct vnode **vpp; | |
2569 | IN struct componentname *cnp; | |
2570 | IN struct vattr *vap; | |
2571 | ||
2572 | We are responsible for freeing the namei buffer, | |
2573 | it is done in hfs_makenode() | |
2574 | */ | |
2575 | ||
2576 | static int | |
2577 | hfs_mkdir(ap) | |
2578 | struct vop_mkdir_args /* { | |
2579 | struct vnode *a_dvp; | |
2580 | struct vnode **a_vpp; | |
2581 | struct componentname *a_cnp; | |
2582 | struct vattr *a_vap; | |
2583 | } */ *ap; | |
2584 | { | |
2585 | struct vattr *vap = ap->a_vap; | |
2586 | ||
2587 | return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode), | |
2588 | ap->a_dvp, ap->a_vpp, ap->a_cnp)); | |
2589 | } | |
2590 | ||
2591 | ||
2592 | /* | |
2593 | * symlink -- make a symbolic link | |
2594 | #% symlink dvp L U U | |
2595 | #% symlink vpp - U - | |
2596 | # | |
2597 | # XXX - note that the return vnode has already been VRELE'ed | |
2598 | # by the filesystem layer. To use it you must use vget, | |
2599 | # possibly with a further namei. | |
2600 | # | |
2601 | vop_symlink { | |
2602 | IN WILLRELE struct vnode *dvp; | |
2603 | OUT WILLRELE struct vnode **vpp; | |
2604 | IN struct componentname *cnp; | |
2605 | IN struct vattr *vap; | |
2606 | IN char *target; | |
2607 | ||
2608 | We are responsible for freeing the namei buffer, | |
2609 | it is done in hfs_makenode(). | |
2610 | ||
2611 | */ | |
2612 | ||
2613 | static int | |
2614 | hfs_symlink(ap) | |
2615 | struct vop_symlink_args /* { | |
2616 | struct vnode *a_dvp; | |
2617 | struct vnode **a_vpp; | |
2618 | struct componentname *a_cnp; | |
2619 | struct vattr *a_vap; | |
2620 | char *a_target; | |
2621 | } */ *ap; | |
2622 | { | |
2623 | register struct vnode *vp, **vpp = ap->a_vpp; | |
2624 | struct hfsmount *hfsmp; | |
2625 | struct filefork *fp; | |
2626 | int len, error; | |
2627 | struct buf *bp = NULL; | |
2628 | ||
2629 | /* HFS standard disks don't support symbolic links */ | |
2630 | if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) { | |
2631 | VOP_ABORTOP(ap->a_dvp, ap->a_cnp); | |
2632 | vput(ap->a_dvp); | |
2633 | return (EOPNOTSUPP); | |
2634 | } | |
2635 | ||
2636 | /* Check for empty target name */ | |
2637 | if (ap->a_target[0] == 0) { | |
2638 | VOP_ABORTOP(ap->a_dvp, ap->a_cnp); | |
2639 | vput(ap->a_dvp); | |
2640 | return (EINVAL); | |
2641 | } | |
2642 | ||
2643 | ||
2644 | hfsmp = VTOHFS(ap->a_dvp); | |
2645 | ||
2646 | /* Create the vnode */ | |
2647 | if ((error = hfs_makenode(S_IFLNK | ap->a_vap->va_mode, | |
2648 | ap->a_dvp, vpp, ap->a_cnp))) { | |
2649 | return (error); | |
2650 | } | |
2651 | ||
2652 | vp = *vpp; | |
2653 | len = strlen(ap->a_target); | |
2654 | fp = VTOF(vp); | |
2655 | ||
2656 | #if QUOTA | |
2657 | (void)hfs_getinoquota(VTOC(vp)); | |
2658 | #endif /* QUOTA */ | |
2659 | ||
2660 | // XXXdbg | |
2661 | hfs_global_shared_lock_acquire(hfsmp); | |
2662 | if (hfsmp->jnl) { | |
2663 | if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { | |
2664 | hfs_global_shared_lock_release(hfsmp); | |
2665 | vput(vp); | |
2666 | return error; | |
2667 | } | |
2668 | } | |
2669 | ||
2670 | /* Allocate space for the link */ | |
2671 | error = VOP_TRUNCATE(vp, len, IO_NOZEROFILL, | |
2672 | ap->a_cnp->cn_cred, ap->a_cnp->cn_proc); | |
2673 | if (error) | |
2674 | goto out; /* XXX need to remove link */ | |
2675 | ||
2676 | /* Write the link to disk */ | |
2677 | bp = getblk(vp, 0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_phys_block_size), | |
2678 | 0, 0, BLK_META); | |
2679 | if (hfsmp->jnl) { | |
2680 | journal_modify_block_start(hfsmp->jnl, bp); | |
2681 | } | |
2682 | bzero(bp->b_data, bp->b_bufsize); | |
2683 | bcopy(ap->a_target, bp->b_data, len); | |
2684 | if (hfsmp->jnl) { | |
2685 | journal_modify_block_end(hfsmp->jnl, bp); | |
2686 | } else { | |
2687 | bawrite(bp); | |
2688 | } | |
2689 | out: | |
2690 | if (hfsmp->jnl) { | |
2691 | journal_end_transaction(hfsmp->jnl); | |
2692 | } | |
2693 | hfs_global_shared_lock_release(hfsmp); | |
2694 | vput(vp); | |
2695 | return (error); | |
2696 | } | |
2697 | ||
2698 | ||
2699 | /* | |
2700 | * Dummy dirents to simulate the "." and ".." entries of the directory | |
2701 | * in an hfs filesystem. HFS doesn't provide these on disk. Note that | |
2702 | * the size of these entries is the smallest needed to represent them | |
2703 | * (only 12 bytes each). | |
2704 | */ | |
2705 | static hfsdotentry rootdots[2] = { | |
2706 | { | |
2707 | 1, /* d_fileno */ | |
2708 | sizeof(struct hfsdotentry), /* d_reclen */ | |
2709 | DT_DIR, /* d_type */ | |
2710 | 1, /* d_namlen */ | |
2711 | "." /* d_name */ | |
2712 | }, | |
2713 | { | |
2714 | 1, /* d_fileno */ | |
2715 | sizeof(struct hfsdotentry), /* d_reclen */ | |
2716 | DT_DIR, /* d_type */ | |
2717 | 2, /* d_namlen */ | |
2718 | ".." /* d_name */ | |
2719 | } | |
2720 | }; | |
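| /* | |
|  * These static entries are shared by all readdir calls: only the | |
|  * d_fileno fields are patched per call (to the directory's cnid and | |
|  * its parent's cnid) before being copied out for offsets that fall | |
|  * within the first 2 * sizeof(struct hfsdotentry) bytes. | |
|  */ | |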
2721 | ||
2722 | /* 4.3 Note: | |
2723 | * There is some confusion as to what the semantics of uio_offset are. | |
2724 | * In ufs, it represents the actual byte offset within the directory | |
2725 | * "file." HFS, however, just uses it as an entry counter - essentially | |
2726 | * assuming that it has no meaning except to the hfs_readdir function. | |
2727 | * This approach would be more efficient here, but some callers may | |
2728 | * assume the uio_offset acts like a byte offset. NFS in fact | |
2729 | * monkeys around with the offset field a lot between readdir calls. | |
2730 | * | |
2731 | * The use of the resid uiop->uio_resid and uiop->uio_iov->iov_len | |
2732 | * fields is a mess as well. The libc function readdir() returns | |
2733 | * NULL (indicating the end of a directory) when either | |
2734 | * the getdirentries() syscall (which calls this and returns | |
2735 | * the size of the buffer passed in less the value of uiop->uio_resid) | |
2736 | * returns 0, or a direct record with a d_reclen of zero. | |
2737 | * nfs_server.c:rfs_readdir(), on the other hand, checks for the end | |
2738 | * of the directory by testing uiop->uio_resid == 0. The solution | |
2739 | * is to pad the size of the last struct direct in a given | |
2740 | * block to fill the block if we are not at the end of the directory. | |
2741 | */ | |
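| /* | |
|  * In this implementation uio_offset appears to be treated as a | |
|  * synthesized byte offset: offset 0 addresses ".", offset | |
|  * sizeof(struct hfsdotentry) addresses "..", and cat_getdirentries() | |
|  * continues from sizeof(rootdots) onward as if the directory were an | |
|  * array of packed dirent structs. | |
|  */ | |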
2742 | ||
2743 | ||
2744 | /* | |
2745 | * NOTE: We require a minimal buffer size of DIRBLKSIZ for two reasons. One, it is the same value | |
2746 | * returned by the stat() call as the block size. This is mentioned in the man page for getdirentries(): | |
2747 | * "Nbytes must be greater than or equal to the block size associated with the file, | |
2748 | * see stat(2)". Might as well settle on the same size as ufs. Second, this makes sure there is enough | |
2749 | * room for the . and .. entries that have to be added manually. | |
2750 | */ | |
2751 | ||
2752 | /* | |
2753 | #% readdir vp L L L | |
2754 | # | |
2755 | vop_readdir { | |
2756 | IN struct vnode *vp; | |
2757 | INOUT struct uio *uio; | |
2758 | IN struct ucred *cred; | |
2759 | INOUT int *eofflag; | |
2760 | OUT int *ncookies; | |
2761 | INOUT u_long **cookies; | |
2762 | */ | |
2763 | static int | |
2764 | hfs_readdir(ap) | |
2765 | struct vop_readdir_args /* { | |
2766 | struct vnode *vp; | |
2767 | struct uio *uio; | |
2768 | struct ucred *cred; | |
2769 | int *eofflag; | |
2770 | int *ncookies; | |
2771 | u_long **cookies; | |
2772 | } */ *ap; | |
2773 | { | |
2774 | register struct uio *uio = ap->a_uio; | |
2775 | struct cnode *cp = VTOC(ap->a_vp); | |
2776 | struct hfsmount *hfsmp = VTOHFS(ap->a_vp); | |
2777 | struct proc *p = current_proc(); | |
2778 | off_t off = uio->uio_offset; | |
2779 | int retval = 0; | |
2780 | int eofflag = 0; | |
2781 | void *user_start = NULL; | |
2782 | int user_len; | |
2783 | ||
2784 | int ncookies=0; | |
2785 | u_long *cookies=NULL; | |
2786 | u_long *cookiep=NULL; | |
2787 | ||
2788 | /* We assume it's all one big buffer... */ | |
2789 | if (uio->uio_iovcnt > 1 || uio->uio_resid < AVERAGE_HFSDIRENTRY_SIZE) | |
2790 | return EINVAL; | |
2791 | ||
2792 | // XXXdbg | |
2793 | // We have to lock the user's buffer here so that we won't | |
2794 | // fault on it after we've acquired a shared lock on the | |
2795 | // catalog file. The issue is that you can get a 3-way | |
2796 | // deadlock if someone else starts a transaction and then | |
2797 | // tries to lock the catalog file but can't because we're | |
2798 | // here and we can't service our page fault because VM is | |
2799 | // blocked trying to start a transaction as a result of | |
2800 | // trying to free up pages for our page fault. It's messy | |
2801 | // but it does happen on dual-processors that are paging | |
2802 | // heavily (see radar 3082639 for more info). By locking | |
2803 | // the buffer up-front we prevent ourselves from faulting | |
2804 | // while holding the shared catalog file lock. | |
2805 | // | |
2806 | // Fortunately this and hfs_search() are the only two places | |
2807 | // currently (10/30/02) that can fault on user data with a | |
2808 | // shared lock on the catalog file. | |
2809 | // | |
2810 | if (hfsmp->jnl && uio->uio_segflg == UIO_USERSPACE) { | |
2811 | user_start = uio->uio_iov->iov_base; | |
2812 | user_len = uio->uio_iov->iov_len; | |
2813 | ||
2814 | if ((retval = vslock(user_start, user_len)) != 0) { | |
2815 | return retval; | |
2816 | } | |
2817 | } | |
2818 | ||
2819 | /* Create the entries for . and .. */ | |
2820 | if (uio->uio_offset < sizeof(rootdots)) { | |
2821 | caddr_t dep; | |
2822 | size_t dotsize; | |
2823 | ||
2824 | rootdots[0].d_fileno = cp->c_cnid; | |
2825 | rootdots[1].d_fileno = cp->c_parentcnid; | |
2826 | ||
2827 | if (uio->uio_offset == 0) { | |
2828 | dep = (caddr_t) &rootdots[0]; | |
2829 | dotsize = 2* sizeof(struct hfsdotentry); | |
2830 | } else if (uio->uio_offset == sizeof(struct hfsdotentry)) { | |
2831 | dep = (caddr_t) &rootdots[1]; | |
2832 | dotsize = sizeof(struct hfsdotentry); | |
2833 | } else { | |
2834 | retval = EINVAL; | |
2835 | goto Exit; | |
2836 | } | |
2837 | ||
2838 | retval = uiomove(dep, dotsize, uio); | |
2839 | if (retval != 0) | |
2840 | goto Exit; | |
2841 | } | |
2842 | ||
2843 | if (ap->a_ncookies != NULL) { | |
2844 | /* | |
2845 | * These cookies are handles that allow NFS to restart | |
2846 | * scanning through a directory. If a directory is large | |
2847 | * enough, NFS will issue a successive readdir() with a | |
2848 | * uio->uio_offset that is equal to one of these cookies. | |
2849 | * | |
2850 | * The cookies that we generate are synthesized byte-offsets. | |
2851 | * The offset is where the dirent would be if the | |
2852 | * directory were an array of packed dirent structs. It is | |
2853 | * synthetic because that's not how directories are stored in | |
2854 | * HFS but other code expects that the cookie is a byte offset. | |
2855 | * | |
2856 | * We have to pre-allocate the cookies because cat_getdirentries() | |
2857 | * is the only one that can properly synthesize the offsets (since | |
2858 | * it may have to skip over entries and only it knows the true | |
2859 | * virtual offset of any particular directory entry). So we allocate | |
2860 | * a cookie table here and pass it in to cat_getdirentries(). | |
2861 | * | |
2862 | * Note that the handling of "." and ".." is mostly done here but | |
2863 | * cat_getdirentries() is aware of it. | |
2864 | * | |
2865 | * Only the NFS server uses cookies so fortunately this code is | |
2866 | * not executed unless the NFS server is issuing the readdir | |
2867 | * request. | |
2868 | * | |
2869 | * Also note that the NFS server is the one responsible for | |
2870 | * freeing the cookies even though we allocated them. Ick. | |
2871 | * | |
2872 | * We allocate a reasonable number of entries for the size of | |
2873 | * the buffer that we're going to fill in. cat_getdirentries() | |
2874 | * is smart enough to not overflow if there's more room in the | |
2875 | * buffer but not enough room in the cookie table. | |
2876 | */ | |
2877 | if (uio->uio_segflg != UIO_SYSSPACE) | |
2878 | panic("hfs_readdir: unexpected uio from NFS server"); | |
2879 | ||
2880 | ncookies = uio->uio_iov->iov_len / (AVERAGE_HFSDIRENTRY_SIZE/2); | |
2881 | MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK); | |
2882 | ||
2883 | *ap->a_ncookies = ncookies; | |
2884 | *ap->a_cookies = cookies; | |
2885 | } | |
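| /* | |
|  * Sizing note: the table gets iov_len / (AVERAGE_HFSDIRENTRY_SIZE/2) | |
|  * slots, i.e. roughly two cookies per average-sized entry; if the | |
|  * output buffer could hold more entries than that, cat_getdirentries() | |
|  * simply stops short rather than overflowing the cookie table (see | |
|  * the comment above). | |
|  */ | |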
2886 | ||
2887 | /* If there are no children then we're done */ | |
2888 | if (cp->c_entries == 0) { | |
2889 | eofflag = 1; | |
2890 | retval = 0; | |
2891 | if (cookies) { | |
2892 | cookies[0] = 0; | |
2893 | cookies[1] = sizeof(struct hfsdotentry); | |
2894 | } | |
2895 | goto Exit; | |
2896 | } | |
2897 | ||
2898 | /* Lock catalog b-tree */ | |
2899 | retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p); | |
2900 | if (retval) goto Exit; | |
2901 | ||
2902 | retval = cat_getdirentries(hfsmp, &cp->c_desc, cp->c_entries, uio, &eofflag, cookies, ncookies); | |
2903 | ||
2904 | /* Unlock catalog b-tree */ | |
2905 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); | |
2906 | ||
2907 | if (retval != E_NONE) { | |
2908 | goto Exit; | |
2909 | } | |
2910 | ||
2911 | /* were we already past eof ? */ | |
2912 | if (uio->uio_offset == off) { | |
2913 | retval = E_NONE; | |
2914 | goto Exit; | |
2915 | } | |
2916 | ||
2917 | cp->c_flag |= C_ACCESS; | |
2918 | ||
2919 | Exit:; | |
2920 | if (hfsmp->jnl && user_start) { | |
2921 | vsunlock(user_start, user_len, TRUE); | |
2922 | } | |
2923 | ||
2924 | if (ap->a_eofflag) | |
2925 | *ap->a_eofflag = eofflag; | |
2926 | ||
2927 | return (retval); | |
2928 | } | |
2929 | ||
2930 | ||
2931 | /* | |
2932 | * Return target name of a symbolic link | |
2933 | #% readlink vp L L L | |
2934 | # | |
2935 | vop_readlink { | |
2936 | IN struct vnode *vp; | |
2937 | INOUT struct uio *uio; | |
2938 | IN struct ucred *cred; | |
2939 | */ | |
2940 | ||
2941 | static int | |
2942 | hfs_readlink(ap) | |
2943 | struct vop_readlink_args /* { | |
2944 | struct vnode *a_vp; | |
2945 | struct uio *a_uio; | |
2946 | struct ucred *a_cred; | |
2947 | } */ *ap; | |
2948 | { | |
2949 | int retval; | |
2950 | struct vnode *vp = ap->a_vp; | |
2951 | struct cnode *cp; | |
2952 | struct filefork *fp; | |
2953 | ||
2954 | if (vp->v_type != VLNK) | |
2955 | return (EINVAL); | |
2956 | ||
2957 | cp = VTOC(vp); | |
2958 | fp = VTOF(vp); | |
2959 | ||
2960 | /* Zero length sym links are not allowed */ | |
2961 | if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) { | |
2962 | VTOVCB(vp)->vcbFlags |= kHFS_DamagedVolume; | |
2963 | return (EINVAL); | |
2964 | } | |
2965 | ||
2966 | /* Cache the path so we don't waste buffer cache resources */ | |
2967 | if (fp->ff_symlinkptr == NULL) { | |
2968 | struct buf *bp = NULL; | |
2969 | ||
2970 | MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK); | |
2971 | retval = meta_bread(vp, 0, | |
2972 | roundup((int)fp->ff_size, | |
2973 | VTOHFS(vp)->hfs_phys_block_size), | |
2974 | ap->a_cred, &bp); | |
2975 | if (retval) { | |
2976 | if (bp) | |
2977 | brelse(bp); | |
2978 | if (fp->ff_symlinkptr) { | |
2979 | FREE(fp->ff_symlinkptr, M_TEMP); | |
2980 | fp->ff_symlinkptr = NULL; | |
2981 | } | |
2982 | return (retval); | |
2983 | } | |
2984 | bcopy(bp->b_data, fp->ff_symlinkptr, (size_t)fp->ff_size); | |
2985 | if (bp) { | |
2986 | if (VTOHFS(vp)->jnl && (bp->b_flags & B_LOCKED) == 0) { | |
2987 | bp->b_flags |= B_INVAL; /* data no longer needed */ | |
2988 | } | |
2989 | brelse(bp); | |
2990 | } | |
2991 | } | |
2992 | retval = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio); | |
2993 | #if 1 | |
2994 | /* | |
2995 | * Keep track of blocks read | |
2996 | */ | |
2997 | if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (retval == 0)) { | |
2998 | ||
2999 | /* | |
3000 | * If this file hasn't been seen since the start of | |
3001 | * the current sampling period then start over. | |
3002 | */ | |
3003 | if (cp->c_atime < VTOHFS(vp)->hfc_timebase) | |
3004 | VTOF(vp)->ff_bytesread = fp->ff_size; | |
3005 | else | |
3006 | VTOF(vp)->ff_bytesread += fp->ff_size; | |
3007 | ||
3008 | // if (VTOF(vp)->ff_bytesread > fp->ff_size) | |
3009 | // cp->c_flag |= C_ACCESS; | |
3010 | } | |
3011 | #endif | |
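| /* | |
|  * (The block above feeds the recording stage of what appears to be | |
|  *  hot-file tracking: while hfc_stage == HFC_RECORDING, ff_bytesread | |
|  *  is restarted or accumulated for the current sampling period | |
|  *  depending on whether the cnode has been accessed since | |
|  *  hfc_timebase.) | |
|  */ | |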
3012 | return (retval); | |
3013 | } | |
3014 | ||
3015 | /* | |
3016 | * Lock a cnode. If it's already locked, set the WANT bit and sleep. | |
3017 | #% lock vp U L U | |
3018 | # | |
3019 | vop_lock { | |
3020 | IN struct vnode *vp; | |
3021 | IN int flags; | |
3022 | IN struct proc *p; | |
3023 | */ | |
3024 | ||
3025 | static int | |
3026 | hfs_lock(ap) | |
3027 | struct vop_lock_args /* { | |
3028 | struct vnode *a_vp; | |
3029 | int a_flags; | |
3030 | struct proc *a_p; | |
3031 | } */ *ap; | |
3032 | { | |
3033 | struct vnode *vp = ap->a_vp; | |
3034 | struct cnode *cp = VTOC(vp); | |
3035 | ||
3036 | return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, ap->a_p)); | |
3037 | } | |
3038 | ||
3039 | /* | |
3040 | * Unlock a cnode. | |
3041 | #% unlock vp L U L | |
3042 | # | |
3043 | vop_unlock { | |
3044 | IN struct vnode *vp; | |
3045 | IN int flags; | |
3046 | IN struct proc *p; | |
3047 | ||
3048 | */ | |
3049 | static int | |
3050 | hfs_unlock(ap) | |
3051 | struct vop_unlock_args /* { | |
3052 | struct vnode *a_vp; | |
3053 | int a_flags; | |
3054 | struct proc *a_p; | |
3055 | } */ *ap; | |
3056 | { | |
3057 | struct vnode *vp = ap->a_vp; | |
3058 | struct cnode *cp = VTOC(vp); | |
3059 | #if 0 | |
3060 | if (!lockstatus(&cp->c_lock)) { | |
3061 | printf("hfs_unlock: vnode %s wasn't locked!\n", | |
3062 | cp->c_desc.cd_nameptr ? cp->c_desc.cd_nameptr : ""); | |
3063 | } | |
3064 | #endif | |
3065 | return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE, | |
3066 | &vp->v_interlock, ap->a_p)); | |
3067 | } | |
3068 | ||
3069 | ||
3070 | /* | |
3071 | * Print out the contents of a cnode. | |
3072 | #% print vp = = = | |
3073 | # | |
3074 | vop_print { | |
3075 | IN struct vnode *vp; | |
3076 | */ | |
3077 | static int | |
3078 | hfs_print(ap) | |
3079 | struct vop_print_args /* { | |
3080 | struct vnode *a_vp; | |
3081 | } */ *ap; | |
3082 | { | |
3083 | struct vnode * vp = ap->a_vp; | |
3084 | struct cnode *cp = VTOC(vp); | |
3085 | ||
3086 | printf("tag VT_HFS, cnid %d, on dev %d, %d", cp->c_cnid, | |
3087 | major(cp->c_dev), minor(cp->c_dev)); | |
3088 | #if FIFO | |
3089 | if (vp->v_type == VFIFO) | |
3090 | fifo_printinfo(vp); | |
3091 | #endif /* FIFO */ | |
3092 | lockmgr_printinfo(&cp->c_lock); | |
3093 | printf("\n"); | |
3094 | return (0); | |
3095 | } | |
3096 | ||
3097 | ||
3098 | /* | |
3099 | * Check for a locked cnode. | |
3100 | #% islocked vp = = = | |
3101 | # | |
3102 | vop_islocked { | |
3103 | IN struct vnode *vp; | |
3104 | ||
3105 | */ | |
3106 | static int | |
3107 | hfs_islocked(ap) | |
3108 | struct vop_islocked_args /* { | |
3109 | struct vnode *a_vp; | |
3110 | } */ *ap; | |
3111 | { | |
3112 | return (lockstatus(&VTOC(ap->a_vp)->c_lock)); | |
3113 | } | |
3114 | ||
3115 | /* | |
3116 | ||
3117 | #% pathconf vp L L L | |
3118 | # | |
3119 | vop_pathconf { | |
3120 | IN struct vnode *vp; | |
3121 | IN int name; | |
3122 | OUT register_t *retval; | |
3123 | ||
3124 | */ | |
3125 | static int | |
3126 | hfs_pathconf(ap) | |
3127 | struct vop_pathconf_args /* { | |
3128 | struct vnode *a_vp; | |
3129 | int a_name; | |
3130 | int *a_retval; | |
3131 | } */ *ap; | |
3132 | { | |
3133 | int retval = 0; | |
3134 | ||
3135 | switch (ap->a_name) { | |
3136 | case _PC_LINK_MAX: | |
3137 | if (VTOVCB(ap->a_vp)->vcbSigWord == kHFSPlusSigWord) | |
3138 | *ap->a_retval = HFS_LINK_MAX; | |
3139 | else | |
3140 | *ap->a_retval = 1; | |
3141 | break; | |
3142 | case _PC_NAME_MAX: | |
3143 | *ap->a_retval = kHFSPlusMaxFileNameBytes; /* max # of characters times max UTF-8 bytes per character */ | |
3144 | break; | |
3145 | case _PC_PATH_MAX: | |
3146 | *ap->a_retval = PATH_MAX; /* 1024 */ | |
3147 | break; | |
3148 | case _PC_PIPE_BUF: | |
3149 | *ap->a_retval = PIPE_BUF; | |
3150 | break; | |
3151 | case _PC_CHOWN_RESTRICTED: | |
3152 | *ap->a_retval = 1; | |
3153 | break; | |
3154 | case _PC_NO_TRUNC: | |
3155 | *ap->a_retval = 0; | |
3156 | break; | |
3157 | case _PC_NAME_CHARS_MAX: | |
3158 | *ap->a_retval = kHFSPlusMaxFileNameChars; | |
3159 | break; | |
3160 | case _PC_CASE_SENSITIVE: | |
3161 | if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE) | |
3162 | *ap->a_retval = 1; | |
3163 | else | |
3164 | *ap->a_retval = 0; | |
3165 | break; | |
3166 | case _PC_CASE_PRESERVING: | |
3167 | *ap->a_retval = 1; | |
3168 | break; | |
3169 | default: | |
3170 | retval = EINVAL; | |
3171 | } | |
3172 | ||
3173 | return (retval); | |
3174 | } | |
3175 | ||
3176 | ||
3177 | /* | |
3178 | * Advisory record locking support | |
3179 | #% advlock vp U U U | |
3180 | # | |
3181 | vop_advlock { | |
3182 | IN struct vnode *vp; | |
3183 | IN caddr_t id; | |
3184 | IN int op; | |
3185 | IN struct flock *fl; | |
3186 | IN int flags; | |
3187 | ||
3188 | */ | |
3189 | static int | |
3190 | hfs_advlock(ap) | |
3191 | struct vop_advlock_args /* { | |
3192 | struct vnode *a_vp; | |
3193 | caddr_t a_id; | |
3194 | int a_op; | |
3195 | struct flock *a_fl; | |
3196 | int a_flags; | |
3197 | } */ *ap; | |
3198 | { | |
3199 | struct vnode *vp = ap->a_vp; | |
3200 | struct flock *fl = ap->a_fl; | |
3201 | struct hfslockf *lock; | |
3202 | struct filefork *fork; | |
3203 | off_t start, end; | |
3204 | int retval; | |
3205 | ||
3206 | /* Only regular files can have locks */ | |
3207 | if (vp->v_type != VREG) | |
3208 | return (EISDIR); | |
3209 | ||
3210 | fork = VTOF(ap->a_vp); | |
3211 | /* | |
3212 | * Avoid the common case of unlocking when cnode has no locks. | |
3213 | */ | |
3214 | if (fork->ff_lockf == (struct hfslockf *)0) { | |
3215 | if (ap->a_op != F_SETLK) { | |
3216 | fl->l_type = F_UNLCK; | |
3217 | return (0); | |
3218 | } | |
3219 | } | |
3220 | /* | |
3221 | * Convert the flock structure into a start and end. | |
3222 | */ | |
3223 | start = 0; | |
3224 | switch (fl->l_whence) { | |
3225 | case SEEK_SET: | |
3226 | case SEEK_CUR: | |
3227 | /* | |
3228 | * Caller is responsible for adding any necessary offset | |
3229 | * when SEEK_CUR is used. | |
3230 | */ | |
3231 | start = fl->l_start; | |
3232 | break; | |
3233 | case SEEK_END: | |
3234 | start = fork->ff_size + fl->l_start; | |
3235 | break; | |
3236 | default: | |
3237 | return (EINVAL); | |
3238 | } | |
3239 | ||
3240 | if (fl->l_len == 0) | |
3241 | end = -1; | |
3242 | else if (fl->l_len > 0) | |
3243 | end = start + fl->l_len - 1; | |
3244 | else { /* l_len is negative */ | |
3245 | end = start - 1; | |
3246 | start += fl->l_len; | |
3247 | } | |
3248 | if (start < 0) | |
3249 | return (EINVAL); | |
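| /* | |
|  * Worked example (illustrative numbers only): with l_whence == | |
|  * SEEK_SET, l_start == 100 and l_len == 50 the lock covers bytes | |
|  * [100, 149]; l_len == 0 yields end == -1, meaning "to end of file"; | |
|  * a negative l_len locks the |l_len| bytes ending just before | |
|  * l_start. | |
|  */ | |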
3250 | ||
3251 | /* | |
3252 | * Create the hfslockf structure | |
3253 | */ | |
3254 | MALLOC(lock, struct hfslockf *, sizeof *lock, M_LOCKF, M_WAITOK); | |
3255 | lock->lf_start = start; | |
3256 | lock->lf_end = end; | |
3257 | lock->lf_id = ap->a_id; | |
3258 | lock->lf_fork = fork; | |
3259 | lock->lf_type = fl->l_type; | |
3260 | lock->lf_next = (struct hfslockf *)0; | |
3261 | TAILQ_INIT(&lock->lf_blkhd); | |
3262 | lock->lf_flags = ap->a_flags; | |
3263 | /* | |
3264 | * Do the requested operation. | |
3265 | */ | |
3266 | switch(ap->a_op) { | |
3267 | case F_SETLK: | |
3268 | retval = hfs_setlock(lock); | |
3269 | break; | |
3270 | case F_UNLCK: | |
3271 | retval = hfs_clearlock(lock); | |
3272 | FREE(lock, M_LOCKF); | |
3273 | break; | |
3274 | case F_GETLK: | |
3275 | retval = hfs_getlock(lock, fl); | |
3276 | FREE(lock, M_LOCKF); | |
3277 | break; | |
3278 | default: | |
3279 | retval = EINVAL; | |
3280 | _FREE(lock, M_LOCKF); | |
3281 | break; | |
3282 | } | |
3283 | ||
3284 | return (retval); | |
3285 | } | |
3286 | ||
3287 | ||
3288 | ||
3289 | /* | |
3290 | * Update the access, modified, and node change times as specified | |
3291 | * by the C_ACCESS, C_UPDATE, and C_CHANGE flags respectively. The | |
3292 | * C_MODIFIED flag is used to specify that the node needs to be | |
3293 | * updated but that the times have already been set. The access and | |
3294 | * modified times are input parameters but the node change time is | |
3295 | * always taken from the current time. If waitfor is set, then wait | |
3296 | * for the disk write of the node to complete. | |
3297 | */ | |
3298 | /* | |
3299 | #% update vp L L L | |
3300 | IN struct vnode *vp; | |
3301 | IN struct timeval *access; | |
3302 | IN struct timeval *modify; | |
3303 | IN int waitfor; | |
3304 | */ | |
3305 | static int | |
3306 | hfs_update(ap) | |
3307 | struct vop_update_args /* { | |
3308 | struct vnode *a_vp; | |
3309 | struct timeval *a_access; | |
3310 | struct timeval *a_modify; | |
3311 | int a_waitfor; | |
3312 | } */ *ap; | |
3313 | { | |
3314 | struct vnode *vp = ap->a_vp; | |
3315 | struct cnode *cp = VTOC(ap->a_vp); | |
3316 | struct proc *p; | |
3317 | struct cat_fork *dataforkp = NULL; | |
3318 | struct cat_fork *rsrcforkp = NULL; | |
3319 | struct cat_fork datafork; | |
3320 | int updateflag; | |
3321 | struct hfsmount *hfsmp; | |
3322 | int error; | |
3323 | ||
3324 | hfsmp = VTOHFS(vp); | |
3325 | ||
3326 | /* XXX do we really want to clear the system cnode flags here???? */ | |
3327 | if (((vp->v_flag & VSYSTEM) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))|| | |
3328 | (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) || | |
3329 | (cp->c_mode == 0)) { | |
3330 | cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE); | |
3331 | return (0); | |
3332 | } | |
3333 | ||
3334 | updateflag = cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_FORCEUPDATE); | |
3335 | ||
3336 | /* Nothing to update. */ | |
3337 | if (updateflag == 0) { | |
3338 | return (0); | |
3339 | } | |
3340 | /* HFS standard doesn't have access times. */ | |
3341 | if ((updateflag == C_ACCESS) && (VTOVCB(vp)->vcbSigWord == kHFSSigWord)) { | |
3342 | return (0); | |
3343 | } | |
3344 | if (updateflag & C_ACCESS) { | |
3345 | /* | |
3346 | * When the access time is the only thing changing | |
3347 | * then make sure it's sufficiently newer before | |
3348 | * committing it to disk. | |
3349 | */ | |
3350 | if ((updateflag == C_ACCESS) && | |
3351 | (ap->a_access->tv_sec < (cp->c_atime + ATIME_ONDISK_ACCURACY))) { | |
3352 | return (0); | |
3353 | } | |
3354 | cp->c_atime = ap->a_access->tv_sec; | |
3355 | } | |
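| /* | |
|  * Example: if only C_ACCESS is set and the new atime is within | |
|  * ATIME_ONDISK_ACCURACY seconds of the cnode's current atime, the | |
|  * update is skipped entirely, which avoids a catalog write for every | |
|  * read of a frequently accessed file. | |
|  */ | |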
3356 | if (updateflag & C_UPDATE) { | |
3357 | cp->c_mtime = ap->a_modify->tv_sec; | |
3358 | cp->c_mtime_nsec = ap->a_modify->tv_usec * 1000; | |
3359 | } | |
3360 | if (updateflag & C_CHANGE) { | |
3361 | cp->c_ctime = time.tv_sec; | |
3362 | /* | |
3363 | * HFS dates that WE set must be adjusted for DST | |
3364 | */ | |
3365 | if ((VTOVCB(vp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) { | |
3366 | cp->c_ctime += 3600; | |
3367 | cp->c_mtime = cp->c_ctime; | |
3368 | } | |
3369 | } | |
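| /* | |
|  * (HFS standard volumes store dates in local time, so while DST is in | |
|  *  effect the timestamps set here are pushed forward an hour to stay | |
|  *  consistent with the on-disk convention; HFS Plus dates are left | |
|  *  alone.) | |
|  */ | |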
3370 | ||
3371 | if (cp->c_datafork) | |
3372 | dataforkp = &cp->c_datafork->ff_data; | |
3373 | if (cp->c_rsrcfork) | |
3374 | rsrcforkp = &cp->c_rsrcfork->ff_data; | |
3375 | ||
3376 | p = current_proc(); | |
3377 | ||
3378 | /* | |
3379 | * For delayed allocations updates are | |
3380 | * postponed until an fsync or the file | |
3381 | * gets written to disk. | |
3382 | * | |
3383 | * Deleted files can defer meta data updates until inactive. | |
3384 | * | |
3385 | * If we're ever called with the C_FORCEUPDATE flag though | |
3386 | * we have to do the update. | |
3387 | */ | |
3388 | if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 && | |
3389 | (ISSET(cp->c_flag, C_DELETED) || | |
3390 | (dataforkp && cp->c_datafork->ff_unallocblocks) || | |
3391 | (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) { | |
3392 | if (updateflag & (C_CHANGE | C_UPDATE)) | |
3393 | hfs_volupdate(hfsmp, VOL_UPDATE, 0); | |
3394 | cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE); | |
3395 | cp->c_flag |= C_MODIFIED; | |
3396 | ||
3397 | HFS_KNOTE(vp, NOTE_ATTRIB); | |
3398 | ||
3399 | return (0); | |
3400 | } | |
3401 | ||
3402 | ||
3403 | // XXXdbg | |
3404 | hfs_global_shared_lock_acquire(hfsmp); | |
3405 | if (hfsmp->jnl) { | |
3406 | if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { | |
3407 | hfs_global_shared_lock_release(hfsmp); | |
3408 | return error; | |
3409 | } | |
3410 | } | |
3411 | ||
3412 | ||
3413 | /* | |
3414 | * For files with invalid ranges (holes) the on-disk | |
3415 | * field representing the size of the file (cf_size) | |
3416 | * must be no larger than the start of the first hole. | |
3417 | */ | |
3418 | if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) { | |
3419 | bcopy(dataforkp, &datafork, sizeof(datafork)); | |
3420 | datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start; | |
3421 | dataforkp = &datafork; | |
3422 | } else if (dataforkp && (cp->c_datafork->ff_unallocblocks != 0)) { | |
3423 | // always make sure the block count and the size | |
3424 | // of the file match the number of blocks actually | |
3425 | // allocated to the file on disk | |
3426 | bcopy(dataforkp, &datafork, sizeof(datafork)); | |
3427 | // make sure that we don't assign a negative block count | |
3428 | if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) { | |
3429 | panic("hfs: ff_blocks %d is less than unalloc blocks %d\n", | |
3430 | cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks); | |
3431 | } | |
3432 | datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks); | |
3433 | datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize; | |
3434 | dataforkp = &datafork; | |
3435 | } | |
3436 | ||
3437 | /* | |
3438 | * Lock the Catalog b-tree file. | |
3439 | * A shared lock is sufficient since an update doesn't change | |
3440 | * the tree and the lock on vp protects the cnode. | |
3441 | */ | |
3442 | error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p); | |
3443 | if (error) { | |
3444 | if (hfsmp->jnl) { | |
3445 | journal_end_transaction(hfsmp->jnl); | |
3446 | } | |
3447 | hfs_global_shared_lock_release(hfsmp); | |
3448 | return (error); | |
3449 | } | |
3450 | ||
3451 | /* XXX - waitfor is not enforced */ | |
3452 | error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp); | |
3453 | ||
3454 | /* Unlock the Catalog b-tree file. */ | |
3455 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); | |
3456 | ||
3457 | if (updateflag & (C_CHANGE | C_UPDATE | C_FORCEUPDATE)) | |
3458 | hfs_volupdate(hfsmp, VOL_UPDATE, 0); | |
3459 | ||
3460 | /* After the updates are finished, clear the flags */ | |
3461 | cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_FORCEUPDATE); | |
3462 | ||
3463 | // XXXdbg | |
3464 | if (hfsmp->jnl) { | |
3465 | journal_end_transaction(hfsmp->jnl); | |
3466 | } | |
3467 | hfs_global_shared_lock_release(hfsmp); | |
3468 | ||
3469 | HFS_KNOTE(vp, NOTE_ATTRIB); | |
3470 | ||
3471 | return (error); | |
3472 | } | |
3473 | ||
3474 | /* | |
3475 | * Allocate a new node | |
3476 | * | |
3477 | * On exit, the namei buffer is released (unless the | |
3478 | * caller asked for SAVESTART). | |
3479 | */ | |
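| /* | |
| * A minimal usage sketch (hypothetical caller, not taken from this file): | |
| * a VOP_CREATE-style routine would pass the requested mode, the locked | |
| * parent directory vnode, a result pointer, and the component name, | |
| * assuming the BSD MAKEIMODE() helper from <sys/vnode.h>: | |
| * | |
| *	error = hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode), | |
| *	                     dvp, vpp, cnp); | |
| */ | |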
3480 | static int | |
3481 | hfs_makenode(mode, dvp, vpp, cnp) | |
3482 | int mode; | |
3483 | struct vnode *dvp; | |
3484 | struct vnode **vpp; | |
3485 | struct componentname *cnp; | |
3486 | { | |
3487 | struct cnode *cp; | |
3488 | struct cnode *dcp; | |
3489 | struct vnode *tvp; | |
3490 | struct hfsmount *hfsmp; | |
3491 | struct timeval tv; | |
3492 | struct proc *p; | |
3493 | struct cat_desc in_desc, out_desc; | |
3494 | struct cat_attr attr; | |
3495 | cat_cookie_t cookie = {0}; | |
3496 | int error, started_tr = 0, grabbed_lock = 0; | |
3497 | enum vtype vnodetype; | |
3498 | ||
3499 | p = cnp->cn_proc; | |
3500 | dcp = VTOC(dvp); | |
3501 | hfsmp = VTOHFS(dvp); | |
3502 | *vpp = NULL; | |
3503 | tvp = NULL; | |
3504 | bzero(&out_desc, sizeof(out_desc)); | |
3505 | ||
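| 	/* Default to a regular file when no file type was specified in the mode. */ | |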
3506 | if ((mode & S_IFMT) == 0) | |
3507 | mode |= S_IFREG; | |
3508 | vnodetype = IFTOVT(mode); | |
3509 | ||
3510 | /* Check if an unmount is in progress */ | |
3511 | if (VTOVFS(dvp)->mnt_kern_flag & MNTK_UNMOUNT) { | |
3512 | error = EPERM; | |
3513 | goto exit; | |
3514 | } | |
3515 | /* Check if we're out of usable disk space. */ | |
3516 | if ((suser(cnp->cn_cred, NULL) != 0) && (hfs_freeblks(hfsmp, 1) <= 0)) { | |
3517 | error = ENOSPC; | |
3518 | goto exit; | |
3519 | } | |
3520 | ||
3521 | /* Setup the default attributes */ | |
3522 | bzero(&attr, sizeof(attr)); | |
3523 | attr.ca_mode = mode; | |
3524 | attr.ca_nlink = vnodetype == VDIR ? 2 : 1; | |
3525 | attr.ca_mtime = time.tv_sec; | |
3526 | attr.ca_mtime_nsec = time.tv_usec * 1000; | |
3527 | if ((VTOVCB(dvp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) { | |
3528 | attr.ca_mtime += 3600; /* Same as what hfs_update does */ | |
3529 | } | |
3530 | attr.ca_atime = attr.ca_ctime = attr.ca_itime = attr.ca_mtime; | |
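| 	/* | |
| 	 * Pick the owner and group: volumes mounted with unknown permissions | |
| 	 * use the mount's defaults; otherwise the group comes from the parent | |
| 	 * directory and the owner from the caller's credentials (symlinks | |
| 	 * inherit the parent's owner). | |
| 	 */ | |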
3531 | if (VTOVFS(dvp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { | |
3532 | attr.ca_uid = hfsmp->hfs_uid; | |
3533 | attr.ca_gid = hfsmp->hfs_gid; | |
3534 | } else { | |
3535 | if (vnodetype == VLNK) | |
3536 | attr.ca_uid = dcp->c_uid; | |
3537 | else | |
3538 | attr.ca_uid = cnp->cn_cred->cr_uid; | |
3539 | attr.ca_gid = dcp->c_gid; | |
3540 | } | |
3541 | /* | |
3542 | * Don't tag as a special file (BLK or CHR) until *after* | |
3543 | * hfs_getnewvnode is called. This ensures that any | |
3544 | * alias checking is deferred until hfs_mknod completes. | |
3545 | */ | |
3546 | if (vnodetype == VBLK || vnodetype == VCHR) | |
3547 | attr.ca_mode = (attr.ca_mode & ~S_IFMT) | S_IFREG; | |
3548 | ||
3549 | /* Tag symlinks with a type and creator. */ | |
3550 | if (vnodetype == VLNK) { | |
3551 | struct FndrFileInfo *fip; | |
3552 | ||
3553 | fip = (struct FndrFileInfo *)&attr.ca_finderinfo; | |
3554 | fip->fdType = SWAP_BE32(kSymLinkFileType); | |
3555 | fip->fdCreator = SWAP_BE32(kSymLinkCreator); | |
3556 | } | |
3557 | if ((attr.ca_mode & S_ISGID) && | |
3558 | !groupmember(dcp->c_gid, cnp->cn_cred) && | |
3559 | suser(cnp->cn_cred, NULL)) { | |
3560 | attr.ca_mode &= ~S_ISGID; | |
3561 | } | |
3562 | if (cnp->cn_flags & ISWHITEOUT) | |
3563 | attr.ca_flags |= UF_OPAQUE; | |
3564 | ||
3565 | /* Setup the descriptor */ | |
3566 | bzero(&in_desc, sizeof(in_desc)); | |
3567 | in_desc.cd_nameptr = cnp->cn_nameptr; | |
3568 | in_desc.cd_namelen = cnp->cn_namelen; | |
3569 | in_desc.cd_parentcnid = dcp->c_cnid; | |
3570 | in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0; | |
3571 | ||
3572 | // XXXdbg | |
3573 | hfs_global_shared_lock_acquire(hfsmp); | |
3574 | grabbed_lock = 1; | |
3575 | if (hfsmp->jnl) { | |
3576 | if ((error = journal_start_transaction(hfsmp->jnl)) != 0) { | |
3577 | goto exit; | |
3578 | } | |
3579 | started_tr = 1; | |
3580 | } | |
3581 | ||
3582 | /* | |
3583 | * Reserve some space in the Catalog file. | |
3584 | * | |
3585 | * (we also add CAT_DELETE since our getnewvnode | |
3586 | * request can cause an hfs_inactive call to | |
3587 | * delete an unlinked file) | |
3588 | */ | |
3589 | if ((error = cat_preflight(hfsmp, CAT_CREATE | CAT_DELETE, &cookie, p))) { | |
3590 | goto exit; | |
3591 | } | |
3592 | ||
3593 | /* Lock catalog b-tree */ | |
3594 | error = hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_EXCLUSIVE, p); | |
3595 | if (error) | |
3596 | goto exit; | |
3597 | ||
3598 | error = cat_create(hfsmp, &in_desc, &attr, &out_desc); | |
3599 | ||
3600 | /* Unlock catalog b-tree */ | |
3601 | (void) hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_RELEASE, p); | |
3602 | if (error) | |
3603 | goto exit; | |
3604 | ||
3605 | /* Update the parent directory */ | |
3606 | dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */ | |
3607 | dcp->c_nlink++; | |
3608 | dcp->c_entries++; | |
3609 | dcp->c_flag |= C_CHANGE | C_UPDATE; | |
3610 | tv = time; | |
3611 | (void) VOP_UPDATE(dvp, &tv, &tv, 0); | |
3612 | if (vnodetype == VDIR) { | |
3613 | HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); | |
3614 | } else { | |
3615 | HFS_KNOTE(dvp, NOTE_WRITE); | |
3616 | } | |
3617 | ||
3618 | hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE, | |
3619 | (dcp->c_cnid == kHFSRootFolderID)); | |
3620 | ||
3621 | // XXXdbg | |
3622 | // have to end the transaction here before we call hfs_getnewvnode() | |
3623 | // because that can cause us to try to reclaim a vnode on a different | |
3624 | // file system which could cause us to start a transaction which can | |
3625 | // deadlock with someone on that other file system (since we could be | |
3626 | // holding two transaction locks as well as various vnodes and we did | |
3627 | // not obtain the locks on them in the proper order). | |
3628 | // | |
3629 | // NOTE: this means that if the quota check fails or we have to update | |
3630 | // the change time on a block-special device, those changes | |
3631 | // will happen as part of independent transactions. | |
3632 | // | |
3633 | if (started_tr) { | |
3634 | journal_end_transaction(hfsmp->jnl); | |
3635 | started_tr = 0; | |
3636 | } | |
3637 | if (grabbed_lock) { | |
3638 | hfs_global_shared_lock_release(hfsmp); | |
3639 | grabbed_lock = 0; | |
3640 | } | |
3641 | ||
3642 | /* Create a vnode for the object just created: */ | |
3643 | error = hfs_getnewvnode(hfsmp, NULL, &out_desc, 0, &attr, NULL, &tvp); | |
3644 | if (error) | |
3645 | goto exit; | |
3646 | ||
3647 | // XXXdbg | |
3648 | cache_enter(dvp, tvp, cnp); | |
3649 | ||
3650 | #if QUOTA | |
3651 | cp = VTOC(tvp); | |
3652 | /* | |
3653 | * We call hfs_chkiq with the FORCE flag so that if we | |
3654 | * fall through to the rmdir/remove below, the inode | |
3655 | * has actually been accounted for. | |
3656 | */ | |
3657 | if ((error = hfs_getinoquota(cp)) || | |
3658 | (error = hfs_chkiq(cp, 1, cnp->cn_cred, FORCE))) { | |
3659 | if (tvp->v_type == VDIR) | |
3660 | VOP_RMDIR(dvp,tvp, cnp); | |
3661 | else | |
3662 | VOP_REMOVE(dvp,tvp, cnp); | |
3663 | ||
3664 | // because VOP_RMDIR and VOP_REMOVE already | |
3665 | // have done the vput() | |
3666 | dvp = NULL; | |
3667 | goto exit; | |
3668 | } | |
3669 | #endif /* QUOTA */ | |
3670 | ||
3671 | /* | |
3672 | * restore vtype and mode for VBLK and VCHR | |
3673 | */ | |
3674 | if (vnodetype == VBLK || vnodetype == VCHR) { | |
3675 | struct cnode *cp; | |
3676 | ||
3677 | cp = VTOC(tvp); | |
3678 | cp->c_mode = mode; | |
3679 | tvp->v_type = IFTOVT(mode); | |
3680 | cp->c_flag |= C_CHANGE; | |
3681 | tv = time; | |
3682 | if ((error = VOP_UPDATE(tvp, &tv, &tv, 1))) { | |
3683 | vput(tvp); | |
3684 | goto exit; | |
3685 | } | |
3686 | } | |
3687 | ||
3688 | *vpp = tvp; | |
3689 | exit: | |
3690 | cat_releasedesc(&out_desc); | |
3691 | ||
3692 | cat_postflight(hfsmp, &cookie, p); | |
3693 | ||
3694 | if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) { | |
3695 | char *tmp = cnp->cn_pnbuf; | |
3696 | cnp->cn_pnbuf = NULL; | |
3697 | cnp->cn_flags &= ~HASBUF; | |
3698 | FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI); | |
3699 | } | |
3700 | /* | |
3701 | * Check if a file is located in the "Cleanup At Startup" | |
3702 | * directory. If it is, tag it as NODUMP so that we | |
3703 | * can be lazy about zero-filling data holes. | |
3704 | */ | |
3705 | if ((error == 0) && dvp && (vnodetype == VREG) && | |
3706 | (dcp->c_desc.cd_nameptr != NULL) && | |
3707 | (strcmp(dcp->c_desc.cd_nameptr, CARBON_TEMP_DIR_NAME) == 0)) { | |
3708 | struct vnode *ddvp; | |
3709 | cnid_t parid; | |
3710 | ||
| 		/* | |
| 		 * cp is only assigned above when QUOTA is enabled; make sure it | |
| 		 * refers to the cnode of the file we just created before using it. | |
| 		 */ | |
| 		cp = VTOC(tvp); | |
| ||
3711 | parid = dcp->c_parentcnid; | |
3712 | vput(dvp); | |
3713 | dvp = NULL; | |
3714 | ||
3715 | /* | |
3716 | * The parent of "Cleanup At Startup" should | |
3717 | * have the ASCII name of the userid. | |
3718 | */ | |
3719 | if (VFS_VGET(HFSTOVFS(hfsmp), &parid, &ddvp) == 0) { | |
3720 | if (VTOC(ddvp)->c_desc.cd_nameptr && | |
3721 | (cp->c_uid == strtoul(VTOC(ddvp)->c_desc.cd_nameptr, 0, 0))) { | |
3722 | cp->c_flags |= UF_NODUMP; | |
3723 | cp->c_flag |= C_CHANGE; | |
3724 | } | |
3725 | vput(ddvp); | |
3726 | } | |
3727 | } | |
3728 | if (dvp) | |
3729 | vput(dvp); | |
3730 | ||
3731 | if (started_tr) { | |
3732 | journal_end_transaction(hfsmp->jnl); | |
3733 | started_tr = 0; | |
3734 | } | |
3735 | if (grabbed_lock) { | |
3736 | hfs_global_shared_lock_release(hfsmp); | |
3737 | grabbed_lock = 0; | |
3738 | } | |
3739 | ||
3740 | return (error); | |
3741 | } | |
3742 | ||
3743 | ||
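| /* | |
| * Return a vnode for the resource fork of the given file. | |
| * | |
| * If a resource fork vnode is already attached to the cnode it is | |
| * reused (via vget); otherwise the fork data is looked up in the | |
| * catalog and a new vnode is created with hfs_getnewvnode. | |
| */ | |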
3744 | static int | |
3745 | hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, struct proc *p) | |
3746 | { | |
3747 | struct vnode *rvp; | |
3748 | struct cnode *cp = VTOC(vp); | |
3749 | int error; | |
3750 | ||
3751 | if ((rvp = cp->c_rsrc_vp)) { | |
3752 | /* Use the existing vnode */ | |
3753 | error = vget(rvp, 0, p); | |
3754 | if (error) { | |
3755 | char * name = VTOC(vp)->c_desc.cd_nameptr; | |
3756 | ||
3757 | if (name) | |
3758 | printf("hfs_vgetrsrc: couldn't get" | |
3759 | " resource fork for %s\n", name); | |
3760 | return (error); | |
3761 | } | |
3762 | } else { | |
3763 | struct cat_fork rsrcfork; | |
3764 | ||
3765 | /* Lock catalog b-tree */ | |
3766 | error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p); | |
3767 | if (error) | |
3768 | return (error); | |
3769 | ||
3770 | /* Get resource fork data */ | |
3771 | error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0, | |
3772 | (struct cat_attr *)0, &rsrcfork); | |
3773 | ||
3774 | /* Unlock the Catalog */ | |
3775 | (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); | |
3776 | if (error) | |
3777 | return (error); | |
3778 | ||
3779 | error = hfs_getnewvnode(hfsmp, cp, &cp->c_desc, 1, &cp->c_attr, | |
3780 | &rsrcfork, &rvp); | |
3781 | if (error) | |
3782 | return (error); | |
3783 | } | |
3784 | ||
3785 | *rvpp = rvp; | |
3786 | return (0); | |
3787 | } | |
3788 | ||
3789 | ||
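| /* | |
| * Detach a knote from the cnode it was attached to.  The vnode is | |
| * locked around the removal (the code currently always takes the | |
| * !KNDETACH_VNLOCKED path). | |
| */ | |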
3790 | static void | |
3791 | filt_hfsdetach(struct knote *kn) | |
3792 | { | |
3793 | struct vnode *vp; | |
3794 | int result; | |
3795 | struct proc *p = current_proc(); | |
3796 | ||
3797 | vp = (struct vnode *)kn->kn_hook; | |
3798 | if (1) { /* ! KNDETACH_VNLOCKED */ | |
3799 | result = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); | |
3800 | if (result) return; | |
3801 | } | |
3802 | ||
3803 | result = KNOTE_DETACH(&VTOC(vp)->c_knotes, kn); | |
3804 | ||
3805 | if (1) { /* ! KNDETACH_VNLOCKED */ | |
3806 | VOP_UNLOCK(vp, 0, p); | |
3807 | } | |
3808 | } | |
3809 | ||
3810 | /*ARGSUSED*/ | |
3811 | static int | |
3812 | filt_hfsread(struct knote *kn, long hint) | |
3813 | { | |
3814 | struct vnode *vp = (struct vnode *)kn->kn_fp->f_data; | |
3815 | ||
3816 | if (hint == NOTE_REVOKE) { | |
3817 | /* | |
3818 | * filesystem is gone, so set the EOF flag and schedule | |
3819 | * the knote for deletion. | |
3820 | */ | |
3821 | kn->kn_flags |= (EV_EOF | EV_ONESHOT); | |
3822 | return (1); | |
3823 | } | |
3824 | ||
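| 	/* Report the number of bytes between the current file offset and EOF. */ | |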
3825 | kn->kn_data = VTOF(vp)->ff_size - kn->kn_fp->f_offset; | |
3826 | return (kn->kn_data != 0); | |
3827 | } | |
3828 | ||
3829 | /*ARGSUSED*/ | |
3830 | static int | |
3831 | filt_hfswrite(struct knote *kn, long hint) | |
3832 | { | |
3833 | if (hint == NOTE_REVOKE) { | |
3834 | /* | |
3835 | * filesystem is gone, so set the EOF flag and schedule | |
3836 | * the knote for deletion. | |
3837 | */ | |
3838 | kn->kn_flags |= (EV_EOF | EV_ONESHOT); | |
3839 | } | |
3840 | ||
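| 	/* Writes are always possible here; report readiness without a byte count. */ | |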
3841 | kn->kn_data = 0; | |
3842 | return (1); | |
3843 | } | |
3844 | ||
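| /* | |
| * Vnode event filter: latch any of the requested event bits passed in | |
| * the hint, and report EOF if the vnode is being revoked. | |
| */ | |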
3845 | static int | |
3846 | filt_hfsvnode(struct knote *kn, long hint) | |
3847 | { | |
3848 | ||
3849 | if (kn->kn_sfflags & hint) | |
3850 | kn->kn_fflags |= hint; | |
3851 | if (hint == NOTE_REVOKE) { | |
3852 | kn->kn_flags |= EV_EOF; | |
3853 | return (1); | |
3854 | } | |
3855 | return (kn->kn_fflags != 0); | |
3856 | } | |
3857 | ||
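| /* | |
| * Filter tables for the HFS knote filters.  The leading 1 marks them as | |
| * file-descriptor based; no separate attach routine is needed since | |
| * hfs_kqfilt_add attaches the knote itself. | |
| */ | |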
3858 | static struct filterops hfsread_filtops = | |
3859 | { 1, NULL, filt_hfsdetach, filt_hfsread }; | |
3860 | static struct filterops hfswrite_filtops = | |
3861 | { 1, NULL, filt_hfsdetach, filt_hfswrite }; | |
3862 | static struct filterops hfsvnode_filtops = | |
3863 | { 1, NULL, filt_hfsdetach, filt_hfsvnode }; | |
3864 | ||
3865 | /* | |
3866 | # | |
3867 | #% kqfilt_add vp L L L | |
3868 | # | |
3869 | vop_kqfilt_add | |
3870 | IN struct vnode *vp; | |
3871 | IN struct knote *kn; | |
3872 | IN struct proc *p; | |
3873 | */ | |
3874 | static int | |
3875 | hfs_kqfilt_add(ap) | |
3876 | struct vop_kqfilt_add_args /* { | |
3877 | struct vnode *a_vp; | |
3878 | struct knote *a_kn; | |
3879 | struct proc *p; | |
3880 | } */ *ap; | |
3881 | { | |
3882 | struct vnode *vp = ap->a_vp; | |
3883 | struct knote *kn = ap->a_kn; | |
3884 | ||
3885 | switch (kn->kn_filter) { | |
3886 | case EVFILT_READ: | |
3887 | if (vp->v_type == VREG) { | |
3888 | kn->kn_fop = &hfsread_filtops; | |
3889 | } else { | |
3890 | return EINVAL; | |
3891 | } | |
3892 | break; | |
3893 | case EVFILT_WRITE: | |
3894 | if (vp->v_type == VREG) { | |
3895 | kn->kn_fop = &hfswrite_filtops; | |
3896 | } else { | |
3897 | return EINVAL; | |
3898 | } | |
3899 | break; | |
3900 | case EVFILT_VNODE: | |
3901 | kn->kn_fop = &hfsvnode_filtops; | |
3902 | break; | |
3903 | default: | |
3904 | return (1); | |
3905 | } | |
3906 | ||
3907 | kn->kn_hook = (caddr_t)vp; | |
3908 | ||
3909 | /* simple_lock(&vp->v_pollinfo.vpi_lock); */ | |
3910 | KNOTE_ATTACH(&VTOC(vp)->c_knotes, kn); | |
3911 | /* simple_unlock(&vp->v_pollinfo.vpi_lock); */ | |
3912 | ||
3913 | return (0); | |
3914 | } | |
3915 | ||
3916 | /* | |
3917 | # | |
3918 | #% kqfilt_remove vp L L L | |
3919 | # | |
3920 | vop_kqfilt_remove | |
3921 | IN struct vnode *vp; | |
3922 | IN uintptr_t ident; | |
3923 | IN struct proc *p; | |
3924 | */ | |
3925 | static int | |
3926 | hfs_kqfilt_remove(ap) | |
3927 | struct vop_kqfilt_remove_args /* { | |
3928 | struct vnode *a_vp; | |
3929 | uintptr_t ident; | |
3930 | struct proc *p; | |
3931 | } */ *ap; | |
3932 | { | |
3933 | struct vnode *vp = ap->a_vp; | |
3934 | uintptr_t ident = ap->a_ident; | |
3935 | int result; | |
3936 | ||
3937 | result = ENOTSUP; /* XXX */ | |
3938 | ||
3939 | return (result); | |
3940 | } | |
3941 | ||
3942 | /* | |
3943 | * Wrapper for special device reads | |
3944 | */ | |
3945 | static int | |
3946 | hfsspec_read(ap) | |
3947 | struct vop_read_args /* { | |
3948 | struct vnode *a_vp; | |
3949 | struct uio *a_uio; | |
3950 | int a_ioflag; | |
3951 | struct ucred *a_cred; | |
3952 | } */ *ap; | |
3953 | { | |
3954 | /* | |
3955 | * Set access flag. | |
3956 | */ | |
3957 | VTOC(ap->a_vp)->c_flag |= C_ACCESS; | |
3958 | return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap)); | |
3959 | } | |
3960 | ||
3961 | /* | |
3962 | * Wrapper for special device writes | |
3963 | */ | |
3964 | static int | |
3965 | hfsspec_write(ap) | |
3966 | struct vop_write_args /* { | |
3967 | struct vnode *a_vp; | |
3968 | struct uio *a_uio; | |
3969 | int a_ioflag; | |
3970 | struct ucred *a_cred; | |
3971 | } */ *ap; | |
3972 | { | |
3973 | /* | |
3974 | * Set update and change flags. | |
3975 | */ | |
3976 | VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE; | |
3977 | return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap)); | |
3978 | } | |
3979 | ||
3980 | /* | |
3981 | * Wrapper for special device close | |
3982 | * | |
3983 | * Update the times on the cnode then do device close. | |
3984 | */ | |
3985 | static int | |
3986 | hfsspec_close(ap) | |
3987 | struct vop_close_args /* { | |
3988 | struct vnode *a_vp; | |
3989 | int a_fflag; | |
3990 | struct ucred *a_cred; | |
3991 | struct proc *a_p; | |
3992 | } */ *ap; | |
3993 | { | |
3994 | struct vnode *vp = ap->a_vp; | |
3995 | struct cnode *cp = VTOC(vp); | |
3996 | ||
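| 	/* Only update the cnode times while the device is still in use elsewhere. */ | |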
3997 | simple_lock(&vp->v_interlock); | |
3998 | if (ap->a_vp->v_usecount > 1) | |
3999 | CTIMES(cp, &time, &time); | |
4000 | simple_unlock(&vp->v_interlock); | |
4001 | return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap)); | |
4002 | } | |
4003 | ||
4004 | #if FIFO | |
4005 | /* | |
4006 | * Wrapper for fifo reads | |
4007 | */ | |
4008 | static int | |
4009 | hfsfifo_read(ap) | |
4010 | struct vop_read_args /* { | |
4011 | struct vnode *a_vp; | |
4012 | struct uio *a_uio; | |
4013 | int a_ioflag; | |
4014 | struct ucred *a_cred; | |
4015 | } */ *ap; | |
4016 | { | |
4017 | extern int (**fifo_vnodeop_p)(void *); | |
4018 | ||
4019 | /* | |
4020 | * Set access flag. | |
4021 | */ | |
4022 | VTOC(ap->a_vp)->c_flag |= C_ACCESS; | |
4023 | return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_read), ap)); | |
4024 | } | |
4025 | ||
4026 | /* | |
4027 | * Wrapper for fifo writes | |
4028 | */ | |
4029 | static int | |
4030 | hfsfifo_write(ap) | |
4031 | struct vop_write_args /* { | |
4032 | struct vnode *a_vp; | |
4033 | struct uio *a_uio; | |
4034 | int a_ioflag; | |
4035 | struct ucred *a_cred; | |
4036 | } */ *ap; | |
4037 | { | |
4038 | extern int (**fifo_vnodeop_p)(void *); | |
4039 | ||
4040 | /* | |
4041 | * Set update and change flags. | |
4042 | */ | |
4043 | VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE; | |
4044 | return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_write), ap)); | |
4045 | } | |
4046 | ||
4047 | /* | |
4048 | * Wrapper for fifo close | |
4049 | * | |
4050 | * Update the times on the cnode then do device close. | |
4051 | */ | |
4052 | static int | |
4053 | hfsfifo_close(ap) | |
4054 | struct vop_close_args /* { | |
4055 | struct vnode *a_vp; | |
4056 | int a_fflag; | |
4057 | struct ucred *a_cred; | |
4058 | struct proc *a_p; | |
4059 | } */ *ap; | |
4060 | { | |
4061 | extern int (**fifo_vnodeop_p)(void *); | |
4062 | struct vnode *vp = ap->a_vp; | |
4063 | struct cnode *cp = VTOC(vp); | |
4064 | ||
4065 | simple_lock(&vp->v_interlock); | |
4066 | if (ap->a_vp->v_usecount > 1) | |
4067 | CTIMES(cp, &time, &time); | |
4068 | simple_unlock(&vp->v_interlock); | |
4069 | return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap)); | |
4070 | } | |
4071 | ||
4072 | /* | |
4073 | * kqfilt_add wrapper for fifos. | |
4074 | * | |
4075 | * Fall through to hfs kqfilt_add routines if needed | |
4076 | */ | |
4077 | int | |
4078 | hfsfifo_kqfilt_add(ap) | |
4079 | struct vop_kqfilt_add_args *ap; | |
4080 | { | |
4081 | extern int (**fifo_vnodeop_p)(void *); | |
4082 | int error; | |
4083 | ||
4084 | error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_add), ap); | |
4085 | if (error) | |
4086 | error = hfs_kqfilt_add(ap); | |
4087 | return (error); | |
4088 | } | |
4089 | ||
4090 | /* | |
4091 | * kqfilt_remove wrapper for fifos. | |
4092 | * | |
4093 | * Fall through to hfs kqfilt_remove routines if needed | |
4094 | */ | |
4095 | int | |
4096 | hfsfifo_kqfilt_remove(ap) | |
4097 | struct vop_kqfilt_remove_args *ap; | |
4098 | { | |
4099 | extern int (**fifo_vnodeop_p)(void *); | |
4100 | int error; | |
4101 | ||
4102 | error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_remove), ap); | |
4103 | if (error) | |
4104 | error = hfs_kqfilt_remove(ap); | |
4105 | return (error); | |
4106 | } | |
4107 | ||
4108 | #endif /* FIFO */ | |
4109 | ||
4110 | ||
4111 | /***************************************************************************** | |
4112 | * | |
4113 | * VOP Tables | |
4114 | * | |
4115 | *****************************************************************************/ | |
4116 | int hfs_cache_lookup(); /* in hfs_lookup.c */ | |
4117 | int hfs_lookup(); /* in hfs_lookup.c */ | |
4118 | int hfs_read(); /* in hfs_readwrite.c */ | |
4119 | int hfs_write(); /* in hfs_readwrite.c */ | |
4120 | int hfs_ioctl(); /* in hfs_readwrite.c */ | |
4121 | int hfs_select(); /* in hfs_readwrite.c */ | |
4122 | int hfs_bmap(); /* in hfs_readwrite.c */ | |
4123 | int hfs_strategy(); /* in hfs_readwrite.c */ | |
4124 | int hfs_truncate(); /* in hfs_readwrite.c */ | |
4125 | int hfs_allocate(); /* in hfs_readwrite.c */ | |
4126 | int hfs_pagein(); /* in hfs_readwrite.c */ | |
4127 | int hfs_pageout(); /* in hfs_readwrite.c */ | |
4128 | int hfs_search(); /* in hfs_search.c */ | |
4129 | int hfs_bwrite(); /* in hfs_readwrite.c */ | |
4130 | int hfs_link(); /* in hfs_link.c */ | |
4131 | int hfs_blktooff(); /* in hfs_readwrite.c */ | |
4132 | int hfs_offtoblk(); /* in hfs_readwrite.c */ | |
4133 | int hfs_cmap(); /* in hfs_readwrite.c */ | |
4134 | int hfs_getattrlist(); /* in hfs_attrlist.c */ | |
4135 | int hfs_setattrlist(); /* in hfs_attrlist.c */ | |
4136 | int hfs_readdirattr(); /* in hfs_attrlist.c */ | |
4137 | int hfs_inactive(); /* in hfs_cnode.c */ | |
4138 | int hfs_reclaim(); /* in hfs_cnode.c */ | |
4139 | ||
4140 | int (**hfs_vnodeop_p)(void *); | |
4141 | ||
4142 | #define VOPFUNC int (*)(void *) | |
4143 | ||
4144 | struct vnodeopv_entry_desc hfs_vnodeop_entries[] = { | |
4145 | { &vop_default_desc, (VOPFUNC)vn_default_error }, | |
4146 | { &vop_lookup_desc, (VOPFUNC)hfs_cache_lookup }, /* lookup */ | |
4147 | { &vop_create_desc, (VOPFUNC)hfs_create }, /* create */ | |
4148 | { &vop_mknod_desc, (VOPFUNC)hfs_mknod }, /* mknod */ | |
4149 | { &vop_open_desc, (VOPFUNC)hfs_open }, /* open */ | |
4150 | { &vop_close_desc, (VOPFUNC)hfs_close }, /* close */ | |
4151 | { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */ | |
4152 | { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */ | |
4153 | { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */ | |
4154 | { &vop_read_desc, (VOPFUNC)hfs_read }, /* read */ | |
4155 | { &vop_write_desc, (VOPFUNC)hfs_write }, /* write */ | |
4156 | { &vop_ioctl_desc, (VOPFUNC)hfs_ioctl }, /* ioctl */ | |
4157 | { &vop_select_desc, (VOPFUNC)hfs_select }, /* select */ | |
4158 | { &vop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */ | |
4159 | { &vop_exchange_desc, (VOPFUNC)hfs_exchange }, /* exchange */ | |
4160 | { &vop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ | |
4161 | { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */ | |
4162 | { &vop_seek_desc, (VOPFUNC)nop_seek }, /* seek */ | |
4163 | { &vop_remove_desc, (VOPFUNC)hfs_remove }, /* remove */ | |
4164 | { &vop_link_desc, (VOPFUNC)hfs_link }, /* link */ | |
4165 | { &vop_rename_desc, (VOPFUNC)hfs_rename }, /* rename */ | |
4166 | { &vop_mkdir_desc, (VOPFUNC)hfs_mkdir }, /* mkdir */ | |
4167 | { &vop_rmdir_desc, (VOPFUNC)hfs_rmdir }, /* rmdir */ | |
4168 | { &vop_mkcomplex_desc, (VOPFUNC)err_mkcomplex }, /* mkcomplex */ | |
4169 | { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist }, /* getattrlist */ | |
4170 | { &vop_setattrlist_desc, (VOPFUNC)hfs_setattrlist }, /* setattrlist */ | |
4171 | { &vop_symlink_desc, (VOPFUNC)hfs_symlink }, /* symlink */ | |
4172 | { &vop_readdir_desc, (VOPFUNC)hfs_readdir }, /* readdir */ | |
4173 | { &vop_readdirattr_desc, (VOPFUNC)hfs_readdirattr }, /* readdirattr */ | |
4174 | { &vop_readlink_desc, (VOPFUNC)hfs_readlink }, /* readlink */ | |
4175 | { &vop_abortop_desc, (VOPFUNC)nop_abortop }, /* abortop */ | |
4176 | { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */ | |
4177 | { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */ | |
4178 | { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */ | |
4179 | { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */ | |
4180 | { &vop_bmap_desc, (VOPFUNC)hfs_bmap }, /* bmap */ | |
4181 | { &vop_strategy_desc, (VOPFUNC)hfs_strategy }, /* strategy */ | |
4182 | { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */ | |
4183 | { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */ | |
4184 | { &vop_pathconf_desc, (VOPFUNC)hfs_pathconf }, /* pathconf */ | |
4185 | { &vop_advlock_desc, (VOPFUNC)hfs_advlock }, /* advlock */ | |
4186 | { &vop_reallocblks_desc, (VOPFUNC)err_reallocblks }, /* reallocblks */ | |
4187 | { &vop_truncate_desc, (VOPFUNC)hfs_truncate }, /* truncate */ | |
4188 | { &vop_allocate_desc, (VOPFUNC)hfs_allocate }, /* allocate */ | |
4189 | { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */ | |
4190 | { &vop_searchfs_desc, (VOPFUNC)hfs_search }, /* search fs */ | |
4191 | { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite }, /* bwrite */ | |
4192 | { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* pagein */ | |
4193 | { &vop_pageout_desc,(VOPFUNC) hfs_pageout }, /* pageout */ | |
4194 | { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */ | |
4195 | { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */ | |
4196 | { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */ | |
4197 | { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */ | |
4198 | { &vop_kqfilt_add_desc, (VOPFUNC)hfs_kqfilt_add }, /* kqfilt_add */ | |
4199 | { &vop_kqfilt_remove_desc, (VOPFUNC)hfs_kqfilt_remove }, /* kqfilt_remove */ | |
4200 | { NULL, (VOPFUNC)NULL } | |
4201 | }; | |
4202 | ||
4203 | struct vnodeopv_desc hfs_vnodeop_opv_desc = | |
4204 | { &hfs_vnodeop_p, hfs_vnodeop_entries }; | |
4205 | ||
4206 | int (**hfs_specop_p)(void *); | |
4207 | struct vnodeopv_entry_desc hfs_specop_entries[] = { | |
4208 | { &vop_default_desc, (VOPFUNC)vn_default_error }, | |
4209 | { &vop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ | |
4210 | { &vop_create_desc, (VOPFUNC)spec_create }, /* create */ | |
4211 | { &vop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */ | |
4212 | { &vop_open_desc, (VOPFUNC)spec_open }, /* open */ | |
4213 | { &vop_close_desc, (VOPFUNC)hfsspec_close }, /* close */ | |
4214 | { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */ | |
4215 | { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */ | |
4216 | { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */ | |
4217 | { &vop_read_desc, (VOPFUNC)hfsspec_read }, /* read */ | |
4218 | { &vop_write_desc, (VOPFUNC)hfsspec_write }, /* write */ | |
4219 | { &vop_lease_desc, (VOPFUNC)spec_lease_check }, /* lease */ | |
4220 | { &vop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ | |
4221 | { &vop_select_desc, (VOPFUNC)spec_select }, /* select */ | |
4222 | { &vop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */ | |
4223 | { &vop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */ | |
4224 | { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */ | |
4225 | { &vop_seek_desc, (VOPFUNC)spec_seek }, /* seek */ | |
4226 | { &vop_remove_desc, (VOPFUNC)spec_remove }, /* remove */ | |
4227 | { &vop_link_desc, (VOPFUNC)spec_link }, /* link */ | |
4228 | { &vop_rename_desc, (VOPFUNC)spec_rename }, /* rename */ | |
4229 | { &vop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */ | |
4230 | { &vop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */ | |
4231 | { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist }, | |
4232 | { &vop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */ | |
4233 | { &vop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */ | |
4234 | { &vop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */ | |
4235 | { &vop_abortop_desc, (VOPFUNC)spec_abortop }, /* abortop */ | |
4236 | { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */ | |
4237 | { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */ | |
4238 | { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */ | |
4239 | { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */ | |
4240 | { &vop_bmap_desc, (VOPFUNC)spec_bmap }, /* bmap */ | |
4241 | { &vop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ | |
4242 | { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */ | |
4243 | { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */ | |
4244 | { &vop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ | |
4245 | { &vop_advlock_desc, (VOPFUNC)spec_advlock }, /* advlock */ | |
4246 | { &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff }, /* blkatoff */ | |
4247 | { &vop_valloc_desc, (VOPFUNC)spec_valloc }, /* valloc */ | |
4248 | { &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks }, /* reallocblks */ | |
4249 | { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */ | |
4250 | { &vop_truncate_desc, (VOPFUNC)spec_truncate }, /* truncate */ | |
4251 | { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */ | |
4252 | { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite }, | |
4253 | { &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize }, /* devblocksize */ | |
4254 | { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* Pagein */ | |
4255 | { &vop_pageout_desc, (VOPFUNC)hfs_pageout }, /* Pageout */ | |
4256 | { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */ | |
4257 | { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */ | |
4258 | { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */ | |
4259 | { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } | |
4260 | }; | |
4261 | struct vnodeopv_desc hfs_specop_opv_desc = | |
4262 | { &hfs_specop_p, hfs_specop_entries }; | |
4263 | ||
4264 | #if FIFO | |
4265 | int (**hfs_fifoop_p)(void *); | |
4266 | struct vnodeopv_entry_desc hfs_fifoop_entries[] = { | |
4267 | { &vop_default_desc, (VOPFUNC)vn_default_error }, | |
4268 | { &vop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */ | |
4269 | { &vop_create_desc, (VOPFUNC)fifo_create }, /* create */ | |
4270 | { &vop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */ | |
4271 | { &vop_open_desc, (VOPFUNC)fifo_open }, /* open */ | |
4272 | { &vop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */ | |
4273 | { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */ | |
4274 | { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */ | |
4275 | { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */ | |
4276 | { &vop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */ | |
4277 | { &vop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */ | |
4278 | { &vop_lease_desc, (VOPFUNC)fifo_lease_check }, /* lease */ | |
4279 | { &vop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */ | |
4280 | { &vop_select_desc, (VOPFUNC)fifo_select }, /* select */ | |
4281 | { &vop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */ | |
4282 | { &vop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */ | |
4283 | { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */ | |
4284 | { &vop_seek_desc, (VOPFUNC)fifo_seek }, /* seek */ | |
4285 | { &vop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */ | |
4286 | { &vop_link_desc, (VOPFUNC)fifo_link }, /* link */ | |
4287 | { &vop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */ | |
4288 | { &vop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */ | |
4289 | { &vop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */ | |
4290 | { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist }, | |
4291 | { &vop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */ | |
4292 | { &vop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */ | |
4293 | { &vop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */ | |
4294 | { &vop_abortop_desc, (VOPFUNC)fifo_abortop }, /* abortop */ | |
4295 | { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */ | |
4296 | { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */ | |
4297 | { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */ | |
4298 | { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */ | |
4299 | { &vop_bmap_desc, (VOPFUNC)fifo_bmap }, /* bmap */ | |
4300 | { &vop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */ | |
4301 | { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */ | |
4302 | { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */ | |
4303 | { &vop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */ | |
4304 | { &vop_advlock_desc, (VOPFUNC)fifo_advlock }, /* advlock */ | |
4305 | { &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff }, /* blkatoff */ | |
4306 | { &vop_valloc_desc, (VOPFUNC)fifo_valloc }, /* valloc */ | |
4307 | { &vop_reallocblks_desc, (VOPFUNC)fifo_reallocblks }, /* reallocblks */ | |
4308 | { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */ | |
4309 | { &vop_truncate_desc, (VOPFUNC)fifo_truncate }, /* truncate */ | |
4310 | { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */ | |
4311 | { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite }, | |
4312 | { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* Pagein */ | |
4313 | { &vop_pageout_desc, (VOPFUNC)hfs_pageout }, /* Pageout */ | |
4314 | { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */ | |
4315 | { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */ | |
4316 | { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */ | |
4317 | { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */ | |
4318 | { &vop_kqfilt_add_desc, (VOPFUNC)hfsfifo_kqfilt_add }, /* kqfilt_add */ | |
4319 | { &vop_kqfilt_remove_desc, (VOPFUNC)hfsfifo_kqfilt_remove }, /* kqfilt_remove */ | |
4320 | { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } | |
4321 | }; | |
4322 | struct vnodeopv_desc hfs_fifoop_opv_desc = | |
4323 | { &hfs_fifoop_p, hfs_fifoop_entries }; | |
4324 | #endif /* FIFO */ | |
4325 | ||
4326 | ||
4327 |