/*
 * bsd/hfs/hfs_cnode.c — apple/xnu
 * (blob 8351989ed54824ed1cb75340f768dd6a1146ddcd)
 */
1 /*
2 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/proc.h>
25 #include <sys/vnode.h>
26 #include <sys/mount.h>
27 #include <sys/kernel.h>
28 #include <sys/malloc.h>
29 #include <sys/time.h>
30 #include <sys/ubc.h>
31 #include <sys/quota.h>
32 #include <sys/kdebug.h>
33
34 #include <kern/locks.h>
35
36 #include <miscfs/specfs/specdev.h>
37 #include <miscfs/fifofs/fifo.h>
38
39 #include <hfs/hfs.h>
40 #include <hfs/hfs_catalog.h>
41 #include <hfs/hfs_cnode.h>
42 #include <hfs/hfs_quota.h>
43
44 extern int prtactive;
45
46 extern lck_attr_t * hfs_lock_attr;
47 extern lck_grp_t * hfs_mutex_group;
48 extern lck_grp_t * hfs_rwlock_group;
49
50 static int hfs_filedone(struct vnode *vp, vfs_context_t context);
51
52 static void hfs_reclaim_cnode(struct cnode *);
53
54 static int hfs_valid_cnode(struct hfsmount *, struct vnode *, struct componentname *, cnid_t);
55
56 static int hfs_isordered(struct cnode *, struct cnode *);
57
58 int hfs_vnop_inactive(struct vnop_inactive_args *);
59
60 int hfs_vnop_reclaim(struct vnop_reclaim_args *);
61
62
63 /*
64 * Last reference to an cnode. If necessary, write or delete it.
65 */
66 __private_extern__
67 int
68 hfs_vnop_inactive(struct vnop_inactive_args *ap)
69 {
70 struct vnode *vp = ap->a_vp;
71 struct cnode *cp;
72 struct hfsmount *hfsmp = VTOHFS(vp);
73 struct proc *p = vfs_context_proc(ap->a_context);
74 int error = 0;
75 int recycle = 0;
76 int forkcount = 0;
77 int truncated = 0;
78 int started_tr = 0;
79 int took_trunc_lock = 0;
80 cat_cookie_t cookie;
81 int cat_reserve = 0;
82 int lockflags;
83 enum vtype v_type;
84
85 v_type = vnode_vtype(vp);
86 cp = VTOC(vp);
87
88 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp)) {
89 return (0);
90 }
91 /*
92 * Ignore nodes related to stale file handles.
93 */
94 if (cp->c_mode == 0) {
95 vnode_recycle(vp);
96 return (0);
97 }
98
99 if ((v_type == VREG) &&
100 (ISSET(cp->c_flag, C_DELETED) || VTOF(vp)->ff_blocks)) {
101 hfs_lock_truncate(cp, TRUE);
102 took_trunc_lock = 1;
103 }
104
105 /*
106 * We do the ubc_setsize before we take the cnode
107 * lock and before the hfs_truncate (since we'll
108 * be inside a transaction).
109 */
110 if ((v_type == VREG || v_type == VLNK) &&
111 (cp->c_flag & C_DELETED) &&
112 (VTOF(vp)->ff_blocks != 0)) {
113 ubc_setsize(vp, 0);
114 }
115
116 (void) hfs_lock(cp, HFS_FORCE_LOCK);
117
118 if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
119 hfs_filedone(vp, ap->a_context);
120 }
121 /*
122 * Remove any directory hints
123 */
124 if (v_type == VDIR)
125 hfs_reldirhints(cp, 0);
126
127 if (cp->c_datafork)
128 ++forkcount;
129 if (cp->c_rsrcfork)
130 ++forkcount;
131
132 /* If needed, get rid of any fork's data for a deleted file */
133 if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
134 if (VTOF(vp)->ff_blocks != 0) {
135 // start the transaction out here so that
136 // the truncate and the removal of the file
137 // are all in one transaction. otherwise
138 // because this cnode is marked for deletion
139 // the truncate won't cause the catalog entry
140 // to get updated which means that we could
141 // free blocks but still keep a reference to
142 // them in the catalog entry and then double
143 // free them later.
144 //
145 if (hfs_start_transaction(hfsmp) != 0) {
146 error = EINVAL;
147 goto out;
148 }
149 started_tr = 1;
150
151 /*
152 * Since we're already inside a transaction,
153 * tell hfs_truncate to skip the ubc_setsize.
154 */
155 error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);
156 if (error)
157 goto out;
158 truncated = 1;
159 }
160 recycle = 1;
161 }
162
163 /*
164 * Check for a postponed deletion.
165 * (only delete cnode when the last fork goes inactive)
166 */
167 if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
168 /*
169 * Mark cnode in transit so that no one can get this
170 * cnode from cnode hash.
171 */
172 hfs_chash_mark_in_transit(cp);
173
174 cp->c_flag &= ~C_DELETED;
175 cp->c_flag |= C_NOEXISTS; // XXXdbg
176 cp->c_rdev = 0;
177
178 if (started_tr == 0) {
179 if (hfs_start_transaction(hfsmp) != 0) {
180 error = EINVAL;
181 goto out;
182 }
183 started_tr = 1;
184 }
185
186 /*
187 * Reserve some space in the Catalog file.
188 */
189 if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
190 goto out;
191 }
192 cat_reserve = 1;
193
194 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
195
196 if (cp->c_blocks > 0)
197 printf("hfs_inactive: attempting to delete a non-empty file!");
198
199
200 //
201 // release the name pointer in the descriptor so that
202 // cat_delete() will use the file-id to do the deletion.
203 // in the case of hard links this is imperative (in the
204 // case of regular files the fileid and cnid are the
205 // same so it doesn't matter).
206 //
207 cat_releasedesc(&cp->c_desc);
208
209 /*
210 * The descriptor name may be zero,
211 * in which case the fileid is used.
212 */
213 error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);
214
215 if (error && truncated && (error != ENXIO))
216 printf("hfs_inactive: couldn't delete a truncated file!");
217
218 /* Update HFS Private Data dir */
219 if (error == 0) {
220 hfsmp->hfs_privdir_attr.ca_entries--;
221 (void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
222 &hfsmp->hfs_privdir_attr, NULL, NULL);
223 }
224
225 if (error == 0) {
226 /* Delete any attributes, ignore errors */
227 (void) hfs_removeallattr(hfsmp, cp->c_fileid);
228 }
229
230 hfs_systemfile_unlock(hfsmp, lockflags);
231
232 if (error)
233 goto out;
234
235 #if QUOTA
236 (void)hfs_chkiq(cp, -1, NOCRED, 0);
237 #endif /* QUOTA */
238
239 cp->c_mode = 0;
240 cp->c_flag |= C_NOEXISTS;
241 cp->c_touch_chgtime = TRUE;
242 cp->c_touch_modtime = TRUE;
243
244 if (error == 0)
245 hfs_volupdate(hfsmp, VOL_RMFILE, 0);
246 }
247
248 if ((cp->c_flag & C_MODIFIED) ||
249 cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
250 hfs_update(vp, 0);
251 }
252 out:
253 if (cat_reserve)
254 cat_postflight(hfsmp, &cookie, p);
255
256 // XXXdbg - have to do this because a goto could have come here
257 if (started_tr) {
258 hfs_end_transaction(hfsmp);
259 started_tr = 0;
260 }
261
262 hfs_unlock(cp);
263
264 if (took_trunc_lock)
265 hfs_unlock_truncate(cp);
266
267 /*
268 * If we are done with the vnode, reclaim it
269 * so that it can be reused immediately.
270 */
271 if (cp->c_mode == 0 || recycle)
272 vnode_recycle(vp);
273
274 return (error);
275 }
276
277 /*
278 * File clean-up (zero fill and shrink peof).
279 */
280 static int
281 hfs_filedone(struct vnode *vp, vfs_context_t context)
282 {
283 struct cnode *cp;
284 struct filefork *fp;
285 struct hfsmount *hfsmp;
286 off_t leof;
287 u_long blks, blocksize;
288
289 cp = VTOC(vp);
290 fp = VTOF(vp);
291 hfsmp = VTOHFS(vp);
292 leof = fp->ff_size;
293
294 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
295 return (0);
296
297 hfs_unlock(cp);
298 (void) cluster_push(vp, IO_CLOSE);
299 hfs_lock(cp, HFS_FORCE_LOCK);
300
301 /*
302 * Explicitly zero out the areas of file
303 * that are currently marked invalid.
304 */
305 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
306 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
307 off_t start = invalid_range->rl_start;
308 off_t end = invalid_range->rl_end;
309
310 /* The range about to be written must be validated
311 * first, so that VNOP_BLOCKMAP() will return the
312 * appropriate mapping for the cluster code:
313 */
314 rl_remove(start, end, &fp->ff_invalidranges);
315
316 hfs_unlock(cp);
317 (void) cluster_write(vp, (struct uio *) 0,
318 leof, end + 1, start, (off_t)0,
319 IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
320 hfs_lock(cp, HFS_FORCE_LOCK);
321 cp->c_flag |= C_MODIFIED;
322 }
323 cp->c_flag &= ~C_ZFWANTSYNC;
324 cp->c_zftimeout = 0;
325 blocksize = VTOVCB(vp)->blockSize;
326 blks = leof / blocksize;
327 if (((off_t)blks * (off_t)blocksize) != leof)
328 blks++;
329 /*
330 * Shrink the peof to the smallest size neccessary to contain the leof.
331 */
332 if (blks < fp->ff_blocks)
333 (void) hfs_truncate(vp, leof, IO_NDELAY, 0, context);
334 hfs_unlock(cp);
335 (void) cluster_push(vp, IO_CLOSE);
336 hfs_lock(cp, HFS_FORCE_LOCK);
337
338 /*
339 * If the hfs_truncate didn't happen to flush the vnode's
340 * information out to disk, force it to be updated now that
341 * all invalid ranges have been zero-filled and validated:
342 */
343 if (cp->c_flag & C_MODIFIED) {
344 hfs_update(vp, 0);
345 }
346 return (0);
347 }
348
349
350 /*
351 * Reclaim a cnode so that it can be used for other purposes.
352 */
353 __private_extern__
354 int
355 hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
356 {
357 struct vnode *vp = ap->a_vp;
358 struct cnode *cp;
359 struct filefork *fp = NULL;
360 struct filefork *altfp = NULL;
361 int reclaim_cnode = 0;
362
363 (void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
364 cp = VTOC(vp);
365
366 /*
367 * Keep track of an inactive hot file.
368 */
369 if (!vnode_isdir(vp) && !vnode_issystem(vp))
370 (void) hfs_addhotfile(vp);
371
372 vnode_removefsref(vp);
373
374 /*
375 * Find file fork for this vnode (if any)
376 * Also check if another fork is active
377 */
378 if (cp->c_vp == vp) {
379 fp = cp->c_datafork;
380 altfp = cp->c_rsrcfork;
381
382 cp->c_datafork = NULL;
383 cp->c_vp = NULL;
384 } else if (cp->c_rsrc_vp == vp) {
385 fp = cp->c_rsrcfork;
386 altfp = cp->c_datafork;
387
388 cp->c_rsrcfork = NULL;
389 cp->c_rsrc_vp = NULL;
390 } else {
391 panic("hfs_vnop_reclaim: vp points to wrong cnode\n");
392 }
393 /*
394 * On the last fork, remove the cnode from its hash chain.
395 */
396 if (altfp == NULL) {
397 /* If we can't remove it then the cnode must persist! */
398 if (hfs_chashremove(cp) == 0)
399 reclaim_cnode = 1;
400 /*
401 * Remove any directory hints
402 */
403 if (vnode_isdir(vp)) {
404 hfs_reldirhints(cp, 0);
405 }
406 }
407 /* Release the file fork and related data */
408 if (fp) {
409 /* Dump cached symlink data */
410 if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
411 FREE(fp->ff_symlinkptr, M_TEMP);
412 }
413 FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
414 }
415
416 /*
417 * If there was only one active fork then we can release the cnode.
418 */
419 if (reclaim_cnode) {
420 hfs_chashwakeup(cp, H_ALLOC | H_TRANSIT);
421 hfs_reclaim_cnode(cp);
422 } else /* cnode in use */ {
423 hfs_unlock(cp);
424 }
425
426 vnode_clearfsnode(vp);
427 return (0);
428 }
429
430
431 extern int (**hfs_vnodeop_p) (void *);
432 extern int (**hfs_specop_p) (void *);
433 extern int (**hfs_fifoop_p) (void *);
434
435 /*
436 * hfs_getnewvnode - get new default vnode
437 *
438 * The vnode is returned with an iocount and the cnode locked
439 */
440 __private_extern__
441 int
442 hfs_getnewvnode(
443 struct hfsmount *hfsmp,
444 struct vnode *dvp,
445 struct componentname *cnp,
446 struct cat_desc *descp,
447 int wantrsrc,
448 struct cat_attr *attrp,
449 struct cat_fork *forkp,
450 struct vnode **vpp)
451 {
452 struct mount *mp = HFSTOVFS(hfsmp);
453 struct vnode *vp = NULL;
454 struct vnode **cvpp;
455 struct vnode *tvp = NULLVP;
456 struct cnode *cp = NULL;
457 struct filefork *fp = NULL;
458 int i;
459 int retval;
460 int issystemfile;
461 struct vnode_fsparam vfsp;
462 enum vtype vtype;
463
464 if (attrp->ca_fileid == 0) {
465 *vpp = NULL;
466 return (ENOENT);
467 }
468
469 #if !FIFO
470 if (IFTOVT(attrp->ca_mode) == VFIFO) {
471 *vpp = NULL;
472 return (ENOTSUP);
473 }
474 #endif
475 vtype = IFTOVT(attrp->ca_mode);
476 issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);
477
478 /*
479 * Get a cnode (new or existing)
480 * skip getting the cnode lock if we are getting resource fork (wantrsrc == 2)
481 */
482 cp = hfs_chash_getcnode(hfsmp->hfs_raw_dev, attrp->ca_fileid, vpp, wantrsrc, (wantrsrc == 2));
483
484 /* Hardlinks may need an updated catalog descriptor */
485 if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
486 replace_desc(cp, descp);
487 }
488 /* Check if we found a matching vnode */
489 if (*vpp != NULL)
490 return (0);
491
492 /*
493 * If this is a new cnode then initialize it.
494 */
495 if (ISSET(cp->c_hflag, H_ALLOC)) {
496 lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);
497
498 /* Make sure its still valid (ie exists on disk). */
499 if (!hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
500 hfs_chash_abort(cp);
501 hfs_reclaim_cnode(cp);
502 *vpp = NULL;
503 return (ENOENT);
504 }
505 bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
506 bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));
507
508 /* The name was inherited so clear descriptor state... */
509 descp->cd_namelen = 0;
510 descp->cd_nameptr = NULL;
511 descp->cd_flags &= ~CD_HASBUF;
512
513 /* Tag hardlinks */
514 if (IFTOVT(cp->c_mode) == VREG &&
515 (descp->cd_cnid != attrp->ca_fileid)) {
516 cp->c_flag |= C_HARDLINK;
517 }
518
519 /* Take one dev reference for each non-directory cnode */
520 if (IFTOVT(cp->c_mode) != VDIR) {
521 cp->c_devvp = hfsmp->hfs_devvp;
522 vnode_ref(cp->c_devvp);
523 }
524 #if QUOTA
525 for (i = 0; i < MAXQUOTAS; i++)
526 cp->c_dquot[i] = NODQUOT;
527 #endif /* QUOTA */
528 }
529
530 if (IFTOVT(cp->c_mode) == VDIR) {
531 if (cp->c_vp != NULL)
532 panic("hfs_getnewvnode: orphaned vnode (data)");
533 cvpp = &cp->c_vp;
534 } else {
535 if (forkp && attrp->ca_blocks < forkp->cf_blocks)
536 panic("hfs_getnewvnode: bad ca_blocks (too small)");
537 /*
538 * Allocate and initialize a file fork...
539 */
540 MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
541 M_HFSFORK, M_WAITOK);
542 fp->ff_cp = cp;
543 if (forkp)
544 bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
545 else
546 bzero(&fp->ff_data, sizeof(struct cat_fork));
547 rl_init(&fp->ff_invalidranges);
548 fp->ff_sysfileinfo = 0;
549
550 if (wantrsrc) {
551 if (cp->c_rsrcfork != NULL)
552 panic("hfs_getnewvnode: orphaned rsrc fork");
553 if (cp->c_rsrc_vp != NULL)
554 panic("hfs_getnewvnode: orphaned vnode (rsrc)");
555 cp->c_rsrcfork = fp;
556 cvpp = &cp->c_rsrc_vp;
557 if ( (tvp = cp->c_vp) != NULLVP )
558 cp->c_flag |= C_NEED_DVNODE_PUT;
559 } else {
560 if (cp->c_datafork != NULL)
561 panic("hfs_getnewvnode: orphaned data fork");
562 if (cp->c_vp != NULL)
563 panic("hfs_getnewvnode: orphaned vnode (data)");
564 cp->c_datafork = fp;
565 cvpp = &cp->c_vp;
566 if ( (tvp = cp->c_rsrc_vp) != NULLVP)
567 cp->c_flag |= C_NEED_RVNODE_PUT;
568 }
569 }
570 if (tvp != NULLVP) {
571 /*
572 * grab an iocount on the vnode we weren't
573 * interested in (i.e. we want the resource fork
574 * but the cnode already has the data fork)
575 * to prevent it from being
576 * recycled by us when we call vnode_create
577 * which will result in a deadlock when we
578 * try to take the cnode lock in hfs_vnop_fsync or
579 * hfs_vnop_reclaim... vnode_get can be called here
580 * because we already hold the cnode lock which will
581 * prevent the vnode from changing identity until
582 * we drop it.. vnode_get will not block waiting for
583 * a change of state... however, it will return an
584 * error if the current iocount == 0 and we've already
585 * started to terminate the vnode... we don't need/want to
586 * grab an iocount in the case since we can't cause
587 * the fileystem to be re-entered on this thread for this vp
588 *
589 * the matching vnode_put will happen in hfs_unlock
590 * after we've dropped the cnode lock
591 */
592 if ( vnode_get(tvp) != 0)
593 cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
594 }
595 vfsp.vnfs_mp = mp;
596 vfsp.vnfs_vtype = vtype;
597 vfsp.vnfs_str = "hfs";
598 vfsp.vnfs_dvp = dvp;
599 vfsp.vnfs_fsnode = cp;
600 vfsp.vnfs_cnp = cnp;
601 if (vtype == VFIFO )
602 vfsp.vnfs_vops = hfs_fifoop_p;
603 else if (vtype == VBLK || vtype == VCHR)
604 vfsp.vnfs_vops = hfs_specop_p;
605 else
606 vfsp.vnfs_vops = hfs_vnodeop_p;
607
608 if (vtype == VBLK || vtype == VCHR)
609 vfsp.vnfs_rdev = attrp->ca_rdev;
610 else
611 vfsp.vnfs_rdev = 0;
612
613 if (forkp)
614 vfsp.vnfs_filesize = forkp->cf_size;
615 else
616 vfsp.vnfs_filesize = 0;
617
618 if (dvp && cnp && (cnp->cn_flags & MAKEENTRY))
619 vfsp.vnfs_flags = 0;
620 else
621 vfsp.vnfs_flags = VNFS_NOCACHE;
622
623 /* Tag system files */
624 vfsp.vnfs_marksystem = issystemfile;
625
626 /* Tag root directory */
627 if (descp->cd_cnid == kHFSRootFolderID)
628 vfsp.vnfs_markroot = 1;
629 else
630 vfsp.vnfs_markroot = 0;
631
632 if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
633 if (fp) {
634 if (fp == cp->c_datafork)
635 cp->c_datafork = NULL;
636 else
637 cp->c_rsrcfork = NULL;
638
639 FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
640 }
641 /*
642 * If this is a newly created cnode or a vnode reclaim
643 * occurred during the attachment, then cleanup the cnode.
644 */
645 if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
646 hfs_chash_abort(cp);
647 hfs_reclaim_cnode(cp);
648 } else {
649 hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
650 hfs_unlock(cp);
651 }
652 *vpp = NULL;
653 return (retval);
654 }
655 vp = *cvpp;
656 vnode_addfsref(vp);
657 vnode_settag(vp, VT_HFS);
658 if (cp->c_flag & C_HARDLINK)
659 vnode_set_hard_link(vp);
660 hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
661
662 /*
663 * Stop tracking an active hot file.
664 */
665 if (!vnode_isdir(vp) && !vnode_issystem(vp))
666 (void) hfs_removehotfile(vp);
667
668 *vpp = vp;
669 return (0);
670 }
671
672
/*
 * Free a cnode that is no longer referenced by any vnode.
 *
 * Releases quota references, the device-vnode reference taken in
 * hfs_getnewvnode, and the descriptor's name buffer, destroys the
 * cnode's locks, then scrubs and frees the structure.  The caller
 * must already have removed the cnode from the hash (or be aborting
 * a newly allocated one).
 */
static void
hfs_reclaim_cnode(struct cnode *cp)
{
#if QUOTA
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (cp->c_dquot[i] != NODQUOT) {
			dqreclaim(cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
#endif /* QUOTA */

	/* Drop the dev vnode reference taken for non-directory cnodes. */
	if (cp->c_devvp) {
		struct vnode *tmp_vp = cp->c_devvp;

		cp->c_devvp = NULL;
		vnode_rele(tmp_vp);
	}

	/*
	 * If the descriptor has a name then release it
	 */
	if (cp->c_desc.cd_flags & CD_HASBUF) {
		char *nameptr;

		nameptr = cp->c_desc.cd_nameptr;
		cp->c_desc.cd_nameptr = 0;
		cp->c_desc.cd_flags &= ~CD_HASBUF;
		cp->c_desc.cd_namelen = 0;
		vfs_removename(nameptr);
	}

	lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
	lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);
	/* Scrub the cnode before returning it to the zone. */
	bzero(cp, sizeof(struct cnode));
	FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
}
712
713
/*
 * Check that a cnode is still backed by an on-disk catalog record.
 *
 * When dvp and cnp are supplied, the name is looked up under the
 * parent directory and the resulting fileid must match cnid;
 * otherwise a direct lookup by id is performed.  Returns 1 when the
 * cnode still exists on disk, 0 when it is stale.
 */
static int
hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
{
	struct cat_attr attr;
	struct cat_desc cndesc;
	int stillvalid = 0;
	int lockflags;

	/* System files are always valid */
	if (cnid < kHFSFirstUserCatalogNodeID)
		return (1);

	/* XXX optimization:  check write count in dvp */

	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

	if (dvp && cnp) {
		bzero(&cndesc, sizeof(cndesc));
		cndesc.cd_nameptr = cnp->cn_nameptr;
		cndesc.cd_namelen = cnp->cn_namelen;
		cndesc.cd_parentcnid = VTOC(dvp)->c_cnid;
		cndesc.cd_hint = VTOC(dvp)->c_childhint;

		/* Name must exist AND still map to the expected file id. */
		if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
		    (cnid == attr.ca_fileid)) {
			stillvalid = 1;
		}
	} else {
		if (cat_idlookup(hfsmp, cnid, NULL, NULL, NULL) == 0) {
			stillvalid = 1;
		}
	}
	hfs_systemfile_unlock(hfsmp, lockflags);

	return (stillvalid);
}
750
751 /*
752 * Touch cnode times based on c_touch_xxx flags
753 *
754 * cnode must be locked exclusive
755 *
756 * This will also update the volume modify time
757 */
758 __private_extern__
759 void
760 hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp)
761 {
762 /* HFS Standard doesn't support access times */
763 if (hfsmp->hfs_flags & HFS_STANDARD) {
764 cp->c_touch_acctime = FALSE;
765 }
766
767 if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
768 struct timeval tv;
769 int touchvol = 0;
770
771 microtime(&tv);
772
773 if (cp->c_touch_acctime) {
774 cp->c_atime = tv.tv_sec;
775 /*
776 * When the access time is the only thing changing
777 * then make sure its sufficiently newer before
778 * committing it to disk.
779 */
780 if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
781 ATIME_ONDISK_ACCURACY)) {
782 cp->c_flag |= C_MODIFIED;
783 }
784 cp->c_touch_acctime = FALSE;
785 }
786 if (cp->c_touch_modtime) {
787 cp->c_mtime = tv.tv_sec;
788 cp->c_touch_modtime = FALSE;
789 cp->c_flag |= C_MODIFIED;
790 touchvol = 1;
791 #if 1
792 /*
793 * HFS dates that WE set must be adjusted for DST
794 */
795 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
796 cp->c_mtime += 3600;
797 }
798 #endif
799 }
800 if (cp->c_touch_chgtime) {
801 cp->c_ctime = tv.tv_sec;
802 cp->c_touch_chgtime = FALSE;
803 cp->c_flag |= C_MODIFIED;
804 touchvol = 1;
805 }
806
807 /* Touch the volume modtime if needed */
808 if (touchvol) {
809 HFSTOVCB(hfsmp)->vcbFlags |= 0xFF00;
810 HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
811 }
812 }
813 }
814
815 /*
816 * Lock a cnode.
817 */
818 __private_extern__
819 int
820 hfs_lock(struct cnode *cp, enum hfslocktype locktype)
821 {
822 void * thread = current_thread();
823
824 /* System files need to keep track of owner */
825 if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
826 (cp->c_fileid > kHFSRootFolderID) &&
827 (locktype != HFS_SHARED_LOCK)) {
828
829 /*
830 * The extents and bitmap file locks support
831 * recursion and are always taken exclusive.
832 */
833 if (cp->c_fileid == kHFSExtentsFileID ||
834 cp->c_fileid == kHFSAllocationFileID) {
835 if (cp->c_lockowner == thread) {
836 cp->c_syslockcount++;
837 } else {
838 lck_rw_lock_exclusive(&cp->c_rwlock);
839 cp->c_lockowner = thread;
840 cp->c_syslockcount = 1;
841 }
842 } else {
843 lck_rw_lock_exclusive(&cp->c_rwlock);
844 cp->c_lockowner = thread;
845 }
846 } else if (locktype == HFS_SHARED_LOCK) {
847 lck_rw_lock_shared(&cp->c_rwlock);
848 cp->c_lockowner = HFS_SHARED_OWNER;
849 } else {
850 lck_rw_lock_exclusive(&cp->c_rwlock);
851 cp->c_lockowner = thread;
852 }
853 /*
854 * Skip cnodes that no longer exist (were deleted).
855 */
856 if ((locktype != HFS_FORCE_LOCK) &&
857 ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
858 (cp->c_flag & C_NOEXISTS)) {
859 hfs_unlock(cp);
860 return (ENOENT);
861 }
862 return (0);
863 }
864
865 /*
866 * Lock a pair of cnodes.
867 */
868 __private_extern__
869 int
870 hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
871 {
872 struct cnode *first, *last;
873 int error;
874
875 /*
876 * If cnodes match then just lock one.
877 */
878 if (cp1 == cp2) {
879 return hfs_lock(cp1, locktype);
880 }
881
882 /*
883 * Lock in cnode parent-child order (if there is a relationship);
884 * otherwise lock in cnode address order.
885 */
886 if ((IFTOVT(cp1->c_mode) == VDIR) && (cp1->c_fileid == cp2->c_parentcnid)) {
887 first = cp1;
888 last = cp2;
889 } else if (cp1 < cp2) {
890 first = cp1;
891 last = cp2;
892 } else {
893 first = cp2;
894 last = cp1;
895 }
896
897 if ( (error = hfs_lock(first, locktype))) {
898 return (error);
899 }
900 if ( (error = hfs_lock(last, locktype))) {
901 hfs_unlock(first);
902 return (error);
903 }
904 return (0);
905 }
906
907 /*
908 * Check ordering of two cnodes. Return true if they are are in-order.
909 */
910 static int
911 hfs_isordered(struct cnode *cp1, struct cnode *cp2)
912 {
913 if (cp1 == cp2)
914 return (0);
915 if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
916 return (1);
917 if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
918 return (0);
919 if (cp1->c_fileid == cp2->c_parentcnid)
920 return (1); /* cp1 is the parent and should go first */
921 if (cp2->c_fileid == cp1->c_parentcnid)
922 return (0); /* cp1 is the child and should go last */
923
924 return (cp1 < cp2); /* fall-back is to use address order */
925 }
926
927 /*
928 * Acquire 4 cnode locks.
929 * - locked in cnode parent-child order (if there is a relationship)
930 * otherwise lock in cnode address order (lesser address first).
931 * - all or none of the locks are taken
932 * - only one lock taken per cnode (dup cnodes are skipped)
933 * - some of the cnode pointers may be null
934 */
935 __private_extern__
936 int
937 hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
938 struct cnode *cp4, enum hfslocktype locktype)
939 {
940 struct cnode * a[3];
941 struct cnode * b[3];
942 struct cnode * list[4];
943 struct cnode * tmp;
944 int i, j, k;
945 int error;
946
947 if (hfs_isordered(cp1, cp2)) {
948 a[0] = cp1; a[1] = cp2;
949 } else {
950 a[0] = cp2; a[1] = cp1;
951 }
952 if (hfs_isordered(cp3, cp4)) {
953 b[0] = cp3; b[1] = cp4;
954 } else {
955 b[0] = cp4; b[1] = cp3;
956 }
957 a[2] = (struct cnode *)0xffffffff; /* sentinel value */
958 b[2] = (struct cnode *)0xffffffff; /* sentinel value */
959
960 /*
961 * Build the lock list, skipping over duplicates
962 */
963 for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
964 tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
965 if (k == 0 || tmp != list[k-1])
966 list[k++] = tmp;
967 }
968
969 /*
970 * Now we can lock using list[0 - k].
971 * Skip over NULL entries.
972 */
973 for (i = 0; i < k; ++i) {
974 if (list[i])
975 if ((error = hfs_lock(list[i], locktype))) {
976 /* Drop any locks we acquired. */
977 while (--i >= 0) {
978 if (list[i])
979 hfs_unlock(list[i]);
980 }
981 return (error);
982 }
983 }
984 return (0);
985 }
986
987
988 /*
989 * Unlock a cnode.
990 */
991 __private_extern__
992 void
993 hfs_unlock(struct cnode *cp)
994 {
995 vnode_t rvp = NULLVP;
996 vnode_t dvp = NULLVP;
997
998 /* System files need to keep track of owner */
999 if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
1000 (cp->c_fileid > kHFSRootFolderID) &&
1001 (cp->c_datafork != NULL)) {
1002 /*
1003 * The extents and bitmap file locks support
1004 * recursion and are always taken exclusive.
1005 */
1006 if (cp->c_fileid == kHFSExtentsFileID ||
1007 cp->c_fileid == kHFSAllocationFileID) {
1008 if (--cp->c_syslockcount > 0) {
1009 return;
1010 }
1011 }
1012 }
1013 if (cp->c_flag & C_NEED_DVNODE_PUT)
1014 dvp = cp->c_vp;
1015
1016 if (cp->c_flag & C_NEED_RVNODE_PUT)
1017 rvp = cp->c_rsrc_vp;
1018
1019 cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT);
1020
1021 cp-> c_lockowner = NULL;
1022 lck_rw_done(&cp->c_rwlock);
1023
1024 if (dvp)
1025 vnode_put(dvp);
1026 if (rvp)
1027 vnode_put(rvp);
1028 }
1029
1030 /*
1031 * Unlock a pair of cnodes.
1032 */
1033 __private_extern__
1034 void
1035 hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
1036 {
1037 hfs_unlock(cp1);
1038 if (cp2 != cp1)
1039 hfs_unlock(cp2);
1040 }
1041
1042 /*
1043 * Unlock a group of cnodes.
1044 */
1045 __private_extern__
1046 void
1047 hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
1048 {
1049 struct cnode * list[4];
1050 int i, k = 0;
1051
1052 if (cp1) {
1053 hfs_unlock(cp1);
1054 list[k++] = cp1;
1055 }
1056 if (cp2) {
1057 for (i = 0; i < k; ++i) {
1058 if (list[i] == cp2)
1059 goto skip1;
1060 }
1061 hfs_unlock(cp2);
1062 list[k++] = cp2;
1063 }
1064 skip1:
1065 if (cp3) {
1066 for (i = 0; i < k; ++i) {
1067 if (list[i] == cp3)
1068 goto skip2;
1069 }
1070 hfs_unlock(cp3);
1071 list[k++] = cp3;
1072 }
1073 skip2:
1074 if (cp4) {
1075 for (i = 0; i < k; ++i) {
1076 if (list[i] == cp4)
1077 return;
1078 }
1079 hfs_unlock(cp4);
1080 }
1081 }
1082
1083
1084 /*
1085 * Protect a cnode against a truncation.
1086 *
1087 * Used mainly by read/write since they don't hold the
1088 * cnode lock across calls to the cluster layer.
1089 *
1090 * The process doing a truncation must take the lock
1091 * exclusive. The read/write processes can take it
1092 * non-exclusive.
1093 */
1094 __private_extern__
1095 void
1096 hfs_lock_truncate(struct cnode *cp, int exclusive)
1097 {
1098 if (cp->c_lockowner == current_thread())
1099 panic("hfs_lock_truncate: cnode 0x%08x locked!", cp);
1100
1101 if (exclusive)
1102 lck_rw_lock_exclusive(&cp->c_truncatelock);
1103 else
1104 lck_rw_lock_shared(&cp->c_truncatelock);
1105 }
1106
/*
 * Release the truncate lock taken by hfs_lock_truncate()
 * (shared or exclusive; lck_rw_done handles either).
 */
__private_extern__
void
hfs_unlock_truncate(struct cnode *cp)
{
	lck_rw_done(&cp->c_truncatelock);
}
1113
1114
1115
1116