]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfs_cnode.c
xnu-792.6.56.tar.gz
[apple/xnu.git] / bsd / hfs / hfs_cnode.c
1 /*
2 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/proc.h>
26 #include <sys/vnode.h>
27 #include <sys/mount.h>
28 #include <sys/kernel.h>
29 #include <sys/malloc.h>
30 #include <sys/time.h>
31 #include <sys/ubc.h>
32 #include <sys/quota.h>
33 #include <sys/kdebug.h>
34
35 #include <kern/locks.h>
36
37 #include <miscfs/specfs/specdev.h>
38 #include <miscfs/fifofs/fifo.h>
39
40 #include <hfs/hfs.h>
41 #include <hfs/hfs_catalog.h>
42 #include <hfs/hfs_cnode.h>
43 #include <hfs/hfs_quota.h>
44
45 extern int prtactive;
46
47 extern lck_attr_t * hfs_lock_attr;
48 extern lck_grp_t * hfs_mutex_group;
49 extern lck_grp_t * hfs_rwlock_group;
50
51 static int hfs_filedone(struct vnode *vp, vfs_context_t context);
52
53 static void hfs_reclaim_cnode(struct cnode *);
54
55 static int hfs_valid_cnode(struct hfsmount *, struct vnode *, struct componentname *, cnid_t);
56
57 static int hfs_isordered(struct cnode *, struct cnode *);
58
59 int hfs_vnop_inactive(struct vnop_inactive_args *);
60
61 int hfs_vnop_reclaim(struct vnop_reclaim_args *);
62
63
/*
 * Last reference to an cnode.  If necessary, write or delete it.
 *
 * Called by VFS when the last use reference on the vnode is dropped.
 * For cnodes marked C_DELETED this performs the postponed truncate
 * and catalog deletion (once the last fork goes inactive); otherwise
 * it just flushes pending timestamp/attribute updates via hfs_update.
 *
 * Returns 0 or an errno from the truncate/transaction/catalog calls.
 */
__private_extern__
int
hfs_vnop_inactive(struct vnop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct proc *p = vfs_context_proc(ap->a_context);
	int error = 0;
	int recycle = 0;		/* ask VFS to recycle the vnode on exit */
	int forkcount = 0;		/* number of forks still attached to cp */
	int truncated = 0;		/* set once file data was truncated to zero */
	int started_tr = 0;		/* a journal transaction is open */
	int took_trunc_lock = 0;	/* we hold cp->c_truncatelock */
	cat_cookie_t cookie;
	int cat_reserve = 0;		/* catalog space reserved via cat_preflight */
	int lockflags;
	enum vtype v_type;

	v_type = vnode_vtype(vp);
	cp = VTOC(vp);

	/*
	 * Nothing to do on read-only mounts, for system vnodes, or when
	 * the caller is the process currently freezing this volume.
	 */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp) ||
	    (hfsmp->hfs_freezing_proc == p)) {
		return (0);
	}

	/*
	 * Ignore nodes related to stale file handles.
	 */
	if (cp->c_mode == 0) {
		vnode_recycle(vp);
		return (0);
	}

	/*
	 * Take the truncate lock before the cnode lock (same order as
	 * hfs_lock_truncate expects) for any regular file that is
	 * deleted or still has blocks allocated.
	 */
	if ((v_type == VREG) &&
	    (ISSET(cp->c_flag, C_DELETED) || VTOF(vp)->ff_blocks)) {
		hfs_lock_truncate(cp, TRUE);
		took_trunc_lock = 1;
	}

	/*
	 * We do the ubc_setsize before we take the cnode
	 * lock and before the hfs_truncate (since we'll
	 * be inside a transaction).
	 */
	if ((v_type == VREG || v_type == VLNK) &&
	    (cp->c_flag & C_DELETED) &&
	    (VTOF(vp)->ff_blocks != 0)) {
		ubc_setsize(vp, 0);
	}

	(void) hfs_lock(cp, HFS_FORCE_LOCK);

	/* Live regular file with data: zero-fill and shrink the peof. */
	if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
		hfs_filedone(vp, ap->a_context);
	}
	/*
	 * Remove any directory hints
	 */
	if (v_type == VDIR)
		hfs_reldirhints(cp, 0);

	if (cp->c_datafork)
		++forkcount;
	if (cp->c_rsrcfork)
		++forkcount;

	/* If needed, get rid of any fork's data for a deleted file */
	if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
		if (VTOF(vp)->ff_blocks != 0) {
			// start the transaction out here so that
			// the truncate and the removal of the file
			// are all in one transaction.  otherwise
			// because this cnode is marked for deletion
			// the truncate won't cause the catalog entry
			// to get updated which means that we could
			// free blocks but still keep a reference to
			// them in the catalog entry and then double
			// free them later.
			//
			// if (hfs_start_transaction(hfsmp) != 0) {
			//     error = EINVAL;
			//     goto out;
			// }
			// started_tr = 1;

			/*
			 * Since we're already inside a transaction,
			 * tell hfs_truncate to skip the ubc_setsize.
			 */
			error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);
			if (error)
				goto out;
			truncated = 1;
		}
		recycle = 1;
	}

	/*
	 * Check for a postponed deletion.
	 * (only delete cnode when the last fork goes inactive)
	 */
	if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
		/*
		 * Mark cnode in transit so that no one can get this
		 * cnode from cnode hash.
		 */
		hfs_chash_mark_in_transit(cp);

		cp->c_flag &= ~C_DELETED;
		cp->c_flag |= C_NOEXISTS;	// XXXdbg
		cp->c_rdev = 0;

		/* The whole deletion must happen inside one transaction. */
		if (started_tr == 0) {
			if (hfs_start_transaction(hfsmp) != 0) {
				error = EINVAL;
				goto out;
			}
			started_tr = 1;
		}

		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
			goto out;
		}
		cat_reserve = 1;

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

		/* Should never happen: blocks should be freed by now. */
		if (cp->c_blocks > 0)
			printf("hfs_inactive: attempting to delete a non-empty file!");


		//
		// release the name pointer in the descriptor so that
		// cat_delete() will use the file-id to do the deletion.
		// in the case of hard links this is imperative (in the
		// case of regular files the fileid and cnid are the
		// same so it doesn't matter).
		//
		cat_releasedesc(&cp->c_desc);

		/*
		 * The descriptor name may be zero,
		 * in which case the fileid is used.
		 */
		error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

		if (error && truncated && (error != ENXIO))
			printf("hfs_inactive: couldn't delete a truncated file!");

		/* Update HFS Private Data dir */
		if (error == 0) {
			hfsmp->hfs_privdir_attr.ca_entries--;
			(void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
				&hfsmp->hfs_privdir_attr, NULL, NULL);
		}

		if (error == 0) {
			/* Delete any attributes, ignore errors */
			(void) hfs_removeallattr(hfsmp, cp->c_fileid);
		}

		hfs_systemfile_unlock(hfsmp, lockflags);

		if (error)
			goto out;

#if QUOTA
		(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

		cp->c_mode = 0;
		cp->c_flag |= C_NOEXISTS;
		cp->c_touch_chgtime = TRUE;
		cp->c_touch_modtime = TRUE;

		if (error == 0)
			hfs_volupdate(hfsmp, VOL_RMFILE, 0);
	}

	/* Flush pending timestamp/attribute changes to the catalog. */
	if ((cp->c_flag & C_MODIFIED) ||
	    cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		hfs_update(vp, 0);
	}
out:
	if (cat_reserve)
		cat_postflight(hfsmp, &cookie, p);

	// XXXdbg - have to do this because a goto could have come here
	if (started_tr) {
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}

	hfs_unlock(cp);

	if (took_trunc_lock)
		hfs_unlock_truncate(cp);

	/*
	 * If we are done with the vnode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (cp->c_mode == 0 || recycle)
		vnode_recycle(vp);

	return (error);
}
279
/*
 * File clean-up (zero fill and shrink peof).
 *
 * Flushes dirty pages, explicitly zero-fills all invalid
 * (not-yet-written) ranges, and shrinks the physical EOF down to the
 * smallest allocation covering the logical EOF.
 *
 * Called with the cnode lock held; the lock is dropped and retaken
 * around each cluster call (those may fault and re-enter hfs).
 */
static int
hfs_filedone(struct vnode *vp, vfs_context_t context)
{
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	off_t leof;		/* logical end-of-file */
	u_long blks, blocksize;

	cp = VTOC(vp);
	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);
	leof = fp->ff_size;

	/* Nothing to do on read-only volumes or for empty forks. */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
		return (0);

	/* Push dirty pages with the cnode lock dropped. */
	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);

	/*
	 * Explicitly zero out the areas of file
	 * that are currently marked invalid.
	 */
	while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
		struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
		off_t start = invalid_range->rl_start;
		off_t end = invalid_range->rl_end;

		/* The range about to be written must be validated
		 * first, so that VNOP_BLOCKMAP() will return the
		 * appropriate mapping for the cluster code:
		 */
		rl_remove(start, end, &fp->ff_invalidranges);

		/* Zero-fill the range; again lock-free around the cluster call. */
		hfs_unlock(cp);
		(void) cluster_write(vp, (struct uio *) 0,
				     leof, end + 1, start, (off_t)0,
				     IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
		hfs_lock(cp, HFS_FORCE_LOCK);
		cp->c_flag |= C_MODIFIED;
	}
	cp->c_flag &= ~C_ZFWANTSYNC;
	cp->c_zftimeout = 0;
	blocksize = VTOVCB(vp)->blockSize;
	/* Round up so a partial final block still counts. */
	blks = leof / blocksize;
	if (((off_t)blks * (off_t)blocksize) != leof)
		blks++;
	/*
	 * Shrink the peof to the smallest size neccessary to contain the leof.
	 */
	if (blks < fp->ff_blocks)
		(void) hfs_truncate(vp, leof, IO_NDELAY, 0, context);
	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);

	/*
	 * If the hfs_truncate didn't happen to flush the vnode's
	 * information out to disk, force it to be updated now that
	 * all invalid ranges have been zero-filled and validated:
	 */
	if (cp->c_flag & C_MODIFIED) {
		hfs_update(vp, 0);
	}
	return (0);
}
351
352
/*
 * Reclaim a cnode so that it can be used for other purposes.
 *
 * Detaches the vnode's fork from the cnode and frees it; when this
 * was the cnode's last attached fork, removes the cnode from the
 * hash and frees it too.  Always returns 0.
 */
__private_extern__
int
hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp = NULL;	/* the fork belonging to vp */
	struct filefork *altfp = NULL;	/* the cnode's other fork, if any */
	int reclaim_cnode = 0;		/* true when cp itself can be freed */

	(void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
	cp = VTOC(vp);

	/*
	 * Keep track of an inactive hot file.
	 */
	if (!vnode_isdir(vp) && !vnode_issystem(vp))
		(void) hfs_addhotfile(vp);

	vnode_removefsref(vp);

	/*
	 * Find file fork for this vnode (if any)
	 * Also check if another fork is active
	 */
	if (cp->c_vp == vp) {
		fp = cp->c_datafork;
		altfp = cp->c_rsrcfork;

		cp->c_datafork = NULL;
		cp->c_vp = NULL;
	} else if (cp->c_rsrc_vp == vp) {
		fp = cp->c_rsrcfork;
		altfp = cp->c_datafork;

		cp->c_rsrcfork = NULL;
		cp->c_rsrc_vp = NULL;
	} else {
		/* vp claims to be ours but the cnode doesn't point back at it. */
		panic("hfs_vnop_reclaim: vp points to wrong cnode\n");
	}
	/*
	 * On the last fork, remove the cnode from its hash chain.
	 */
	if (altfp == NULL) {
		/* If we can't remove it then the cnode must persist! */
		if (hfs_chashremove(cp) == 0)
			reclaim_cnode = 1;
		/*
		 * Remove any directory hints
		 */
		if (vnode_isdir(vp)) {
			hfs_reldirhints(cp, 0);
		}
	}
	/* Release the file fork and related data */
	if (fp) {
		/* Dump cached symlink data */
		if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
			FREE(fp->ff_symlinkptr, M_TEMP);
		}
		FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
	}

	/*
	 * If there was only one active fork then we can release the cnode.
	 */
	if (reclaim_cnode) {
		hfs_chashwakeup(cp, H_ALLOC | H_TRANSIT);
		hfs_reclaim_cnode(cp);
	} else /* cnode in use */ {
		/* The other fork's vnode is still attached; keep the cnode. */
		hfs_unlock(cp);
	}

	vnode_clearfsnode(vp);
	return (0);
}
432
433
434 extern int (**hfs_vnodeop_p) (void *);
435 extern int (**hfs_specop_p) (void *);
436 extern int (**hfs_fifoop_p) (void *);
437
/*
 * hfs_getnewvnode - get new default vnode
 *
 * The vnode is returned with an iocount and the cnode locked
 *
 * Finds or creates the cnode for (hfsmp, attrp->ca_fileid), attaches a
 * file fork (data or resource depending on 'wantrsrc'), and creates a
 * vnode for it via vnode_create.  On success *vpp holds the vnode; on
 * failure *vpp is NULL and an errno is returned.
 */
__private_extern__
int
hfs_getnewvnode(
	struct hfsmount *hfsmp,
	struct vnode *dvp,
	struct componentname *cnp,
	struct cat_desc *descp,
	int wantrsrc,
	struct cat_attr *attrp,
	struct cat_fork *forkp,
	struct vnode **vpp)
{
	struct mount *mp = HFSTOVFS(hfsmp);
	struct vnode *vp = NULL;
	struct vnode **cvpp;		/* where the new vnode gets attached on cp */
	struct vnode *tvp = NULLVP;	/* the cnode's other (unwanted) vnode, if any */
	struct cnode *cp = NULL;
	struct filefork *fp = NULL;
	int i;
	int retval;
	int issystemfile;
	struct vnode_fsparam vfsp;
	enum vtype vtype;

	/* A zero fileid never names a valid catalog record. */
	if (attrp->ca_fileid == 0) {
		*vpp = NULL;
		return (ENOENT);
	}

#if !FIFO
	if (IFTOVT(attrp->ca_mode) == VFIFO) {
		*vpp = NULL;
		return (ENOTSUP);
	}
#endif
	vtype = IFTOVT(attrp->ca_mode);
	issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);

	/*
	 * Get a cnode (new or existing)
	 * skip getting the cnode lock if we are getting resource fork (wantrsrc == 2)
	 */
	cp = hfs_chash_getcnode(hfsmp->hfs_raw_dev, attrp->ca_fileid, vpp, wantrsrc, (wantrsrc == 2));

	/* Hardlinks may need an updated catalog descriptor */
	if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
		replace_desc(cp, descp);
	}
	/* Check if we found a matching vnode */
	if (*vpp != NULL)
		return (0);

	/*
	 * If this is a new cnode then initialize it.
	 */
	if (ISSET(cp->c_hflag, H_ALLOC)) {
		lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);

		/* Make sure its still valid (ie exists on disk). */
		if (!hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
			*vpp = NULL;
			return (ENOENT);
		}
		bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
		bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));

		/* The name was inherited so clear descriptor state... */
		descp->cd_namelen = 0;
		descp->cd_nameptr = NULL;
		descp->cd_flags &= ~CD_HASBUF;

		/* Tag hardlinks */
		if (IFTOVT(cp->c_mode) == VREG &&
		    (descp->cd_cnid != attrp->ca_fileid)) {
			/* cnid != fileid means an indirect (hard link) entry. */
			cp->c_flag |= C_HARDLINK;
		}

		/* Take one dev reference for each non-directory cnode */
		if (IFTOVT(cp->c_mode) != VDIR) {
			cp->c_devvp = hfsmp->hfs_devvp;
			vnode_ref(cp->c_devvp);
		}
#if QUOTA
		for (i = 0; i < MAXQUOTAS; i++)
			cp->c_dquot[i] = NODQUOT;
#endif /* QUOTA */
	}

	if (IFTOVT(cp->c_mode) == VDIR) {
		/* Directories only ever have the data vnode slot. */
		if (cp->c_vp != NULL)
			panic("hfs_getnewvnode: orphaned vnode (data)");
		cvpp = &cp->c_vp;
	} else {
		if (forkp && attrp->ca_blocks < forkp->cf_blocks)
			panic("hfs_getnewvnode: bad ca_blocks (too small)");
		/*
		 * Allocate and initialize a file fork...
		 */
		MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
			M_HFSFORK, M_WAITOK);
		fp->ff_cp = cp;
		if (forkp)
			bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
		else
			bzero(&fp->ff_data, sizeof(struct cat_fork));
		rl_init(&fp->ff_invalidranges);
		fp->ff_sysfileinfo = 0;

		if (wantrsrc) {
			if (cp->c_rsrcfork != NULL)
				panic("hfs_getnewvnode: orphaned rsrc fork");
			if (cp->c_rsrc_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (rsrc)");
			cp->c_rsrcfork = fp;
			cvpp = &cp->c_rsrc_vp;
			if ( (tvp = cp->c_vp) != NULLVP )
				cp->c_flag |= C_NEED_DVNODE_PUT;
		} else {
			if (cp->c_datafork != NULL)
				panic("hfs_getnewvnode: orphaned data fork");
			if (cp->c_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (data)");
			cp->c_datafork = fp;
			cvpp = &cp->c_vp;
			if ( (tvp = cp->c_rsrc_vp) != NULLVP)
				cp->c_flag |= C_NEED_RVNODE_PUT;
		}
	}
	if (tvp != NULLVP) {
		/*
		 * grab an iocount on the vnode we weren't
		 * interested in (i.e. we want the resource fork
		 * but the cnode already has the data fork)
		 * to prevent it from being
		 * recycled by us when we call vnode_create
		 * which will result in a deadlock when we
		 * try to take the cnode lock in hfs_vnop_fsync or
		 * hfs_vnop_reclaim... vnode_get can be called here
		 * because we already hold the cnode lock which will
		 * prevent the vnode from changing identity until
		 * we drop it.. vnode_get will not block waiting for
		 * a change of state... however, it will return an
		 * error if the current iocount == 0 and we've already
		 * started to terminate the vnode... we don't need/want to
		 * grab an iocount in the case since we can't cause
		 * the fileystem to be re-entered on this thread for this vp
		 *
		 * the matching vnode_put will happen in hfs_unlock
		 * after we've dropped the cnode lock
		 */
		if ( vnode_get(tvp) != 0)
			cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
	}
	/* Fill in the parameter block for vnode_create. */
	vfsp.vnfs_mp = mp;
	vfsp.vnfs_vtype = vtype;
	vfsp.vnfs_str = "hfs";
	vfsp.vnfs_dvp = dvp;
	vfsp.vnfs_fsnode = cp;
	vfsp.vnfs_cnp = cnp;
	if (vtype == VFIFO )
		vfsp.vnfs_vops = hfs_fifoop_p;
	else if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_vops = hfs_specop_p;
	else
		vfsp.vnfs_vops = hfs_vnodeop_p;

	if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_rdev = attrp->ca_rdev;
	else
		vfsp.vnfs_rdev = 0;

	if (forkp)
		vfsp.vnfs_filesize = forkp->cf_size;
	else
		vfsp.vnfs_filesize = 0;

	/* Only enter the name cache when the caller asked for it. */
	if (dvp && cnp && (cnp->cn_flags & MAKEENTRY))
		vfsp.vnfs_flags = 0;
	else
		vfsp.vnfs_flags = VNFS_NOCACHE;

	/* Tag system files */
	vfsp.vnfs_marksystem = issystemfile;

	/* Tag root directory */
	if (descp->cd_cnid == kHFSRootFolderID)
		vfsp.vnfs_markroot = 1;
	else
		vfsp.vnfs_markroot = 0;

	if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
		/* vnode_create failed: detach and free the fork we attached. */
		if (fp) {
			if (fp == cp->c_datafork)
				cp->c_datafork = NULL;
			else
				cp->c_rsrcfork = NULL;

			FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
		}
		/*
		 * If this is a newly created cnode or a vnode reclaim
		 * occurred during the attachment, then cleanup the cnode.
		 */
		if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
		} else {
			hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
			hfs_unlock(cp);
		}
		*vpp = NULL;
		return (retval);
	}
	vp = *cvpp;
	vnode_addfsref(vp);
	vnode_settag(vp, VT_HFS);
	if (cp->c_flag & C_HARDLINK)
		vnode_set_hard_link(vp);
	/* Wake any thread waiting on this cnode in the hash. */
	hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);

	/*
	 * Stop tracking an active hot file.
	 */
	if (!vnode_isdir(vp) && !vnode_issystem(vp))
		(void) hfs_removehotfile(vp);

	*vpp = vp;
	return (0);
}
674
675
676 static void
677 hfs_reclaim_cnode(struct cnode *cp)
678 {
679 #if QUOTA
680 int i;
681
682 for (i = 0; i < MAXQUOTAS; i++) {
683 if (cp->c_dquot[i] != NODQUOT) {
684 dqreclaim(cp->c_dquot[i]);
685 cp->c_dquot[i] = NODQUOT;
686 }
687 }
688 #endif /* QUOTA */
689
690 if (cp->c_devvp) {
691 struct vnode *tmp_vp = cp->c_devvp;
692
693 cp->c_devvp = NULL;
694 vnode_rele(tmp_vp);
695 }
696
697 /*
698 * If the descriptor has a name then release it
699 */
700 if (cp->c_desc.cd_flags & CD_HASBUF) {
701 char *nameptr;
702
703 nameptr = cp->c_desc.cd_nameptr;
704 cp->c_desc.cd_nameptr = 0;
705 cp->c_desc.cd_flags &= ~CD_HASBUF;
706 cp->c_desc.cd_namelen = 0;
707 vfs_removename(nameptr);
708 }
709
710 lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
711 lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);
712 bzero(cp, sizeof(struct cnode));
713 FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
714 }
715
716
717 static int
718 hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
719 {
720 struct cat_attr attr;
721 struct cat_desc cndesc;
722 int stillvalid = 0;
723 int lockflags;
724
725 /* System files are always valid */
726 if (cnid < kHFSFirstUserCatalogNodeID)
727 return (1);
728
729 /* XXX optimization: check write count in dvp */
730
731 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
732
733 if (dvp && cnp) {
734 bzero(&cndesc, sizeof(cndesc));
735 cndesc.cd_nameptr = cnp->cn_nameptr;
736 cndesc.cd_namelen = cnp->cn_namelen;
737 cndesc.cd_parentcnid = VTOC(dvp)->c_cnid;
738 cndesc.cd_hint = VTOC(dvp)->c_childhint;
739
740 if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
741 (cnid == attr.ca_fileid)) {
742 stillvalid = 1;
743 }
744 } else {
745 if (cat_idlookup(hfsmp, cnid, NULL, NULL, NULL) == 0) {
746 stillvalid = 1;
747 }
748 }
749 hfs_systemfile_unlock(hfsmp, lockflags);
750
751 return (stillvalid);
752 }
753
754 /*
755 * Touch cnode times based on c_touch_xxx flags
756 *
757 * cnode must be locked exclusive
758 *
759 * This will also update the volume modify time
760 */
761 __private_extern__
762 void
763 hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp)
764 {
765 /* HFS Standard doesn't support access times */
766 if (hfsmp->hfs_flags & HFS_STANDARD) {
767 cp->c_touch_acctime = FALSE;
768 }
769
770 if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
771 struct timeval tv;
772 int touchvol = 0;
773
774 microtime(&tv);
775
776 if (cp->c_touch_acctime) {
777 cp->c_atime = tv.tv_sec;
778 /*
779 * When the access time is the only thing changing
780 * then make sure its sufficiently newer before
781 * committing it to disk.
782 */
783 if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
784 ATIME_ONDISK_ACCURACY)) {
785 cp->c_flag |= C_MODIFIED;
786 }
787 cp->c_touch_acctime = FALSE;
788 }
789 if (cp->c_touch_modtime) {
790 cp->c_mtime = tv.tv_sec;
791 cp->c_touch_modtime = FALSE;
792 cp->c_flag |= C_MODIFIED;
793 touchvol = 1;
794 #if 1
795 /*
796 * HFS dates that WE set must be adjusted for DST
797 */
798 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
799 cp->c_mtime += 3600;
800 }
801 #endif
802 }
803 if (cp->c_touch_chgtime) {
804 cp->c_ctime = tv.tv_sec;
805 cp->c_touch_chgtime = FALSE;
806 cp->c_flag |= C_MODIFIED;
807 touchvol = 1;
808 }
809
810 /* Touch the volume modtime if needed */
811 if (touchvol) {
812 HFSTOVCB(hfsmp)->vcbFlags |= 0xFF00;
813 HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
814 }
815 }
816 }
817
/*
 * Lock a cnode.
 *
 * HFS_SHARED_LOCK takes the rwlock shared; all other lock types take
 * it exclusive.  The extents and bitmap system-file cnodes support
 * recursive exclusive locking by the owning thread.
 *
 * Returns 0 on success, or ENOENT for a non-metadata cnode that is
 * marked C_NOEXISTS (unless the caller used HFS_FORCE_LOCK).
 */
__private_extern__
int
hfs_lock(struct cnode *cp, enum hfslocktype locktype)
{
	void * thread = current_thread();

	/* System files need to keep track of owner */
	if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
	    (cp->c_fileid > kHFSRootFolderID) &&
	    (locktype != HFS_SHARED_LOCK)) {

		/*
		 * The extents and bitmap file locks support
		 * recursion and are always taken exclusive.
		 */
		if (cp->c_fileid == kHFSExtentsFileID ||
		    cp->c_fileid == kHFSAllocationFileID) {
			if (cp->c_lockowner == thread) {
				/* Re-entry by the owning thread: just count it. */
				cp->c_syslockcount++;
			} else {
				lck_rw_lock_exclusive(&cp->c_rwlock);
				cp->c_lockowner = thread;
				cp->c_syslockcount = 1;
			}
		} else {
			lck_rw_lock_exclusive(&cp->c_rwlock);
			cp->c_lockowner = thread;
		}
	} else if (locktype == HFS_SHARED_LOCK) {
		lck_rw_lock_shared(&cp->c_rwlock);
		cp->c_lockowner = HFS_SHARED_OWNER;
	} else {
		lck_rw_lock_exclusive(&cp->c_rwlock);
		cp->c_lockowner = thread;
	}
	/*
	 * Skip cnodes that no longer exist (were deleted).
	 */
	if ((locktype != HFS_FORCE_LOCK) &&
	    ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
	    (cp->c_flag & C_NOEXISTS)) {
		hfs_unlock(cp);
		return (ENOENT);
	}
	return (0);
}
867
868 /*
869 * Lock a pair of cnodes.
870 */
871 __private_extern__
872 int
873 hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
874 {
875 struct cnode *first, *last;
876 int error;
877
878 /*
879 * If cnodes match then just lock one.
880 */
881 if (cp1 == cp2) {
882 return hfs_lock(cp1, locktype);
883 }
884
885 /*
886 * Lock in cnode parent-child order (if there is a relationship);
887 * otherwise lock in cnode address order.
888 */
889 if ((IFTOVT(cp1->c_mode) == VDIR) && (cp1->c_fileid == cp2->c_parentcnid)) {
890 first = cp1;
891 last = cp2;
892 } else if (cp1 < cp2) {
893 first = cp1;
894 last = cp2;
895 } else {
896 first = cp2;
897 last = cp1;
898 }
899
900 if ( (error = hfs_lock(first, locktype))) {
901 return (error);
902 }
903 if ( (error = hfs_lock(last, locktype))) {
904 hfs_unlock(first);
905 return (error);
906 }
907 return (0);
908 }
909
/*
 * Check ordering of two cnodes. Return true if they are are in-order.
 *
 * Ordering rules (checked in this order, which is significant):
 *   - a cnode never precedes itself;
 *   - NULL sorts before everything; the 0xffffffff sentinel used by
 *     hfs_lockfour sorts after everything;
 *   - a parent directory precedes its child;
 *   - otherwise, lower address precedes higher address.
 */
static int
hfs_isordered(struct cnode *cp1, struct cnode *cp2)
{
	if (cp1 == cp2)
		return (0);
	if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
		return (1);
	if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
		return (0);
	if (cp1->c_fileid == cp2->c_parentcnid)
		return (1);  /* cp1 is the parent and should go first */
	if (cp2->c_fileid == cp1->c_parentcnid)
		return (0);  /* cp1 is the child and should go last */

	return (cp1 < cp2);  /* fall-back is to use address order */
}
929
/*
 * Acquire 4 cnode locks.
 *   - locked in cnode parent-child order (if there is a relationship)
 *     otherwise lock in cnode address order (lesser address first).
 *   - all or none of the locks are taken
 *   - only one lock taken per cnode (dup cnodes are skipped)
 *   - some of the cnode pointers may be null
 */
__private_extern__
int
hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
             struct cnode *cp4, enum hfslocktype locktype)
{
	struct cnode * a[3];
	struct cnode * b[3];
	struct cnode * list[4];
	struct cnode * tmp;
	int i, j, k;
	int error;

	/* Sort each input pair with the hfs_isordered predicate. */
	if (hfs_isordered(cp1, cp2)) {
		a[0] = cp1; a[1] = cp2;
	} else {
		a[0] = cp2; a[1] = cp1;
	}
	if (hfs_isordered(cp3, cp4)) {
		b[0] = cp3; b[1] = cp4;
	} else {
		b[0] = cp4; b[1] = cp3;
	}
	/* Sentinels sort after every real cnode (see hfs_isordered). */
	a[2] = (struct cnode *)0xffffffff;  /* sentinel value */
	b[2] = (struct cnode *)0xffffffff;  /* sentinel value */

	/*
	 * Build the lock list, skipping over duplicates
	 */
	for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
		/* Merge step: take the lesser head of the two sorted pairs. */
		tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
		if (k == 0 || tmp != list[k-1])
			list[k++] = tmp;
	}

	/*
	 * Now we can lock using list[0 - k].
	 * Skip over NULL entries.
	 */
	for (i = 0; i < k; ++i) {
		if (list[i])
			if ((error = hfs_lock(list[i], locktype))) {
				/* Drop any locks we acquired. */
				while (--i >= 0) {
					if (list[i])
						hfs_unlock(list[i]);
				}
				return (error);
			}
	}
	return (0);
}
989
990
/*
 * Unlock a cnode.
 *
 * For the recursive extents/bitmap system-file locks, only the final
 * release actually drops the rwlock.  Any deferred vnode_put work
 * queued by hfs_getnewvnode (C_NEED_DVNODE_PUT / C_NEED_RVNODE_PUT)
 * is performed here, after the cnode lock has been dropped.
 */
__private_extern__
void
hfs_unlock(struct cnode *cp)
{
	vnode_t rvp = NULLVP;
	vnode_t dvp = NULLVP;

	/* System files need to keep track of owner */
	if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
	    (cp->c_fileid > kHFSRootFolderID) &&
	    (cp->c_datafork != NULL)) {
		/*
		 * The extents and bitmap file locks support
		 * recursion and are always taken exclusive.
		 */
		if (cp->c_fileid == kHFSExtentsFileID ||
		    cp->c_fileid == kHFSAllocationFileID) {
			/* Recursive hold: only the last release drops the lock. */
			if (--cp->c_syslockcount > 0) {
				return;
			}
		}
	}
	/* Capture any deferred puts before clearing the flags. */
	if (cp->c_flag & C_NEED_DVNODE_PUT)
		dvp = cp->c_vp;

	if (cp->c_flag & C_NEED_RVNODE_PUT)
		rvp = cp->c_rsrc_vp;

	cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT);

	cp-> c_lockowner = NULL;
	lck_rw_done(&cp->c_rwlock);

	/* Safe to do the puts now that the cnode lock is released. */
	if (dvp)
		vnode_put(dvp);
	if (rvp)
		vnode_put(rvp);
}
1032
1033 /*
1034 * Unlock a pair of cnodes.
1035 */
1036 __private_extern__
1037 void
1038 hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
1039 {
1040 hfs_unlock(cp1);
1041 if (cp2 != cp1)
1042 hfs_unlock(cp2);
1043 }
1044
1045 /*
1046 * Unlock a group of cnodes.
1047 */
1048 __private_extern__
1049 void
1050 hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
1051 {
1052 struct cnode * list[4];
1053 int i, k = 0;
1054
1055 if (cp1) {
1056 hfs_unlock(cp1);
1057 list[k++] = cp1;
1058 }
1059 if (cp2) {
1060 for (i = 0; i < k; ++i) {
1061 if (list[i] == cp2)
1062 goto skip1;
1063 }
1064 hfs_unlock(cp2);
1065 list[k++] = cp2;
1066 }
1067 skip1:
1068 if (cp3) {
1069 for (i = 0; i < k; ++i) {
1070 if (list[i] == cp3)
1071 goto skip2;
1072 }
1073 hfs_unlock(cp3);
1074 list[k++] = cp3;
1075 }
1076 skip2:
1077 if (cp4) {
1078 for (i = 0; i < k; ++i) {
1079 if (list[i] == cp4)
1080 return;
1081 }
1082 hfs_unlock(cp4);
1083 }
1084 }
1085
1086
1087 /*
1088 * Protect a cnode against a truncation.
1089 *
1090 * Used mainly by read/write since they don't hold the
1091 * cnode lock across calls to the cluster layer.
1092 *
1093 * The process doing a truncation must take the lock
1094 * exclusive. The read/write processes can take it
1095 * non-exclusive.
1096 */
1097 __private_extern__
1098 void
1099 hfs_lock_truncate(struct cnode *cp, int exclusive)
1100 {
1101 if (cp->c_lockowner == current_thread())
1102 panic("hfs_lock_truncate: cnode 0x%08x locked!", cp);
1103
1104 if (exclusive)
1105 lck_rw_lock_exclusive(&cp->c_truncatelock);
1106 else
1107 lck_rw_lock_shared(&cp->c_truncatelock);
1108 }
1109
/*
 * Release the truncate lock taken by hfs_lock_truncate
 * (shared or exclusive).
 */
__private_extern__
void
hfs_unlock_truncate(struct cnode *cp)
{
	lck_rw_done(&cp->c_truncatelock);
}
1116
1117
1118
1119