/* $NetBSD: coda_subr.c,v 1.31 2015/01/06 11:24:46 hannken Exp $ */

/*
 *
 * Coda: an Experimental Distributed File System
 * Release 3.1
 *
 * Copyright (c) 1987-1998 Carnegie Mellon University
 * All Rights Reserved
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation, and
 * that credit is given to Carnegie Mellon University in all documents
 * and publicity pertaining to direct or indirect use of this code or its
 * derivatives.
 *
 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
 * ANY DERIVATIVE WORK.
 *
 * Carnegie Mellon encourages users of this software to return any
 * improvements or extensions that they make, and to grant Carnegie
 * Mellon the rights to redistribute these changes without encumbrance.
 *
 *	@(#) coda/coda_subr.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
 */

/*
 * Mach Operating System
 * Copyright (c) 1989 Carnegie-Mellon University
 * All rights reserved. The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 * This code was written for the Coda file system at Carnegie Mellon
 * University. Contributors include David Steere, James Kistler, and
 * M. Satyanarayanan.
 */

/* NOTES: rvb
 * 1. Added coda_unmounting to mark all cnodes as being UNMOUNTING. This has to
 *    be done before dounmount is called, because some of the routines that
 *    dounmount calls might try to force flushes to venus before the unmount
 *    completes. The vnode pager does this.
 * 2. coda_unmounting marks all cnodes by iterating over the vnodes of the
 *    coda mount.
 * 3. coda_checkunmounting (under DEBUG) checks all cnodes by chasing the
 *    vnodes under the /coda mount point.
 * 4. coda_cacheprint (under DEBUG) prints names with vnode/cnode addresses.
 */
56
57#include <sys/cdefs.h>
58__KERNEL_RCSID(0, "$NetBSD: coda_subr.c,v 1.31 2015/01/06 11:24:46 hannken Exp $");
59
60#include <sys/param.h>
61#include <sys/systm.h>
62#include <sys/malloc.h>
63#include <sys/proc.h>
64#include <sys/select.h>
65#include <sys/mount.h>
66#include <sys/kauth.h>
67
68#include <coda/coda.h>
69#include <coda/cnode.h>
70#include <coda/coda_subr.h>
71#include <coda/coda_namecache.h>
72
73int codadebug = 0;
74int coda_printf_delay = 0; /* in microseconds */
75int coda_vnop_print_entry = 0;
76int coda_vfsop_print_entry = 0;
77
78#ifdef CODA_COMPAT_5
79#define coda_hash(fid) \
80 (((fid)->Volume + (fid)->Vnode) & (CODA_CACHESIZE-1))
81#define IS_DIR(cnode) (cnode.Vnode & 0x1)
82#else
83#define coda_hash(fid) \
84 (coda_f2i(fid) & (CODA_CACHESIZE-1))
85#define IS_DIR(cnode) (cnode.opaque[2] & 0x1)
86#endif
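
/*
 * IS_DIR relies on the Coda fid convention (also assumed by the
 * CODA_PURGEFID handling below) that directories have an odd vnode
 * number, i.e. the low bit of the fid's vnode component is set.
 */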

struct vnode *coda_ctlvp;

/*
 * Lookup a cnode by fid. If the cnode is dying, it is bogus so skip it.
 * The cnode is returned locked with the vnode referenced.
 */
struct cnode *
coda_find(CodaFid *fid)
{
        int i;
        struct vnode *vp;
        struct cnode *cp;

        for (i = 0; i < NVCODA; i++) {
                if (!coda_mnttbl[i].mi_started)
                        continue;
                if (vcache_get(coda_mnttbl[i].mi_vfsp,
                    fid, sizeof(CodaFid), &vp) != 0)
                        continue;
                mutex_enter(vp->v_interlock);
                cp = VTOC(vp);
                if (vp->v_type == VNON || cp == NULL || IS_UNMOUNTING(cp)) {
                        mutex_exit(vp->v_interlock);
                        vrele(vp);
                        continue;
                }
                mutex_enter(&cp->c_lock);
                mutex_exit(vp->v_interlock);

                return cp;
        }

        return NULL;
}
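
/*
 * A sketch of the caller contract for coda_find() (this is the pattern
 * used by handleDownCall() below): the caller must drop both the cnode
 * lock and the vnode reference when done.
 *
 *	cp = coda_find(&fid);
 *	if (cp != NULL) {
 *		... use cp ...
 *		mutex_exit(&cp->c_lock);
 *		vrele(CTOV(cp));
 *	}
 */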

/*
 * Iterate over all nodes attached to coda mounts.
 */
static void
coda_iterate(bool (*f)(void *, struct vnode *), void *cl)
{
        int i;
        struct vnode_iterator *marker;
        struct vnode *vp;

        for (i = 0; i < NVCODA; i++) {
                if (coda_mnttbl[i].mi_vfsp == NULL)
                        continue;
                vfs_vnode_iterator_init(coda_mnttbl[i].mi_vfsp, &marker);
                while ((vp = vfs_vnode_iterator_next(marker, f, cl)) != NULL)
                        vrele(vp);
                vfs_vnode_iterator_destroy(marker);
        }
}
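
/*
 * The selector callbacks below (coda_kill_selector and friends) work by
 * side effect and always return false, so vfs_vnode_iterator_next() never
 * hands a vnode back to the caller; the vrele() in the loop above is only
 * reached if a selector ever returns true.
 */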

/*
 * coda_kill is called as a side effect to vcopen. To prevent any
 * cnodes left around from an earlier run of a venus or warden from
 * causing problems with the new instance, mark any outstanding cnodes
 * as dying. Future operations on these cnodes should fail (excepting
 * coda_inactive of course!). Since multiple venii/wardens can be
 * running, only kill the cnodes for a particular entry in the
 * coda_mnttbl. -- DCS 12/1/94
 */

static bool
coda_kill_selector(void *cl, struct vnode *vp)
{
        int *count = cl;

        (*count)++;

        return false;
}

int
coda_kill(struct mount *whoIam, enum dc_status dcstat)
{
        int count = 0;
        struct vnode_iterator *marker;

        /*
         * Flush whatever vnodes we can from the name cache, then count
         * the vnodes that remain attached to this mount.
         */

        /*
         * This is slightly overkill, but should work. Eventually it'd be
         * nice to only flush those entries from the namecache that
         * reference a vnode in this vfs.
         */
        coda_nc_flush(dcstat);

        vfs_vnode_iterator_init(whoIam, &marker);
        vfs_vnode_iterator_next(marker, coda_kill_selector, &count);
        vfs_vnode_iterator_destroy(marker);

        return count;
}

/*
 * There are two reasons why a cnode may be in use: it may be in the
 * name cache, or it may be executing.
 */
static bool
coda_flush_selector(void *cl, struct vnode *vp)
{
        struct cnode *cp = VTOC(vp);

        if (cp != NULL && !IS_DIR(cp->c_fid)) /* only files can be executed */
                coda_vmflush(cp);

        return false;
}

void
coda_flush(enum dc_status dcstat)
{

        coda_clstat.ncalls++;
        coda_clstat.reqs[CODA_FLUSH]++;

        coda_nc_flush(dcstat); /* flush files from the name cache */

        coda_iterate(coda_flush_selector, NULL);
}

/*
 * As a debugging measure, print out any cnodes that lived through a
 * name cache flush.
 */
static bool
coda_testflush_selector(void *cl, struct vnode *vp)
{
        struct cnode *cp = VTOC(vp);

        if (cp != NULL)
                myprintf(("Live cnode fid %s count %d\n",
                    coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount));

        return false;
}

void
coda_testflush(void)
{

        coda_iterate(coda_testflush_selector, NULL);
}

/*
 * Step through all cnodes and mark them unmounting; otherwise NetBSD
 * kernels may try to fsync them now that venus is dead, which would be
 * a bad thing.
 */
static bool
coda_unmounting_selector(void *cl, struct vnode *vp)
{
        struct cnode *cp = VTOC(vp);

        if (cp)
                cp->c_flags |= C_UNMOUNTING;

        return false;
}

void
coda_unmounting(struct mount *whoIam)
{
        struct vnode_iterator *marker;

        vfs_vnode_iterator_init(whoIam, &marker);
        vfs_vnode_iterator_next(marker, coda_unmounting_selector, NULL);
        vfs_vnode_iterator_destroy(marker);
}
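
/*
 * As noted above (NOTES item 1), coda_unmounting() must run before
 * dounmount() so that nothing tries to force flushes to venus once it
 * is gone.
 */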

#ifdef DEBUG
static bool
coda_checkunmounting_selector(void *cl, struct vnode *vp)
{
        struct cnode *cp = VTOC(vp);

        if (cp && !(cp->c_flags & C_UNMOUNTING)) {
                printf("vp %p, cp %p missed\n", vp, cp);
                cp->c_flags |= C_UNMOUNTING;
        }

        return false;
}

void
coda_checkunmounting(struct mount *mp)
{
        struct vnode_iterator *marker;

        vfs_vnode_iterator_init(mp, &marker);
        vfs_vnode_iterator_next(marker, coda_checkunmounting_selector, NULL);
        vfs_vnode_iterator_destroy(marker);
}

void
coda_cacheprint(struct mount *whoIam)
{
        struct vnode *vp;
        struct vnode_iterator *marker;
        int count = 0;

        printf("coda_cacheprint: coda_ctlvp %p, cp %p", coda_ctlvp, VTOC(coda_ctlvp));
        coda_nc_name(VTOC(coda_ctlvp));
        printf("\n");

        vfs_vnode_iterator_init(whoIam, &marker);
        while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL)) != NULL) {
                printf("coda_cacheprint: vp %p, cp %p", vp, VTOC(vp));
                coda_nc_name(VTOC(vp));
                printf("\n");
                count++;
                vrele(vp);
        }
        printf("coda_cacheprint: count %d\n", count);
        vfs_vnode_iterator_destroy(marker);
}
#endif

/*
 * There are 6 cases where invalidations occur. The semantics of each
 * is listed here.
 *
 * CODA_FLUSH     -- flush all entries from the name cache and the cnode cache.
 * CODA_PURGEUSER -- flush all entries from the name cache for a specific user.
 *                   This call is a result of token expiration.
 *
 * The next two are the result of callbacks on a file or directory.
 * CODA_ZAPDIR    -- flush the attributes for the dir from its cnode.
 *                   Zap all children of this directory from the namecache.
 * CODA_ZAPFILE   -- flush the attributes for a file.
 *
 * The fifth is a result of Venus detecting an inconsistent file.
 * CODA_PURGEFID  -- flush the attributes for the file.
 *                   If it is a dir (odd vnode), purge its children from the
 *                   namecache, and remove the file from the namecache.
 *
 * The sixth allows Venus to replace local fids with global ones
 * during reintegration.
 * CODA_REPLACE   -- replace one CodaFid with another throughout the name cache.
 */
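
/*
 * handleDownCall() dispatches these invalidations: Venus delivers a
 * downcall through the Coda pseudo-device, the opcode selects one of
 * the cases above, and the return value is 0 or an errno. Each case
 * that looks up a cnode with coda_find() drops the cnode lock and the
 * vnode reference before returning; the zap/purge cases also set
 * C_PURGING when they hold the last reference.
 */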

int
handleDownCall(int opcode, union outputArgs *out)
{
        int error;

        /* Handle invalidate requests. */
        switch (opcode) {
        case CODA_FLUSH: {
                coda_flush(IS_DOWNCALL);

                CODADEBUG(CODA_FLUSH, coda_testflush();) /* print remaining cnodes */
                return(0);
        }

        case CODA_PURGEUSER: {
                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_PURGEUSER]++;

                /* XXX - need to prevent fsync's */
#ifdef CODA_COMPAT_5
                coda_nc_purge_user(out->coda_purgeuser.cred.cr_uid, IS_DOWNCALL);
#else
                coda_nc_purge_user(out->coda_purgeuser.uid, IS_DOWNCALL);
#endif
                return(0);
        }

        case CODA_ZAPFILE: {
                struct cnode *cp;

                error = 0;
                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_ZAPFILE]++;

                cp = coda_find(&out->coda_zapfile.Fid);
                if (cp != NULL) {
                        cp->c_flags &= ~C_VATTR;
                        if (CTOV(cp)->v_iflag & VI_TEXT)
                                error = coda_vmflush(cp);
                        CODADEBUG(CODA_ZAPFILE, myprintf((
                            "zapfile: fid = %s, refcnt = %d, error = %d\n",
                            coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount - 1, error)););
                        if (CTOV(cp)->v_usecount == 1) {
                                cp->c_flags |= C_PURGING;
                        }
                        mutex_exit(&cp->c_lock);
                        vrele(CTOV(cp));
                }

                return(error);
        }

        case CODA_ZAPDIR: {
                struct cnode *cp;

                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_ZAPDIR]++;

                cp = coda_find(&out->coda_zapdir.Fid);
                if (cp != NULL) {
                        cp->c_flags &= ~C_VATTR;
                        coda_nc_zapParentfid(&out->coda_zapdir.Fid, IS_DOWNCALL);

                        CODADEBUG(CODA_ZAPDIR, myprintf((
                            "zapdir: fid = %s, refcnt = %d\n",
                            coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount - 1)););
                        if (CTOV(cp)->v_usecount == 1) {
                                cp->c_flags |= C_PURGING;
                        }
                        mutex_exit(&cp->c_lock);
                        vrele(CTOV(cp));
                }

                return(0);
        }

        case CODA_PURGEFID: {
                struct cnode *cp;

                error = 0;
                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_PURGEFID]++;

                cp = coda_find(&out->coda_purgefid.Fid);
                if (cp != NULL) {
                        if (IS_DIR(out->coda_purgefid.Fid)) { /* Vnode is a directory */
                                coda_nc_zapParentfid(&out->coda_purgefid.Fid,
                                    IS_DOWNCALL);
                        }
                        cp->c_flags &= ~C_VATTR;
                        coda_nc_zapfid(&out->coda_purgefid.Fid, IS_DOWNCALL);
                        if (!(IS_DIR(out->coda_purgefid.Fid))
                            && (CTOV(cp)->v_iflag & VI_TEXT)) {
                                error = coda_vmflush(cp);
                        }
                        CODADEBUG(CODA_PURGEFID, myprintf((
                            "purgefid: fid = %s, refcnt = %d, error = %d\n",
                            coda_f2s(&cp->c_fid), CTOV(cp)->v_usecount - 1, error)););
                        if (CTOV(cp)->v_usecount == 1) {
                                cp->c_flags |= C_PURGING;
                        }
                        mutex_exit(&cp->c_lock);
                        vrele(CTOV(cp));
                }
                return(error);
        }

        case CODA_REPLACE: {
                struct cnode *cp = NULL;

                coda_clstat.ncalls++;
                coda_clstat.reqs[CODA_REPLACE]++;

                cp = coda_find(&out->coda_replace.OldFid);
                if (cp != NULL) {
                        error = vcache_rekey_enter(CTOV(cp)->v_mount, CTOV(cp),
                            &out->coda_replace.OldFid, sizeof(CodaFid),
                            &out->coda_replace.NewFid, sizeof(CodaFid));
                        if (error) {
                                mutex_exit(&cp->c_lock);
                                vrele(CTOV(cp));
                                return error;
                        }
                        cp->c_fid = out->coda_replace.NewFid;
                        vcache_rekey_exit(CTOV(cp)->v_mount, CTOV(cp),
                            &out->coda_replace.OldFid, sizeof(CodaFid),
                            &cp->c_fid, sizeof(CodaFid));

                        CODADEBUG(CODA_REPLACE, myprintf((
                            "replace: oldfid = %s, newfid = %s, cp = %p\n",
                            coda_f2s(&out->coda_replace.OldFid),
                            coda_f2s(&cp->c_fid), cp));)
                        mutex_exit(&cp->c_lock);
                        vrele(CTOV(cp));
                }
                return (0);
        }

        default:
                myprintf(("handleDownCall: unknown opcode %d\n", opcode));
                return (EINVAL);
        }
}

/* coda_grab_vnode: lives in either cfs_mach.c or cfs_nbsd.c */

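/*
 * coda_vmflush would flush cached VM/text pages for a node that is being
 * invalidated; in this port it is currently a no-op stub that always
 * reports success.
 */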
int
coda_vmflush(struct cnode *cp)
{
        return 0;
}

/*
 * kernel-internal debugging switches
 */

void
coda_debugon(void)
{
        codadebug = -1;
        coda_nc_debug = -1;
        coda_vnop_print_entry = 1;
        coda_psdev_print_entry = 1;
        coda_vfsop_print_entry = 1;
}

void
coda_debugoff(void)
{
        codadebug = 0;
        coda_nc_debug = 0;
        coda_vnop_print_entry = 0;
        coda_psdev_print_entry = 0;
        coda_vfsop_print_entry = 0;
}
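
/*
 * Setting codadebug and coda_nc_debug to -1 is intended to enable every
 * bit in the CODADEBUG masks, while the *_print_entry flags turn on the
 * entry-point traces in the vnode, pseudo-device and vfs code; setting
 * them back to 0 silences the output again.
 */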

/* How to print a ucred */
void
coda_print_cred(kauth_cred_t cred)
{
        uint16_t ngroups;
        int i;

        myprintf(("ref %d\tuid %d\n", kauth_cred_getrefcnt(cred),
            kauth_cred_geteuid(cred)));

        ngroups = kauth_cred_ngroups(cred);
        for (i = 0; i < ngroups; i++)
                myprintf(("\tgroup %d: (%d)\n", i, kauth_cred_group(cred, i)));
        myprintf(("\n"));
}

/*
 * Utilities used by both client and server
 * Standard levels:
 * 0) no debugging
 * 1) hard failures
 * 2) soft failures
 * 3) current test software
 * 4) main procedure entry points
 * 5) main procedure exit points
 * 6) utility procedure entry points
 * 7) utility procedure exit points
 * 8) obscure procedure entry points
 * 9) obscure procedure exit points
 * 10) random stuff
 * 11) all <= 1
 * 12) all <= 2
 * 13) all <= 3
 * ...
 */