/* $NetBSD: dksubr.c,v 1.91 2016/10/24 17:14:27 jdolecek Exp $ */

/*-
 * Copyright (c) 1996, 1997, 1998, 1999, 2002, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dksubr.c,v 1.91 2016/10/24 17:14:27 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/module.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <miscfs/specfs/specdev.h> /* for v_rdev */

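/*
 * This file provides generic open/close/strategy/ioctl/dump glue for
 * disk-like drivers built around struct dk_softc.  A typical consumer
 * wires it up roughly as follows -- an illustrative sketch only, with
 * the "xy" driver name, its hook functions and the chosen bufq strategy
 * being hypothetical (see ld(4) for a real example); dtype is the
 * disklabel d_type value for the device:
 *
 *	static const struct dkdriver xy_dkdriver = {
 *		.d_open = xyopen,
 *		.d_close = xyclose,
 *		.d_strategy = xystrategy,
 *		.d_diskstart = xy_diskstart,
 *		.d_lastclose = xy_lastclose,
 *	};
 *
 *	At attach time:
 *		dk_init(dksc, self, dtype);
 *		disk_init(&dksc->sc_dkdev, dksc->sc_xname, &xy_dkdriver);
 *		dk_attach(dksc);
 *		disk_attach(&dksc->sc_dkdev);
 *		bufq_alloc(&dksc->sc_bufq, "fcfs", BUFQ_SORT_RAWBLOCK);
 *		dksc->sc_flags |= DKF_INITED;
 *
 *	xystrategy() forwards buffers to dk_strategy(), which queues them
 *	and hands them to the driver's d_diskstart() hook.  When a transfer
 *	completes, the driver calls dk_done() and then dk_start() to issue
 *	the next queued buffer.
 */
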
int	dkdebug = 0;

#ifdef DEBUG
#define DKDB_FOLLOW	0x1
#define DKDB_INIT	0x2
#define DKDB_VNODE	0x4
#define DKDB_DUMP	0x8

#define IFDEBUG(x,y)		if (dkdebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(DKDB_FOLLOW, y)
#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#define DKF_READYFORDUMP	(DKF_INITED|DKF_TAKEDUMP)

static int dk_subr_modcmd(modcmd_t, void *);

#define DKLABELDEV(dev)	\
	(MAKEDISKDEV(major((dev)), DISKUNIT((dev)), RAW_PART))

static void	dk_makedisklabel(struct dk_softc *);
static int	dk_translate(struct dk_softc *, struct buf *);
static void	dk_done1(struct dk_softc *, struct buf *, bool);

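/*
 * Zero the dk_softc and record the owning device, disk type and
 * device name.  Drivers call this before dk_attach().
 */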
void
dk_init(struct dk_softc *dksc, device_t dev, int dtype)
{

	memset(dksc, 0x0, sizeof(*dksc));
	dksc->sc_dtype = dtype;
	dksc->sc_dev = dev;

	strlcpy(dksc->sc_xname, device_xname(dev), DK_XNAME_SIZE);
	dksc->sc_dkdev.dk_name = dksc->sc_xname;
}

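/*
 * Finish attachment: initialize the I/O lock, mark the unit as ready
 * for dumps and register it as an entropy source.
 */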
void
dk_attach(struct dk_softc *dksc)
{
	KASSERT(dksc->sc_dev != NULL);

	mutex_init(&dksc->sc_iolock, MUTEX_DEFAULT, IPL_VM);
	dksc->sc_flags |= DKF_READYFORDUMP;
#ifdef DIAGNOSTIC
	dksc->sc_flags |= DKF_WARNLABEL | DKF_LABELSANITY;
#endif

	/* Attach the device into the rnd source list. */
	rnd_attach_source(&dksc->sc_rnd_source, dksc->sc_xname,
	    RND_TYPE_DISK, RND_FLAG_DEFAULT);
}

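/*
 * Undo dk_attach(): detach the entropy source and destroy the
 * I/O lock.
 */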
void
dk_detach(struct dk_softc *dksc)
{
	/* Unhook the entropy source. */
	rnd_detach_source(&dksc->sc_rnd_source);

	dksc->sc_flags &= ~DKF_READYFORDUMP;
	mutex_destroy(&dksc->sc_iolock);
}

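/*
 * Generic open routine: refuses partitions other than RAW_PART while
 * wedges are configured, reads the disklabel when no valid in-core
 * label is present and records the partition in the open masks.
 */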
/* ARGSUSED */
int
dk_open(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	int ret = 0;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (dk->dk_nwedges != 0 && part != RAW_PART) {
		ret = EBUSY;
		goto done;
	}

	/*
	 * If the unit is initialized and we don't have a valid in-core
	 * disklabel yet, read one now.
	 */
	if ((dksc->sc_flags & DKF_INITED)) {
		if ((dksc->sc_flags & DKF_VLABEL) == 0) {
			dksc->sc_flags |= DKF_VLABEL;
			dk_getdisklabel(dksc, dev);
		}
	}

	/* Fail if we can't find the partition. */
	if (part != RAW_PART &&
	    ((dksc->sc_flags & DKF_VLABEL) == 0 ||
	     part >= lp->d_npartitions ||
	     lp->d_partitions[part].p_fstype == FS_UNUSED)) {
		ret = ENXIO;
		goto done;
	}

	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

done:
	mutex_exit(&dk->dk_openlock);
	return ret;
}

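/*
 * Generic close routine: clears the partition from the open masks and,
 * on last close, calls the driver's d_lastclose hook and forgets the
 * in-core label unless DIOCKLABEL asked to keep it.
 */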
/* ARGSUSED */
int
dk_close(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int part = DISKPART(dev);
	int pmask = 1 << part;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	if (dk->dk_openmask == 0) {
		if (dkd->d_lastclose != NULL)
			(*dkd->d_lastclose)(dksc->sc_dev);
		if ((dksc->sc_flags & DKF_KLABEL) == 0)
			dksc->sc_flags &= ~DKF_VLABEL;
	}

	mutex_exit(&dk->dk_openlock);
	return 0;
}

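/*
 * Check a buffer against the disklabel or media size and convert its
 * block number to an absolute device block in b_rawblkno.  Returns -1
 * if the buffer should be issued to the device; otherwise returns
 * bp->b_error (0 for a trivially complete transfer) and the caller is
 * expected to biodone() the buffer.
 */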
static int
dk_translate(struct dk_softc *dksc, struct buf *bp)
{
	int part;
	int wlabel;
	daddr_t blkno;
	struct disklabel *lp;
	struct disk *dk;
	uint64_t numsecs;
	unsigned secsize;

	lp = dksc->sc_dkdev.dk_label;
	dk = &dksc->sc_dkdev;

	part = DISKPART(bp->b_dev);
	numsecs = dk->dk_geom.dg_secperunit;
	secsize = dk->dk_geom.dg_secsize;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If there is nothing to do, then we are done */
	if (bp->b_bcount == 0)
		goto done;

	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
	if (part == RAW_PART) {
		uint64_t numblocks = btodb(numsecs * secsize);
		if (bounds_check_with_mediasize(bp, DEV_BSIZE, numblocks) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0)
			goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
	bp->b_rawblkno = blkno;

	return -1;

done:
	bp->b_resid = bp->b_bcount;
	return bp->b_error;
}

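/*
 * Common strategy setup: fail transfers on unconfigured units and run
 * the buffer through dk_translate().  Returns 1 if the buffer has been
 * completed here, 0 if the caller should queue it.
 */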
static int
dk_strategy1(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, %p)\n", __func__,
	    dksc->sc_xname, dksc, bp));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return 1;
	}

	error = dk_translate(dksc, bp);
	if (error >= 0) {
		biodone(bp);
		return 1;
	}

	return 0;
}

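/*
 * Generic strategy routine: translate the buffer, queue it and start
 * the unit.
 */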
void
dk_strategy(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	error = dk_strategy1(dksc, bp);
	if (error)
		return;

	/*
	 * Queue buffer and start unit
	 */
	dk_start(dksc, bp);
}

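/*
 * Like dk_strategy(), but only queues the buffer; the driver issues it
 * later via dk_start().  Returns 0 if the buffer was queued, non-zero
 * if it has already been completed.
 */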
int
dk_strategy_defer(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	error = dk_strategy1(dksc, bp);
	if (error)
		return error;

	/*
	 * Queue buffer only
	 */
	mutex_enter(&dksc->sc_iolock);
	bufq_put(dksc->sc_bufq, bp);
	mutex_exit(&dksc->sc_iolock);

	return 0;
}

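/*
 * Return non-zero if buffers are queued for the unit.
 */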
int
dk_strategy_pending(struct dk_softc *dksc)
{
	struct buf *bp;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return 0;
	}

	mutex_enter(&dksc->sc_iolock);
	bp = bufq_peek(dksc->sc_bufq);
	mutex_exit(&dksc->sc_iolock);

	return bp != NULL;
}

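/*
 * Queue an optional buffer and feed queued buffers to the driver's
 * d_diskstart routine until it returns EAGAIN (device busy) or the
 * queue is empty.  A buffer rejected with EAGAIN is retried on the
 * next call before the queue is consulted again.
 */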
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL)
		bufq_put(dksc->sc_bufq, bp);

	if (dksc->sc_busy)
		goto done;
	dksc->sc_busy = true;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when d_diskstart fails with EAGAIN, the buffer is saved
	 * and retried before the next buffer is fetched.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This preserves the order of I/O operations; re-inserting
	 * a failed buffer with bufq_put() would not.
	 */

	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;

	if (bp == NULL)
		bp = bufq_get(dksc->sc_bufq);

	while (bp != NULL) {

		disk_busy(&dksc->sc_dkdev);
		mutex_exit(&dksc->sc_iolock);
		error = dkd->d_diskstart(dksc->sc_dev, bp);
		mutex_enter(&dksc->sc_iolock);
		if (error == EAGAIN) {
			dksc->sc_deferred = bp;
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			break;
		}

		if (error != 0) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			dk_done1(dksc, bp, false);
		}

		bp = bufq_get(dksc->sc_bufq);
	}

	dksc->sc_busy = false;
done:
	mutex_exit(&dksc->sc_iolock);
}

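/*
 * Common I/O completion: report errors, update the disk statistics,
 * feed the entropy source and biodone() the buffer.
 */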
static void
dk_done1(struct dk_softc *dksc, struct buf *bp, bool lock)
{
	struct disk *dk = &dksc->sc_dkdev;

	if (bp->b_error != 0) {
		struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

		diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
			dk->dk_label);
		printf("\n");
	}

	if (lock)
		mutex_enter(&dksc->sc_iolock);
	disk_unbusy(dk, bp->b_bcount - bp->b_resid, (bp->b_flags & B_READ));
	if (lock)
		mutex_exit(&dksc->sc_iolock);

	rnd_add_uint32(&dksc->sc_rnd_source, bp->b_rawblkno);

	biodone(bp);
}

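/*
 * Driver-visible completion routine; takes the I/O lock while the
 * statistics are updated.
 */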
void
dk_done(struct dk_softc *dksc, struct buf *bp)
{
	dk_done1(dksc, bp, true);
}

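/*
 * Abort a deferred buffer, if any, with EIO and drain the buffer
 * queue.
 */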
void
dk_drain(struct dk_softc *dksc)
{
	struct buf *bp;

	mutex_enter(&dksc->sc_iolock);
	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;
	if (bp != NULL) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	bufq_drain(dksc->sc_bufq);
	mutex_exit(&dksc->sc_iolock);
}

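/*
 * Translate a discard request into device blocks and hand it to the
 * driver's d_discard routine.
 */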
int
dk_discard(struct dk_softc *dksc, dev_t dev, off_t pos, off_t len)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	unsigned secsize = dksc->sc_dkdev.dk_geom.dg_secsize;
	struct buf tmp, *bp = &tmp;
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", %jd, %jd)\n", __func__,
	    dksc->sc_xname, dksc, dev, (intmax_t)pos, (intmax_t)len));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return ENXIO;
	}

	if (secsize == 0 || (pos % secsize) != 0)
		return EINVAL;

	/* enough data to please the bounds checking code */
	bp->b_dev = dev;
	bp->b_blkno = (daddr_t)(pos / secsize);
	bp->b_bcount = len;
	bp->b_flags = B_WRITE;

	error = dk_translate(dksc, bp);
	if (error >= 0)
		return error;

	error = dkd->d_discard(dksc->sc_dev,
	    (off_t)bp->b_rawblkno * secsize,
	    (off_t)bp->b_bcount);

	return error;
}

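/*
 * Return the size of the partition in DEV_BSIZE blocks, or -1 if the
 * unit is not configured or the partition is not of type FS_SWAP
 * (the usual d_psize semantics used when selecting a dump device).
 */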
int
dk_size(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp;
	int is_open;
	int part;
	int size;

	if ((dksc->sc_flags & DKF_INITED) == 0)
		return -1;

	part = DISKPART(dev);
	is_open = dksc->sc_dkdev.dk_openmask & (1 << part);

	if (!is_open && dkd->d_open(dev, 0, S_IFBLK, curlwp))
		return -1;

	lp = dksc->sc_dkdev.dk_label;
	if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = lp->d_partitions[part].p_size *
		    (lp->d_secsize / DEV_BSIZE);

	if (!is_open && dkd->d_close(dev, 0, S_IFBLK, curlwp))
		return -1;

	return size;
}

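/*
 * Generic ioctl handler for disklabel, wedge and buffer queue strategy
 * requests.  Unknown commands return ENOTTY so that the driver can
 * handle its own ioctls after calling this.
 */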
int
dk_ioctl(struct dk_softc *dksc, dev_t dev,
    u_long cmd, void *data, int flag, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp;
	struct disk *dk = &dksc->sc_dkdev;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%lx)\n", __func__,
	    dksc->sc_xname, dksc, dev, cmd));

	/* ensure that the pseudo disk is open for writes for these commands */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCSDINFO:
	case ODIOCWDINFO:
#endif
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCSSTRATEGY:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	/* ensure that the pseudo-disk is initialized for these */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPARTINFO:
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCLWEDGES:
	case DIOCMWEDGES:
	case DIOCCACHESYNC:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCSDINFO:
	case ODIOCWDINFO:
	case ODIOCGDEFLABEL:
#endif
		if ((dksc->sc_flags & DKF_INITED) == 0)
			return ENXIO;
	}

	error = disk_ioctl(dk, dev, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return error;
	else
		error = 0;

	switch (cmd) {
	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, data, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)data;

		mutex_enter(&dk->dk_openlock);
		dksc->sc_flags |= DKF_LABELLING;

		error = setdisklabel(dksc->sc_dkdev.dk_label,
		    lp, 0, dksc->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(DKLABELDEV(dev),
				    dkd->d_strategy, dksc->sc_dkdev.dk_label,
				    dksc->sc_dkdev.dk_cpulabel);
		}

		dksc->sc_flags &= ~DKF_LABELLING;
		mutex_exit(&dk->dk_openlock);
		break;

	case DIOCKLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_KLABEL;
		else
			dksc->sc_flags &= ~DKF_KLABEL;
		break;

	case DIOCWLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_WLABEL;
		else
			dksc->sc_flags &= ~DKF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, (struct disklabel *)data);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;

		mutex_enter(&dksc->sc_iolock);
		if (dksc->sc_bufq != NULL)
			strlcpy(dks->dks_name,
			    bufq_getstrategyname(dksc->sc_bufq),
			    sizeof(dks->dks_name));
		else
			error = EINVAL;
		mutex_exit(&dksc->sc_iolock);
		dks->dks_paramlen = 0;
		break;
	    }

	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;
		struct bufq_state *new;
		struct bufq_state *old;

		if (dks->dks_param != NULL) {
			return EINVAL;
		}
		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error) {
			return error;
		}
		mutex_enter(&dksc->sc_iolock);
		old = dksc->sc_bufq;
		if (old)
			bufq_move(new, old);
		dksc->sc_bufq = new;
		mutex_exit(&dksc->sc_iolock);
		if (old)
			bufq_free(old);
		break;
	    }

	default:
		error = ENOTTY;
	}

	return error;
}

/*
 * dk_dump is the generic crash dump routine: it writes the requested
 * range of kernel memory to the dump partition.  This requires
 * substantially more framework than {s,w}ddump, and hence is probably
 * much more fragile.
 */

#define DKFF_READYFORDUMP(x)	(((x) & DKF_READYFORDUMP) == DKF_READYFORDUMP)
static volatile int	dk_dumping = 0;

/* ARGSUSED */
int
dk_dump(struct dk_softc *dksc, dev_t dev,
    daddr_t blkno, void *vav, size_t size)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	char *va = vav;
	struct disklabel *lp;
	struct partition *p;
	int part, towrt, nsects, sectoff, maxblkcnt, nblk;
	int maxxfer, rv = 0;

	/*
	 * ensure that we consider this device to be safe for dumping,
	 * and that the device is configured.
	 */
	if (!DKFF_READYFORDUMP(dksc->sc_flags)) {
		DPRINTF(DKDB_DUMP, ("%s: bad dump flags 0x%x\n", __func__,
		    dksc->sc_flags));
		return ENXIO;
	}

	if (dkd->d_dumpblocks == NULL) {
		DPRINTF(DKDB_DUMP, ("%s: no dumpblocks\n", __func__));
		return ENXIO;
	}

	/* ensure that we are not already dumping */
	if (dk_dumping)
		return EFAULT;
	dk_dumping = 1;

	/* device specific max transfer size */
	maxxfer = MAXPHYS;
	if (dkd->d_iosize != NULL)
		(*dkd->d_iosize)(dksc->sc_dev, &maxxfer);

	/* Convert to disk sectors.  The request must be a multiple of the
	   sector size. */
	part = DISKPART(dev);
	lp = dksc->sc_dkdev.dk_label;
	if ((size % lp->d_secsize) != 0) {
		DPRINTF(DKDB_DUMP, ("%s: odd size %zu\n", __func__, size));
		dk_dumping = 0;
		return EFAULT;
	}
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize; /* blkno in secsize units */

	p = &lp->d_partitions[part];
	if (p->p_fstype != FS_SWAP) {
		DPRINTF(DKDB_DUMP, ("%s: bad fstype %d\n", __func__,
		    p->p_fstype));
		dk_dumping = 0;
		return ENXIO;
	}
	nsects = p->p_size;
	sectoff = p->p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + towrt) > nsects)) {
		DPRINTF(DKDB_DUMP, ("%s: out of bounds blkno=%jd, towrt=%d, "
		    "nsects=%d\n", __func__, (intmax_t)blkno, towrt, nsects));
		dk_dumping = 0;
		return EINVAL;
	}

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Start dumping and return when done. */
	maxblkcnt = howmany(maxxfer, lp->d_secsize);
	while (towrt > 0) {
		nblk = min(maxblkcnt, towrt);

		if ((rv = (*dkd->d_dumpblocks)(dksc->sc_dev, va, blkno, nblk))
		    != 0) {
			DPRINTF(DKDB_DUMP, ("%s: dumpblocks %d\n", __func__,
			    rv));
			dk_dumping = 0;
			return rv;
		}

		towrt -= nblk;
		blkno += nblk;
		va += nblk * lp->d_secsize;
	}

	dk_dumping = 0;

	return 0;
}

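/*
 * Build a default in-core disklabel from the disk geometry, with a
 * single RAW_PART partition covering the whole unit.
 */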
/* ARGSUSED */
void
dk_getdefaultlabel(struct dk_softc *dksc, struct disklabel *lp)
{
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	memset(lp, 0, sizeof(*lp));

	if (dg->dg_secperunit > UINT32_MAX)
		lp->d_secperunit = UINT32_MAX;
	else
		lp->d_secperunit = dg->dg_secperunit;
	lp->d_secsize = dg->dg_secsize;
	lp->d_nsectors = dg->dg_nsectors;
	lp->d_ntracks = dg->dg_ntracks;
	lp->d_ncylinders = dg->dg_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strlcpy(lp->d_typename, dksc->sc_xname, sizeof(lp->d_typename));
	lp->d_type = dksc->sc_dtype;
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = lp->d_secperunit;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(dksc->sc_dkdev.dk_label);
}

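/*
 * Read the disklabel from the media, falling back to a fabricated
 * default label if none is found, and optionally sanity-check the
 * label against the disk geometry.
 */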
/* ARGSUSED */
void
dk_getdisklabel(struct dk_softc *dksc, dev_t dev)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = dksc->sc_dkdev.dk_cpulabel;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct partition *pp;
	int i;
	const char *errstring;

	memset(clp, 0x0, sizeof(*clp));
	dk_getdefaultlabel(dksc, lp);
	errstring = readdisklabel(DKLABELDEV(dev), dkd->d_strategy,
	    dksc->sc_dkdev.dk_label, dksc->sc_dkdev.dk_cpulabel);
	if (errstring) {
		dk_makedisklabel(dksc);
		if (dksc->sc_flags & DKF_WARNLABEL)
			printf("%s: %s\n", dksc->sc_xname, errstring);
		return;
	}

	if ((dksc->sc_flags & DKF_LABELSANITY) == 0)
		return;

	/* Sanity check */
	if (lp->d_secperunit < UINT32_MAX ?
	    lp->d_secperunit != dg->dg_secperunit :
	    lp->d_secperunit > dg->dg_secperunit)
		printf("WARNING: %s: total sector size in disklabel (%ju) "
		    "!= the size of %s (%ju)\n", dksc->sc_xname,
		    (uintmax_t)lp->d_secperunit, dksc->sc_xname,
		    (uintmax_t)dg->dg_secperunit);

	for (i=0; i < lp->d_npartitions; i++) {
		pp = &lp->d_partitions[i];
		if (pp->p_offset + pp->p_size > dg->dg_secperunit)
			printf("WARNING: %s: end of partition `%c' exceeds "
			    "the size of %s (%ju)\n", dksc->sc_xname,
			    'a' + i, dksc->sc_xname,
			    (uintmax_t)dg->dg_secperunit);
	}
}

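/*
 * Turn the default label into a minimally useful one by marking
 * RAW_PART as FS_BSDFFS and recomputing the checksum.
 */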
/* ARGSUSED */
static void
dk_makedisklabel(struct dk_softc *dksc)
{
	struct disklabel *lp = dksc->sc_dkdev.dk_label;

	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
	strlcpy(lp->d_packname, "default label", sizeof(lp->d_packname));
	lp->d_checksum = dkcksum(lp);
}

/* This function is taken from ccd.c:1.76  --rcd */

/*
 * XXX this function looks too generic for dksubr.c, shouldn't we
 * put it somewhere better?
 */

/*
 * Look up the provided name in the filesystem.  If the file exists,
 * is a valid block device, and isn't being used by anyone else,
 * set *vpp to the file's vnode.
 */
int
dk_lookup(struct pathbuf *pb, struct lwp *l, struct vnode **vpp)
{
	struct nameidata nd;
	struct vnode *vp;
	int error;

	if (l == NULL)
		return ESRCH;	/* Is ESRCH the best choice? */

	NDINIT(&nd, LOOKUP, FOLLOW, pb);
	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
		DPRINTF((DKDB_FOLLOW|DKDB_INIT),
		    ("%s: vn_open error = %d\n", __func__, error));
		return error;
	}

	vp = nd.ni_vp;
	if (vp->v_type != VBLK) {
		error = ENOTBLK;
		goto out;
	}

	/* Reopen as anonymous vnode to protect against forced unmount. */
	if ((error = bdevvp(vp->v_rdev, vpp)) != 0)
		goto out;
	VOP_UNLOCK(vp);
	if ((error = vn_close(vp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
		vrele(*vpp);
		return error;
	}
	mutex_enter((*vpp)->v_interlock);
	(*vpp)->v_writecount++;
	mutex_exit((*vpp)->v_interlock);

	IFDEBUG(DKDB_VNODE, vprint("dk_lookup: vnode info", *vpp));

	return 0;
out:
	VOP_UNLOCK(vp);
	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);
	return error;
}

MODULE(MODULE_CLASS_MISC, dk_subr, NULL);

static int
dk_subr_modcmd(modcmd_t cmd, void *arg)
{
	switch (cmd) {
	case MODULE_CMD_INIT:
	case MODULE_CMD_FINI:
		return 0;
	case MODULE_CMD_STAT:
	case MODULE_CMD_AUTOUNLOAD:
	default:
		return ENOTTY;
	}
}