Mercurial > illumos > illumos-gate
annotate usr/src/uts/common/fs/zfs/dnode.c @ 2082:76b439ec3ac1
PSARC 2006/223 ZFS Hot Spares
PSARC 2006/303 ZFS Clone Promotion
6276916 support for "clone swap"
6288488 du reports misleading size on RAID-Z
6393490 libzfs should be a real library
6397148 fbufs debug code should be removed from buf_hash_insert()
6405966 Hot Spare support in ZFS
6409302 passing a non-root vdev via zpool_create() panics system
6415739 assertion failed: !(zio->io_flags & 0x00040)
6416759 ::dbufs does not find bonus buffers anymore
6417978 double parity RAID-Z a.k.a. RAID6
6424554 full block re-writes need not read data in
6425111 detaching an offline device can result in import confusion
author | eschrock |
---|---|
date | Tue, 30 May 2006 15:47:16 -0700 |
parents | 2960cf15fee6 |
children | 45c1310316ff |
rev | line source |
---|---|
789 | 1 /* |
2 * CDDL HEADER START | |
3 * | |
4 * The contents of this file are subject to the terms of the | |
1491
bdcb30e07e7d
6389368 fat zap should use 16k blocks (with backwards compatability)
ahrens
parents:
1402
diff
changeset
|
5 * Common Development and Distribution License (the "License"). |
bdcb30e07e7d
6389368 fat zap should use 16k blocks (with backwards compatability)
ahrens
parents:
1402
diff
changeset
|
6 * You may not use this file except in compliance with the License. |
789 | 7 * |
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 * or http://www.opensolaris.org/os/licensing. | |
10 * See the License for the specific language governing permissions | |
11 * and limitations under the License. | |
12 * | |
13 * When distributing Covered Code, include this CDDL HEADER in each | |
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 * If applicable, add the following below this CDDL HEADER, with the | |
16 * fields enclosed by brackets "[]" replaced with your own identifying | |
17 * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 * | |
19 * CDDL HEADER END | |
20 */ | |
21 /* | |
1402 | 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. |
789 | 23 * Use is subject to license terms. |
24 */ | |
25 | |
26 #pragma ident "%Z%%M% %I% %E% SMI" | |
27 | |
28 #include <sys/zfs_context.h> | |
29 #include <sys/dbuf.h> | |
30 #include <sys/dnode.h> | |
31 #include <sys/dmu.h> | |
32 #include <sys/dmu_impl.h> | |
33 #include <sys/dmu_tx.h> | |
34 #include <sys/dmu_objset.h> | |
35 #include <sys/dsl_dir.h> | |
36 #include <sys/dsl_dataset.h> | |
37 #include <sys/spa.h> | |
38 #include <sys/zio.h> | |
39 #include <sys/dmu_zfetch.h> | |
40 | |
41 static int free_range_compar(const void *node1, const void *node2); | |
42 | |
43 static kmem_cache_t *dnode_cache; | |
44 | |
45 static dnode_phys_t dnode_phys_zero; | |
46 | |
47 int zfs_default_bs = SPA_MINBLOCKSHIFT; | |
48 int zfs_default_ibs = DN_MAX_INDBLKSHIFT; | |
49 | |
/* ARGSUSED */
/*
 * kmem cache constructor for dnode_t: zero the object and set up the
 * locks, refcounts, per-txg free-range AVL trees, dirty-dbuf lists,
 * and the list of child dbufs.  Never fails (always returns 0).
 */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
	int i;
	dnode_t *dn = arg;
	bzero(dn, sizeof (dnode_t));

	rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
	refcount_create(&dn->dn_holds);
	refcount_create(&dn->dn_tx_holds);

	/* one free-range tree and one dirty-dbuf list per in-flight txg */
	for (i = 0; i < TXG_SIZE; i++) {
		avl_create(&dn->dn_ranges[i], free_range_compar,
		    sizeof (free_range_t),
		    offsetof(struct free_range, fr_node));
		list_create(&dn->dn_dirty_dbufs[i],
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_dirty_node[i]));
	}

	list_create(&dn->dn_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	return (0);
}
78 | |
/* ARGSUSED */
/*
 * kmem cache destructor for dnode_t: tear down exactly what
 * dnode_cons() created, in the reverse sense.
 */
static void
dnode_dest(void *arg, void *unused)
{
	int i;
	dnode_t *dn = arg;

	rw_destroy(&dn->dn_struct_rwlock);
	mutex_destroy(&dn->dn_mtx);
	mutex_destroy(&dn->dn_dbufs_mtx);
	refcount_destroy(&dn->dn_holds);
	refcount_destroy(&dn->dn_tx_holds);

	for (i = 0; i < TXG_SIZE; i++) {
		avl_destroy(&dn->dn_ranges[i]);
		list_destroy(&dn->dn_dirty_dbufs[i]);
	}

	list_destroy(&dn->dn_dbufs);
}
99 | |
/*
 * Module init: create the global dnode_t kmem cache used by
 * dnode_create().  Called once at DMU startup.
 */
void
dnode_init(void)
{
	dnode_cache = kmem_cache_create("dnode_t",
	    sizeof (dnode_t),
	    0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
}
107 | |
/*
 * Module fini: destroy the dnode cache created by dnode_init().
 * All dnodes must have been freed back to the cache by now.
 */
void
dnode_fini(void)
{
	kmem_cache_destroy(dnode_cache);
}
113 | |
114 | |
873
adefbfa5f42d
6347448 non ZFS_DEBUG kernels shouldn't call empty verify functions
ek110237
parents:
789
diff
changeset
|
115 #ifdef ZFS_DEBUG |
/*
 * Debug-only consistency check of a dnode's in-core state against its
 * on-disk dnode_phys_t.  Cheap invariants are always checked; the
 * expensive ones run only when ZFS_DEBUG_DNODE_VERIFY is set in
 * zfs_flags.  Takes dn_struct_rwlock as reader if the caller does not
 * already hold it as writer.
 */
void
dnode_verify(dnode_t *dn)
{
	int drop_struct_lock = FALSE;

	ASSERT(dn->dn_phys);
	ASSERT(dn->dn_objset);

	ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES);

	if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
		return;

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
		int i;
		ASSERT3U(dn->dn_indblkshift, >=, 0);
		ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
		/* dn_datablkshift is 0 when the block size is not a power of 2 */
		if (dn->dn_datablkshift) {
			ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
			ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
			ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
		}
		ASSERT3U(dn->dn_nlevels, <=, 30);
		ASSERT3U(dn->dn_type, <=, DMU_OT_NUMTYPES);
		ASSERT3U(dn->dn_nblkptr, >=, 1);
		ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
		ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		ASSERT3U(dn->dn_datablksz, ==,
		    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
		/* blkptrs and bonus buffer share the tail of the dnode */
		ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
		    dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		for (i = 0; i < TXG_SIZE; i++) {
			ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
		}
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE)
		ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
	/* only the meta-dnode lives outside a containing dbuf */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || dn->dn_dbuf != NULL);
	if (dn->dn_dbuf != NULL) {
		/* dn_phys must point at our slot inside the containing dbuf */
		ASSERT3P(dn->dn_phys, ==,
		    (dnode_phys_t *)dn->dn_dbuf->db.db_data +
		    (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
	}
	if (drop_struct_lock)
		rw_exit(&dn->dn_struct_rwlock);
}
789 | 167 #endif |
168 | |
/*
 * Byteswap a single on-disk dnode in place: the fixed-width header
 * fields, the embedded block pointers, and (via the type's byteswap
 * op) the bonus buffer.  A freed dnode (DMU_OT_NONE) is simply
 * zeroed.
 */
void
dnode_byteswap(dnode_phys_t *dnp)
{
	uint64_t *buf64 = (void*)&dnp->dn_blkptr;
	int i;

	if (dnp->dn_type == DMU_OT_NONE) {
		bzero(dnp, sizeof (dnode_phys_t));
		return;
	}

	dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
	dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
	dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
	dnp->dn_used = BSWAP_64(dnp->dn_used);

	/*
	 * dn_nblkptr is only one byte, so it's OK to read it in either
	 * byte order.  We can't read dn_bonuslen.
	 */
	ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
	ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
	/* swap every 64-bit word of the embedded blkptr array */
	for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
		buf64[i] = BSWAP_64(buf64[i]);

	/*
	 * OK to check dn_bonuslen for zero, because it won't matter if
	 * we have the wrong byte order.  This is necessary because the
	 * dnode dnode is smaller than a regular dnode.
	 */
	if (dnp->dn_bonuslen != 0) {
		/*
		 * Note that the bonus length calculated here may be
		 * longer than the actual bonus buffer.  This is because
		 * we always put the bonus buffer after the last block
		 * pointer (instead of packing it against the end of the
		 * dnode buffer).
		 */
		int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
		size_t len = DN_MAX_BONUSLEN - off;
		dmu_ot[dnp->dn_bonustype].ot_byteswap(dnp->dn_bonus + off, len);
	}
}
212 | |
213 void | |
214 dnode_buf_byteswap(void *vbuf, size_t size) | |
215 { | |
216 dnode_phys_t *buf = vbuf; | |
217 int i; | |
218 | |
219 ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT)); | |
220 ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0); | |
221 | |
222 size >>= DNODE_SHIFT; | |
223 for (i = 0; i < size; i++) { | |
224 dnode_byteswap(buf); | |
225 buf++; | |
226 } | |
227 } | |
228 | |
229 static int | |
230 free_range_compar(const void *node1, const void *node2) | |
231 { | |
232 const free_range_t *rp1 = node1; | |
233 const free_range_t *rp2 = node2; | |
234 | |
235 if (rp1->fr_blkid < rp2->fr_blkid) | |
236 return (-1); | |
237 else if (rp1->fr_blkid > rp2->fr_blkid) | |
238 return (1); | |
239 else return (0); | |
240 } | |
241 | |
/*
 * Record a new data block size on the in-core dnode, keeping the
 * byte count, sector count, and shift fields consistent.  size must
 * be a multiple of SPA_MINBLOCKSIZE within [SPA_MINBLOCKSIZE,
 * SPA_MAXBLOCKSIZE].
 */
static void
dnode_setdblksz(dnode_t *dn, int size)
{
	ASSERT3U(P2PHASE(size, SPA_MINBLOCKSIZE), ==, 0);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
	/* must fit in the 16-bit on-disk dn_datablkszsec field */
	ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
	    1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
	dn->dn_datablksz = size;
	dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
	/* shift is only meaningful for power-of-2 sizes; else 0 */
	dn->dn_datablkshift = ISP2(size) ? highbit(size - 1) : 0;
}
254 | |
/*
 * Allocate and initialize an in-core dnode for the on-disk dnode
 * *dnp, which lives at slot `object` inside the containing dbuf `db`
 * (db is NULL only for the special dnodes opened via
 * dnode_special_open()).  The new dnode is linked onto the objset's
 * list of dnodes.
 */
static dnode_t *
dnode_create(objset_impl_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
    uint64_t object)
{
	dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
	(void) dnode_cons(dn, NULL, 0); /* XXX */

	dn->dn_objset = os;
	dn->dn_object = object;
	dn->dn_dbuf = db;
	dn->dn_phys = dnp;

	/* a freed dnode has dn_datablkszsec == 0; leave the size unset */
	if (dnp->dn_datablkszsec)
		dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	dn->dn_indblkshift = dnp->dn_indblkshift;
	dn->dn_nlevels = dnp->dn_nlevels;
	dn->dn_type = dnp->dn_type;
	dn->dn_nblkptr = dnp->dn_nblkptr;
	dn->dn_checksum = dnp->dn_checksum;
	dn->dn_compress = dnp->dn_compress;
	dn->dn_bonustype = dnp->dn_bonustype;
	dn->dn_bonuslen = dnp->dn_bonuslen;
	dn->dn_maxblkid = dnp->dn_maxblkid;

	dmu_zfetch_init(&dn->dn_zfetch, dn);

	ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES);
	mutex_enter(&os->os_lock);
	list_insert_head(&os->os_dnodes, dn);
	mutex_exit(&os->os_lock);

	return (dn);
}
288 | |
/*
 * Undo dnode_create(): unlink the dnode from its objset, release
 * auxiliary state (dirty-context buffer, zfetch, bonus dbuf), and
 * return the dnode to the kmem cache.  Caller must guarantee there
 * are no remaining holds.
 */
static void
dnode_destroy(dnode_t *dn)
{
	objset_impl_t *os = dn->dn_objset;

	mutex_enter(&os->os_lock);
	list_remove(&os->os_dnodes, dn);
	mutex_exit(&os->os_lock);

	if (dn->dn_dirtyctx_firstset) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}
	dmu_zfetch_rele(&dn->dn_zfetch);
	/* evict the bonus dbuf, if one was instantiated */
	if (dn->dn_bonus) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	kmem_cache_free(dnode_cache, dn);
}
310 | |
/*
 * Turn a free dnode into an allocated object of type `ot` in the
 * transaction `tx`.  blocksize == 0 and ibs == 0 select the defaults;
 * otherwise both are clamped to valid on-disk ranges.  bonustype and
 * bonuslen must be both set or both zero.  The dnode is marked dirty
 * so the new shape is written out with this txg.
 */
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int i;

	/* normalize the requested data block size */
	if (blocksize == 0)
		blocksize = 1 << zfs_default_bs;
	else if (blocksize > SPA_MAXBLOCKSIZE)
		blocksize = SPA_MAXBLOCKSIZE;
	else
		blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);

	if (ibs == 0)
		ibs = zfs_default_ibs;

	/* clamp the indirect block shift to its legal range */
	ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);

	dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
	    dn->dn_object, tx->tx_txg, blocksize, ibs);

	/* the dnode must currently be free and completely clean */
	ASSERT(dn->dn_type == DMU_OT_NONE);
	ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
	ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
	ASSERT(ot != DMU_OT_NONE);
	ASSERT3U(ot, <, DMU_OT_NUMTYPES);
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0));
	ASSERT3U(bonustype, <, DMU_OT_NUMTYPES);
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
	ASSERT(dn->dn_type == DMU_OT_NONE);
	ASSERT3U(dn->dn_maxblkid, ==, 0);
	ASSERT3U(dn->dn_allocated_txg, ==, 0);
	ASSERT3U(dn->dn_assigned_txg, ==, 0);
	ASSERT(refcount_is_zero(&dn->dn_tx_holds));
	ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT3U(dn->dn_next_nlevels[i], ==, 0);
		ASSERT3U(dn->dn_next_indblkshift[i], ==, 0);
		ASSERT3U(dn->dn_next_blksz[i], ==, 0);
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
		ASSERT3P(list_head(&dn->dn_dirty_dbufs[i]), ==, NULL);
		ASSERT3U(avl_numnodes(&dn->dn_ranges[i]), ==, 0);
	}

	dn->dn_type = ot;
	dnode_setdblksz(dn, blocksize);
	dn->dn_indblkshift = ibs;
	dn->dn_nlevels = 1;
	/* blkptrs fill whatever space the bonus buffer doesn't use */
	dn->dn_nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	dn->dn_dirtyctx = 0;

	dn->dn_free_txg = 0;
	if (dn->dn_dirtyctx_firstset) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}

	dn->dn_allocated_txg = tx->tx_txg;

	/* dirty the dnode, then record the new shape for this txg */
	dnode_setdirty(dn, tx);
	dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
	dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
381 | |
/*
 * Re-purpose an already-allocated dnode as a new object of type `ot`,
 * possibly with a different block size and bonus buffer.  Old data is
 * freed if the shape changes.  Used e.g. when an object number is
 * recycled with different properties.  The dnode must not be dirty in
 * any txg when called.
 */
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int i;

	ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
	ASSERT3U(blocksize, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(blocksize % SPA_MINBLOCKSIZE, ==, 0);
	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
	ASSERT(tx->tx_txg != 0);
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0));
	ASSERT3U(bonustype, <, DMU_OT_NUMTYPES);
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);

	for (i = 0; i < TXG_SIZE; i++)
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));

	/* clean up any unreferenced dbufs */
	(void) dnode_evict_dbufs(dn, 0);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX I should really have a generation number to tell if we
	 * need to do this...
	 */
	if (blocksize != dn->dn_datablksz ||
	    dn->dn_bonustype != bonustype || dn->dn_bonuslen != bonuslen) {
		/* free all old data */
		dnode_free_range(dn, 0, -1ULL, tx);
	}

	/* change blocksize */
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_setdblksz(dn, blocksize);
	dnode_setdirty(dn, tx);
	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
	rw_exit(&dn->dn_struct_rwlock);

	/* change type */
	dn->dn_type = ot;

	if (dn->dn_bonuslen != bonuslen) {
		dmu_buf_impl_t *db = NULL;

		/* change bonus size */
		if (bonuslen == 0)
			bonuslen = 1; /* XXX */
		/* instantiate the bonus dbuf under the struct lock */
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dn->dn_bonus = dbuf_create_bonus(dn);
		db = dn->dn_bonus;
		rw_exit(&dn->dn_struct_rwlock);
		/* first hold on the bonus dbuf adds a hold on the dnode */
		if (refcount_add(&db->db_holds, FTAG) == 1)
			dnode_add_ref(dn, db);
		VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED));
		mutex_enter(&db->db_mtx);
		ASSERT3U(db->db.db_size, ==, dn->dn_bonuslen);
		ASSERT(db->db.db_data != NULL);
		db->db.db_size = bonuslen;
		mutex_exit(&db->db_mtx);
		dbuf_dirty(db, tx);
		dbuf_rele(db, FTAG);
	}

	/* change bonus size and type */
	mutex_enter(&dn->dn_mtx);
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);

	dn->dn_allocated_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);
}
460 | |
/*
 * Close a dnode opened with dnode_special_open().  Spins until any
 * transient holds drain before destroying it.
 */
void
dnode_special_close(dnode_t *dn)
{
	/*
	 * Wait for final references to the dnode to clear.  This can
	 * only happen if the arc is asynchronously evicting state that
	 * has a hold on this dnode while we are trying to evict this
	 * dnode.
	 */
	while (refcount_count(&dn->dn_holds) > 0)
		delay(1);
	dnode_destroy(dn);
}
474 | |
/*
 * Open a "special" dnode (e.g. the meta-dnode) that is not backed by
 * a containing dbuf; dnp points directly at its on-disk image.
 */
dnode_t *
dnode_special_open(objset_impl_t *os, dnode_phys_t *dnp, uint64_t object)
{
	dnode_t *dn = dnode_create(os, dnp, NULL, object);
	DNODE_VERIFY(dn);
	return (dn);
}
482 | |
/*
 * dbuf pageout callback for a dnode-holding dbuf: destroy every
 * child dnode that was instantiated from this buffer, then free the
 * children array itself.  Registered via dmu_buf_set_user() in
 * dnode_hold_impl().
 */
static void
dnode_buf_pageout(dmu_buf_t *db, void *arg)
{
	dnode_t **children_dnodes = arg;
	int i;
	int epb = db->db_size >> DNODE_SHIFT;	/* dnodes per buffer */

	for (i = 0; i < epb; i++) {
		dnode_t *dn = children_dnodes[i];
		int n;

		if (dn == NULL)
			continue;
#ifdef ZFS_DEBUG
		/*
		 * If there are holds on this dnode, then there should
		 * be holds on the dnode's containing dbuf as well; thus
		 * it wouldn't be eligible for eviction and this function
		 * would not have been called.
		 */
		ASSERT(refcount_is_zero(&dn->dn_holds));
		ASSERT(list_head(&dn->dn_dbufs) == NULL);
		ASSERT(refcount_is_zero(&dn->dn_tx_holds));

		for (n = 0; n < TXG_SIZE; n++)
			ASSERT(!list_link_active(&dn->dn_dirty_link[n]));
#endif
		children_dnodes[i] = NULL;
		dnode_destroy(dn);
	}
	kmem_free(children_dnodes, epb * sizeof (dnode_t *));
}
515 | |
/*
 * errors:
 * EINVAL - invalid object number.
 * EIO - i/o error.
 * succeeds even for free dnodes.
 */
/*
 * Look up object `object` in objset `os`, instantiate its in-core
 * dnode if necessary, and return it held (by `tag`) in *dnp.  `flag`
 * selects DNODE_MUST_BE_ALLOCATED / DNODE_MUST_BE_FREE semantics;
 * a mismatch returns ENOENT / EEXIST respectively.
 */
int
dnode_hold_impl(objset_impl_t *os, uint64_t object, int flag,
    void *tag, dnode_t **dnp)
{
	int epb, idx, err;
	int drop_struct_lock = FALSE;
	int type;
	uint64_t blk;
	dnode_t *mdn, *dn;
	dmu_buf_impl_t *db;
	dnode_t **children_dnodes;

	if (object == 0 || object >= DN_MAX_OBJECT)
		return (EINVAL);

	mdn = os->os_meta_dnode;

	DNODE_VERIFY(mdn);

	if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
		rw_enter(&mdn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	/* find the meta-dnode block holding this object's dnode_phys_t */
	blk = dbuf_whichblock(mdn, object * sizeof (dnode_phys_t));

	db = dbuf_hold(mdn, blk, FTAG);
	if (drop_struct_lock)
		rw_exit(&mdn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
	if (err) {
		dbuf_rele(db, FTAG);
		return (err);
	}

	ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
	epb = db->db.db_size >> DNODE_SHIFT;

	idx = object & (epb-1);

	/* lazily attach the per-buffer array of child dnode pointers */
	children_dnodes = dmu_buf_get_user(&db->db);
	if (children_dnodes == NULL) {
		dnode_t **winner;
		children_dnodes = kmem_zalloc(epb * sizeof (dnode_t *),
		    KM_SLEEP);
		/* another thread may have raced us to set the user data */
		if (winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
		    dnode_buf_pageout)) {
			kmem_free(children_dnodes, epb * sizeof (dnode_t *));
			children_dnodes = winner;
		}
	}

	if ((dn = children_dnodes[idx]) == NULL) {
		dnode_t *winner;
		dn = dnode_create(os, (dnode_phys_t *)db->db.db_data+idx,
		    db, object);
		/* resolve the instantiation race with an atomic CAS */
		winner = atomic_cas_ptr(&children_dnodes[idx], NULL, dn);
		if (winner != NULL) {
			dnode_destroy(dn);
			dn = winner;
		}
	}

	mutex_enter(&dn->dn_mtx);
	type = dn->dn_type;
	if (dn->dn_free_txg ||
	    ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) ||
	    ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)) {
		mutex_exit(&dn->dn_mtx);
		dbuf_rele(db, FTAG);
		return (type == DMU_OT_NONE ? ENOENT : EEXIST);
	}
	mutex_exit(&dn->dn_mtx);

	/* the first hold on the dnode adds a hold on its containing dbuf */
	if (refcount_add(&dn->dn_holds, tag) == 1)
		dbuf_add_ref(db, dn);

	DNODE_VERIFY(dn);
	ASSERT3P(dn->dn_dbuf, ==, db);
	ASSERT3U(dn->dn_object, ==, object);
	dbuf_rele(db, FTAG);

	*dnp = dn;
	return (0);
}
609 | |
/*
 * Return held dnode if the object is allocated, NULL if not.
 */
/* Convenience wrapper: hold that insists the object be allocated. */
int
dnode_hold(objset_impl_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
	return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp));
}
618 | |
/*
 * Add a hold (tagged with `tag`) to an already-held dnode.  The
 * caller must already have at least one hold; a zero-to-one
 * transition must go through dnode_hold_impl() instead.
 */
void
dnode_add_ref(dnode_t *dn, void *tag)
{
	ASSERT(refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, tag);
}
625 | |
626 void | |
1544 | 627 dnode_rele(dnode_t *dn, void *tag) |
789 | 628 { |
629 uint64_t refs; | |
630 | |
1544 | 631 refs = refcount_remove(&dn->dn_holds, tag); |
789 | 632 /* NOTE: the DNODE_DNODE does not have a dn_dbuf */ |
633 if (refs == 0 && dn->dn_dbuf) | |
1544 | 634 dbuf_rele(dn->dn_dbuf, dn); |
789 | 635 } |
636 | |
/*
 * Mark the dnode dirty in transaction `tx`: link it onto the objset's
 * dirty (or free) list for the txg, take a "dirty hold", and dirty
 * the containing dbuf and dataset.  No-op for the meta-dnode and for
 * dnodes already dirty in this txg.
 */
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	uint64_t txg = tx->tx_txg;

	if (dn->dn_object == DMU_META_DNODE_OBJECT)
		return;

	DNODE_VERIFY(dn);

#ifdef ZFS_DEBUG
	mutex_enter(&dn->dn_mtx);
	ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
	/* ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg); */
	mutex_exit(&dn->dn_mtx);
#endif

	mutex_enter(&os->os_lock);

	/*
	 * If we are already marked dirty, we're done.
	 */
	if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
		mutex_exit(&os->os_lock);
		return;
	}

	ASSERT(!refcount_is_zero(&dn->dn_holds) || list_head(&dn->dn_dbufs));
	ASSERT(dn->dn_datablksz != 0);
	ASSERT3U(dn->dn_next_blksz[txg&TXG_MASK], ==, 0);

	dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
	    dn->dn_object, txg);

	/* a dnode being freed in this txg goes on the free list instead */
	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
		list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
	} else {
		list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
	}

	mutex_exit(&os->os_lock);

	/*
	 * The dnode maintains a hold on its containing dbuf as
	 * long as there are holds on it.  Each instantiated child
	 * dbuf maintains a hold on the dnode.  When the last child
	 * drops its hold, the dnode will drop its hold on the
	 * containing dbuf.  We add a "dirty hold" here so that the
	 * dnode will hang around after we finish processing its
	 * children.
	 */
	dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg);

	dbuf_dirty(dn->dn_dbuf, tx);

	dsl_dataset_dirty(os->os_dsl_dataset, tx);
}
695 | |
/*
 * Schedule the dnode to be freed in transaction `tx`.  Records
 * dn_free_txg and moves the dnode from the txg's dirty list to its
 * free list (dirtying it first if it wasn't dirty).  No-op if the
 * dnode is already free or already being freed.
 */
void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);

	/* we should be the only holder... hopefully */
	/* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
		mutex_exit(&dn->dn_mtx);
		return;
	}
	dn->dn_free_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);

	/*
	 * If the dnode is already dirty, it needs to be moved from
	 * the dirty list to the free list.
	 */
	mutex_enter(&dn->dn_objset->os_lock);
	if (list_link_active(&dn->dn_dirty_link[txgoff])) {
		list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn);
		list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn);
		mutex_exit(&dn->dn_objset->os_lock);
	} else {
		mutex_exit(&dn->dn_objset->os_lock);
		/* not dirty yet: dnode_setdirty() sees dn_free_txg set */
		dnode_setdirty(dn, tx);
	}
}
728 | |
/*
 * Try to change the block size for the indicated dnode.  This can only
 * succeed if there are no blocks allocated or dirty beyond first block
 */
/*
 * Returns 0 on success (or when the size/shift already match),
 * ENOTSUP when blocks beyond block 0 exist.  size == 0 / ibs == 0
 * mean "keep a minimal size" / "keep the current indirect shift".
 */
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	int have_db0 = FALSE;
	int err = ENOTSUP;

	/* normalize the requested size */
	if (size == 0)
		size = SPA_MINBLOCKSIZE;
	if (size > SPA_MAXBLOCKSIZE)
		size = SPA_MAXBLOCKSIZE;
	else
		size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);

	if (ibs == 0)
		ibs = dn->dn_indblkshift;

	/* nothing to do if the shape already matches */
	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec &&
	    ibs == dn->dn_indblkshift)
		return (0);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* Check for any allocated blocks beyond the first */
	if (dn->dn_phys->dn_maxblkid != 0)
		goto end;

	/* only block 0 and the bonus buffer may be instantiated */
	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);

		if (db->db_blkid == 0) {
			have_db0 = TRUE;
		} else if (db->db_blkid != DB_BONUS_BLKID) {
			mutex_exit(&dn->dn_dbufs_mtx);
			goto end;
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	db = NULL;
	if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) || have_db0) {
		/* obtain the old block */
		db = dbuf_hold(dn, 0, FTAG);
		dbuf_new_size(db, size, tx);
	}

	dnode_setdblksz(dn, size);
	dnode_setdirty(dn, tx);
	/* record the new shape for this txg's sync */
	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
	dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;

	if (db)
		dbuf_rele(db, FTAG);

	err = 0;
end:
	rw_exit(&dn->dn_struct_rwlock);
	return (err);
}
794 | |
795 uint64_t | |
796 dnode_max_nonzero_offset(dnode_t *dn) | |
797 { | |
798 if (dn->dn_phys->dn_maxblkid == 0 && | |
799 BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0])) | |
800 return (0); | |
801 else | |
802 return ((dn->dn_phys->dn_maxblkid+1) * dn->dn_datablksz); | |
803 } | |
804 | |
805 void | |
806 dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx) | |
807 { | |
808 uint64_t txgoff = tx->tx_txg & TXG_MASK; | |
809 int drop_struct_lock = FALSE; | |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
810 int epbs, new_nlevels; |
789 | 811 uint64_t sz; |
812 | |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
813 ASSERT(blkid != DB_BONUS_BLKID); |
789 | 814 |
815 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { | |
816 rw_enter(&dn->dn_struct_rwlock, RW_WRITER); | |
817 drop_struct_lock = TRUE; | |
818 } | |
819 | |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
820 if (blkid <= dn->dn_maxblkid) |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
821 goto out; |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
822 |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
823 dn->dn_maxblkid = blkid; |
789 | 824 |
825 /* | |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
826 * Compute the number of levels necessary to support the new maxblkid. |
789 | 827 */ |
828 new_nlevels = 1; | |
829 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; | |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
830 for (sz = dn->dn_nblkptr; |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
831 sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs) |
789 | 832 new_nlevels++; |
833 | |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
834 if (new_nlevels > dn->dn_nlevels) { |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
835 int old_nlevels = dn->dn_nlevels; |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
836 dmu_buf_impl_t *db; |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
837 |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
838 dn->dn_nlevels = new_nlevels; |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
839 |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
840 ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]); |
789 | 841 dn->dn_next_nlevels[txgoff] = new_nlevels; |
842 | |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
843 /* Dirty the left indirects. */ |
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
844 db = dbuf_hold_level(dn, old_nlevels, 0, FTAG); |
789 | 845 dbuf_dirty(db, tx); |
1544 | 846 dbuf_rele(db, FTAG); |
1596
2e2377ccbf85
6395371 ASSERT in dmu_tx_count_free: blkid + i < dn->dn_phys->dn_nblkptr
ahrens
parents:
1544
diff
changeset
|
847 |
789 | 848 } |
849 | |
850 out: | |
851 if (drop_struct_lock) | |
852 rw_exit(&dn->dn_struct_rwlock); | |
853 } | |
854 | |
855 void | |
856 dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx) | |
857 { | |
858 avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK]; | |
859 avl_index_t where; | |
860 free_range_t *rp; | |
861 free_range_t rp_tofind; | |
862 uint64_t endblk = blkid + nblks; | |
863 | |
864 ASSERT(MUTEX_HELD(&dn->dn_mtx)); | |
865 ASSERT(nblks <= UINT64_MAX - blkid); /* no overflow */ | |
866 | |
867 dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n", | |
868 blkid, nblks, tx->tx_txg); | |
869 rp_tofind.fr_blkid = blkid; | |
870 rp = avl_find(tree, &rp_tofind, &where); | |
871 if (rp == NULL) | |
872 rp = avl_nearest(tree, where, AVL_BEFORE); | |
873 if (rp == NULL) | |
874 rp = avl_nearest(tree, where, AVL_AFTER); | |
875 | |
876 while (rp && (rp->fr_blkid <= blkid + nblks)) { | |
877 uint64_t fr_endblk = rp->fr_blkid + rp->fr_nblks; | |
878 free_range_t *nrp = AVL_NEXT(tree, rp); | |
879 | |
880 if (blkid <= rp->fr_blkid && endblk >= fr_endblk) { | |
881 /* clear this entire range */ | |
882 avl_remove(tree, rp); | |
883 kmem_free(rp, sizeof (free_range_t)); | |
884 } else if (blkid <= rp->fr_blkid && | |
885 endblk > rp->fr_blkid && endblk < fr_endblk) { | |
886 /* clear the beginning of this range */ | |
887 rp->fr_blkid = endblk; | |
888 rp->fr_nblks = fr_endblk - endblk; | |
889 } else if (blkid > rp->fr_blkid && blkid < fr_endblk && | |
890 endblk >= fr_endblk) { | |
891 /* clear the end of this range */ | |
892 rp->fr_nblks = blkid - rp->fr_blkid; | |
893 } else if (blkid > rp->fr_blkid && endblk < fr_endblk) { | |
894 /* clear a chunk out of this range */ | |
895 free_range_t *new_rp = | |
896 kmem_alloc(sizeof (free_range_t), KM_SLEEP); | |
897 | |
898 new_rp->fr_blkid = endblk; | |
899 new_rp->fr_nblks = fr_endblk - endblk; | |
900 avl_insert_here(tree, new_rp, rp, AVL_AFTER); | |
901 rp->fr_nblks = blkid - rp->fr_blkid; | |
902 } | |
903 /* there may be no overlap */ | |
904 rp = nrp; | |
905 } | |
906 } | |
907 | |
/*
 * Free the byte range [off, off+len) of this dnode's data.  len == -1ULL
 * means "to the end of the object" (truncate).  Partial blocks at the
 * edges are zeroed in place; the full blocks in between are recorded in
 * dn_ranges and actually freed later, in syncing context.
 */
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	uint64_t start, objsize, blkid, nblks;
	int blkshift, blksz, tail, head, epbs;
	int trunc = FALSE;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	blksz = dn->dn_datablksz;
	blkshift = dn->dn_datablkshift;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	/* If the range is past the end of the file, this is a no-op */
	objsize = blksz * (dn->dn_maxblkid+1);
	if (off >= objsize)
		goto out;
	if (len == -1ULL) {
		/* truncate: free everything from off onward */
		len = UINT64_MAX - off;
		trunc = TRUE;
	}

	/*
	 * First, block align the region to free:
	 */
	if (dn->dn_maxblkid == 0) {
		/*
		 * Single-block object: blksz may not be a power of two,
		 * so the head runs from off to the end of that block.
		 */
		if (off == 0) {
			head = 0;
		} else {
			head = blksz - off;
			ASSERT3U(head, >, 0);
		}
		start = off;
	} else {
		ASSERT(ISP2(blksz));
		head = P2NPHASE(off, blksz);
		start = P2PHASE(off, blksz);
	}
	/* zero out any partial block data at the start of the range */
	if (head) {
		ASSERT3U(start + head, ==, blksz);
		if (len < head)
			head = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off), TRUE,
		    FTAG, &db) == 0) {
			caddr_t data;

			/* don't dirty if it isn't on disk and isn't dirty */
			if (db->db_dirtied ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				/*
				 * Drop dn_struct_rwlock across
				 * dbuf_will_dirty() -- presumably to respect
				 * lock ordering with the dbuf dirtying path;
				 * reacquired before touching dn state again.
				 */
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				data = db->db.db_data;
				bzero(data + start, head);
			}
			dbuf_rele(db, FTAG);
		}
		off += head;
		len -= head;
	}
	/* If the range was less than one block, we are done */
	if (len == 0)
		goto out;

	/* If the remaining range is past the end of the file, we are done */
	if (off > dn->dn_maxblkid << blkshift)
		goto out;

	if (off + len == UINT64_MAX)
		tail = 0;
	else
		tail = P2PHASE(len, blksz);

	ASSERT3U(P2PHASE(off, blksz), ==, 0);
	/* zero out any partial block data at the end of the range */
	if (tail) {
		if (len < tail)
			tail = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off+len),
		    TRUE, FTAG, &db) == 0) {
			/* don't dirty if it isn't on disk and isn't dirty */
			if (db->db_dirtied ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				/* same lock-drop dance as the head case */
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				bzero(db->db.db_data, tail);
			}
			dbuf_rele(db, FTAG);
		}
		len -= tail;
	}
	/* If the range did not include a full block, we are done */
	if (len == 0)
		goto out;

	/* dirty the left indirects */
	if (dn->dn_nlevels > 1 && off != 0) {
		db = dbuf_hold_level(dn, 1,
		    (off - head) >> (blkshift + epbs), FTAG);
		dbuf_will_dirty(db, tx);
		dbuf_rele(db, FTAG);
	}

	/* dirty the right indirects */
	if (dn->dn_nlevels > 1 && !trunc) {
		db = dbuf_hold_level(dn, 1,
		    (off + len + tail - 1) >> (blkshift + epbs), FTAG);
		dbuf_will_dirty(db, tx);
		dbuf_rele(db, FTAG);
	}

	/*
	 * Finally, add this range to the dnode range list, we
	 * will finish up this free operation in the syncing phase.
	 */
	ASSERT(IS_P2ALIGNED(off, 1<<blkshift));
	ASSERT(off + len == UINT64_MAX || IS_P2ALIGNED(len, 1<<blkshift));
	blkid = off >> blkshift;
	nblks = len >> blkshift;

	if (trunc)
		dn->dn_maxblkid = (blkid ? blkid - 1 : 0);

	mutex_enter(&dn->dn_mtx);
	/* remove any stale overlapping record before inserting the new one */
	dnode_clear_range(dn, blkid, nblks, tx);
	{
		free_range_t *rp, *found;
		avl_index_t where;
		avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];

		/* Add new range to dn_ranges */
		rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP);
		rp->fr_blkid = blkid;
		rp->fr_nblks = nblks;
		found = avl_find(tree, rp, &where);
		ASSERT(found == NULL);
		avl_insert(tree, rp, where);
		dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
		    blkid, nblks, tx->tx_txg);
	}
	mutex_exit(&dn->dn_mtx);

	dbuf_free_range(dn, blkid, nblks, tx);
	dnode_setdirty(dn, tx);
out:
	rw_exit(&dn->dn_struct_rwlock);
}
1057 | |
1058 /* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */ | |
1059 uint64_t | |
1060 dnode_block_freed(dnode_t *dn, uint64_t blkid) | |
1061 { | |
1062 free_range_t range_tofind; | |
1063 void *dp = spa_get_dsl(dn->dn_objset->os_spa); | |
1064 int i; | |
1065 | |
1066 if (blkid == DB_BONUS_BLKID) | |
1067 return (FALSE); | |
1068 | |
1069 /* | |
1070 * If we're in the process of opening the pool, dp will not be | |
1071 * set yet, but there shouldn't be anything dirty. | |
1072 */ | |
1073 if (dp == NULL) | |
1074 return (FALSE); | |
1075 | |
1076 if (dn->dn_free_txg) | |
1077 return (TRUE); | |
1078 | |
1079 /* | |
1080 * If dn_datablkshift is not set, then there's only a single | |
1081 * block, in which case there will never be a free range so it | |
1082 * won't matter. | |
1083 */ | |
1084 range_tofind.fr_blkid = blkid; | |
1085 mutex_enter(&dn->dn_mtx); | |
1086 for (i = 0; i < TXG_SIZE; i++) { | |
1087 free_range_t *range_found; | |
1088 avl_index_t idx; | |
1089 | |
1090 range_found = avl_find(&dn->dn_ranges[i], &range_tofind, &idx); | |
1091 if (range_found) { | |
1092 ASSERT(range_found->fr_nblks > 0); | |
1093 break; | |
1094 } | |
1095 range_found = avl_nearest(&dn->dn_ranges[i], idx, AVL_BEFORE); | |
1096 if (range_found && | |
1097 range_found->fr_blkid + range_found->fr_nblks > blkid) | |
1098 break; | |
1099 } | |
1100 mutex_exit(&dn->dn_mtx); | |
1101 return (i < TXG_SIZE); | |
1102 } | |
1103 | |
1104 /* call from syncing context when we actually write/free space for this dnode */ | |
1105 void | |
2082 | 1106 dnode_diduse_space(dnode_t *dn, int64_t delta) |
789 | 1107 { |
2082 | 1108 uint64_t space; |
1109 dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n", | |
789 | 1110 dn, dn->dn_phys, |
2082 | 1111 (u_longlong_t)dn->dn_phys->dn_used, |
1112 (longlong_t)delta); | |
789 | 1113 |
1114 mutex_enter(&dn->dn_mtx); | |
2082 | 1115 space = DN_USED_BYTES(dn->dn_phys); |
1116 if (delta > 0) { | |
1117 ASSERT3U(space + delta, >=, space); /* no overflow */ | |
789 | 1118 } else { |
2082 | 1119 ASSERT3U(space, >=, -delta); /* no underflow */ |
1120 } | |
1121 space += delta; | |
1122 if (spa_version(dn->dn_objset->os_spa) < ZFS_VERSION_DNODE_BYTES) { | |
1123 ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0); | |
1124 ASSERT3U(P2PHASE(space, 1<<DEV_BSHIFT), ==, 0); | |
1125 dn->dn_phys->dn_used = space >> DEV_BSHIFT; | |
1126 } else { | |
1127 dn->dn_phys->dn_used = space; | |
1128 dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES; | |
789 | 1129 } |
1130 mutex_exit(&dn->dn_mtx); | |
1131 } | |
1132 | |
1133 /* | |
1134 * Call when we think we're going to write/free space in open context. | |
1135 * Be conservative (ie. OK to write less than this or free more than | |
1136 * this, but don't write more or free less). | |
1137 */ | |
1138 void | |
1139 dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx) | |
1140 { | |
1141 objset_impl_t *os = dn->dn_objset; | |
1142 dsl_dataset_t *ds = os->os_dsl_dataset; | |
1143 | |
1144 if (space > 0) | |
1145 space = spa_get_asize(os->os_spa, space); | |
1146 | |
1147 if (ds) | |
1148 dsl_dir_willuse_space(ds->ds_dir, space, tx); | |
1149 | |
1150 dmu_tx_willuse_space(tx, space); | |
1151 } | |
1152 | |
1153 static int | |
1154 dnode_next_offset_level(dnode_t *dn, boolean_t hole, uint64_t *offset, | |
1155 int lvl, uint64_t blkfill) | |
1156 { | |
1157 dmu_buf_impl_t *db = NULL; | |
1158 void *data = NULL; | |
1159 uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; | |
1160 uint64_t epb = 1ULL << epbs; | |
1161 uint64_t minfill, maxfill; | |
1162 int i, error, span; | |
1163 | |
1164 dprintf("probing object %llu offset %llx level %d of %u\n", | |
1165 dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels); | |
1166 | |
1167 if (lvl == dn->dn_phys->dn_nlevels) { | |
1168 error = 0; | |
1169 epb = dn->dn_phys->dn_nblkptr; | |
1170 data = dn->dn_phys->dn_blkptr; | |
1171 } else { | |
1172 uint64_t blkid = dbuf_whichblock(dn, *offset) >> (epbs * lvl); | |
1173 error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FTAG, &db); | |
1174 if (error) { | |
1175 if (error == ENOENT) | |
1176 return (hole ? 0 : ESRCH); | |
1177 return (error); | |
1178 } | |
1793
d371fba21a3e
6407444 unhandled i/o error from dnode_next_offset_level()
ahrens
parents:
1646
diff
changeset
|
1179 error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT); |
d371fba21a3e
6407444 unhandled i/o error from dnode_next_offset_level()
ahrens
parents:
1646
diff
changeset
|
1180 if (error) { |
d371fba21a3e
6407444 unhandled i/o error from dnode_next_offset_level()
ahrens
parents:
1646
diff
changeset
|
1181 dbuf_rele(db, FTAG); |
d371fba21a3e
6407444 unhandled i/o error from dnode_next_offset_level()
ahrens
parents:
1646
diff
changeset
|
1182 return (error); |
d371fba21a3e
6407444 unhandled i/o error from dnode_next_offset_level()
ahrens
parents:
1646
diff
changeset
|
1183 } |
789 | 1184 data = db->db.db_data; |
1185 } | |
1186 | |
1187 if (lvl == 0) { | |
1188 dnode_phys_t *dnp = data; | |
1189 span = DNODE_SHIFT; | |
1190 ASSERT(dn->dn_type == DMU_OT_DNODE); | |
1191 | |
1192 for (i = (*offset >> span) & (blkfill - 1); i < blkfill; i++) { | |
1193 if (!dnp[i].dn_type == hole) | |
1194 break; | |
1195 *offset += 1ULL << span; | |
1196 } | |
1197 if (i == blkfill) | |
1198 error = ESRCH; | |
1199 } else { | |
1200 blkptr_t *bp = data; | |
1201 span = (lvl - 1) * epbs + dn->dn_datablkshift; | |
1202 minfill = 0; | |
1203 maxfill = blkfill << ((lvl - 1) * epbs); | |
1204 | |
1205 if (hole) | |
1206 maxfill--; | |
1207 else | |
1208 minfill++; | |
1209 | |
1210 for (i = (*offset >> span) & ((1ULL << epbs) - 1); | |
1211 i < epb; i++) { | |
1212 if (bp[i].blk_fill >= minfill && | |
1213 bp[i].blk_fill <= maxfill) | |
1214 break; | |
1215 *offset += 1ULL << span; | |
1216 } | |
1217 if (i >= epb) | |
1218 error = ESRCH; | |
1219 } | |
1220 | |
1221 if (db) | |
1544 | 1222 dbuf_rele(db, FTAG); |
789 | 1223 |
1224 return (error); | |
1225 } | |
1226 | |
1227 /* | |
1228 * Find the next hole, data, or sparse region at or after *offset. | |
1229 * The value 'blkfill' tells us how many items we expect to find | |
1230 * in an L0 data block; this value is 1 for normal objects, | |
1231 * DNODES_PER_BLOCK for the meta dnode, and some fraction of | |
1232 * DNODES_PER_BLOCK when searching for sparse regions thereof. | |
1233 * Examples: | |
1234 * | |
1235 * dnode_next_offset(dn, hole, offset, 1, 1); | |
1236 * Finds the next hole/data in a file. | |
1237 * Used in dmu_offset_next(). | |
1238 * | |
1239 * dnode_next_offset(mdn, hole, offset, 0, DNODES_PER_BLOCK); | |
1240 * Finds the next free/allocated dnode an objset's meta-dnode. | |
1241 * Used in dmu_object_next(). | |
1242 * | |
1243 * dnode_next_offset(mdn, TRUE, offset, 2, DNODES_PER_BLOCK >> 2); | |
1244 * Finds the next L2 meta-dnode bp that's at most 1/4 full. | |
1245 * Used in dmu_object_alloc(). | |
1246 */ | |
1247 int | |
1248 dnode_next_offset(dnode_t *dn, boolean_t hole, uint64_t *offset, | |
1249 int minlvl, uint64_t blkfill) | |
1250 { | |
1251 int lvl, maxlvl; | |
1252 int error = 0; | |
1253 uint64_t initial_offset = *offset; | |
1254 | |
1255 rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
1256 | |
1257 if (dn->dn_phys->dn_nlevels == 0) { | |
1258 rw_exit(&dn->dn_struct_rwlock); | |
1259 return (ESRCH); | |
1260 } | |
1261 | |
1262 if (dn->dn_datablkshift == 0) { | |
1263 if (*offset < dn->dn_datablksz) { | |
1264 if (hole) | |
1265 *offset = dn->dn_datablksz; | |
1266 } else { | |
1267 error = ESRCH; | |
1268 } | |
1269 rw_exit(&dn->dn_struct_rwlock); | |
1270 return (error); | |
1271 } | |
1272 | |
1273 maxlvl = dn->dn_phys->dn_nlevels; | |
1274 | |
1275 for (lvl = minlvl; lvl <= maxlvl; lvl++) { | |
1276 error = dnode_next_offset_level(dn, hole, offset, lvl, blkfill); | |
1793
d371fba21a3e
6407444 unhandled i/o error from dnode_next_offset_level()
ahrens
parents:
1646
diff
changeset
|
1277 if (error != ESRCH) |
789 | 1278 break; |
1279 } | |
1280 | |
1281 while (--lvl >= minlvl && error == 0) | |
1282 error = dnode_next_offset_level(dn, hole, offset, lvl, blkfill); | |
1283 | |
1284 rw_exit(&dn->dn_struct_rwlock); | |
1285 | |
1793
d371fba21a3e
6407444 unhandled i/o error from dnode_next_offset_level()
ahrens
parents:
1646
diff
changeset
|
1286 if (error == 0 && initial_offset > *offset) |
d371fba21a3e
6407444 unhandled i/o error from dnode_next_offset_level()
ahrens
parents:
1646
diff
changeset
|
1287 error = ESRCH; |
789 | 1288 |
1289 return (error); | |
1290 } |