Print this page
5253 kmem_alloc/kmem_zalloc won't fail with KM_SLEEP
5254 getrbuf won't fail with KM_SLEEP
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/fs/ufs/ufs_acl.c
+++ new/usr/src/uts/common/fs/ufs/ufs_acl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #include <sys/types.h>
27 27 #include <sys/stat.h>
28 28 #include <sys/errno.h>
29 29 #include <sys/kmem.h>
30 30 #include <sys/t_lock.h>
31 31 #include <sys/ksynch.h>
32 32 #include <sys/buf.h>
33 33 #include <sys/vfs.h>
34 34 #include <sys/vnode.h>
35 35 #include <sys/mode.h>
36 36 #include <sys/systm.h>
37 37 #include <vm/seg.h>
38 38 #include <sys/file.h>
39 39 #include <sys/acl.h>
40 40 #include <sys/fs/ufs_inode.h>
41 41 #include <sys/fs/ufs_acl.h>
42 42 #include <sys/fs/ufs_quota.h>
43 43 #include <sys/sysmacros.h>
44 44 #include <sys/debug.h>
45 45 #include <sys/policy.h>
46 46
47 47 /* Cache routines */
48 48 static int si_signature(si_t *);
49 49 static int si_cachei_get(struct inode *, si_t **);
50 50 static int si_cachea_get(struct inode *, si_t *, si_t **);
51 51 static int si_cmp(si_t *, si_t *);
52 52 static void si_cache_put(si_t *);
53 53 void si_cache_del(si_t *, int);
54 54 void si_cache_init(void);
55 55
56 56 static void ufs_si_free_mem(si_t *);
57 57 static int ufs_si_store(struct inode *, si_t *, int, cred_t *);
58 58 static si_t *ufs_acl_cp(si_t *);
59 59 static int ufs_sectobuf(si_t *, caddr_t *, size_t *);
60 60 static int acl_count(ufs_ic_acl_t *);
61 61 static int acl_validate(aclent_t *, int, int);
62 62 static int vsecattr2aclentry(vsecattr_t *, si_t **);
63 63 static int aclentry2vsecattr(si_t *, vsecattr_t *);
64 64
65 65 krwlock_t si_cache_lock; /* Protects si_cache */
66 66 int si_cachecnt = 64; /* # buckets in si_cache[a|i] */
67 67 si_t **si_cachea; /* The 'by acl' cache chains */
68 68 si_t **si_cachei; /* The 'by inode' cache chains */
69 69 long si_cachehit = 0;
70 70 long si_cachemiss = 0;
71 71
72 72 #define SI_HASH(S) ((int)(S) & (si_cachecnt - 1))
73 73
74 74 /*
75 75 * Store the new acls in aclp. Attempts to make things atomic.
76 76 * Search the acl cache for an identical sp and, if found, attach
77 77 * the cache'd acl to ip. If the acl is new (not in the cache),
78 78 * add it to the cache, then attach it to ip. Last, remove and
79 79 * decrement the reference count of any prior acl list attached
80 80 * to the ip.
81 81 *
82 82 * Parameters:
83 83 * ip - Ptr to inode to receive the acl list
84 84 * sp - Ptr to in-core acl structure to attach to the inode.
85 85 * puship - 0 do not push the object inode(ip) 1 push the ip
86 86 * cr - Ptr to credentials
87 87 *
88 88 * Returns: 0 - Success
89 89 * N - From errno.h
90 90 */
static int
ufs_si_store(struct inode *ip, si_t *sp, int puship, cred_t *cr)
{
	struct vfs *vfsp;
	struct inode *sip;	/* shadow inode that holds the on-disk acl */
	si_t *oldsp;		/* acl list previously attached to ip, if any */
	si_t *csp;		/* matching entry found in the si cache */
	caddr_t acldata;	/* on-disk (fsd) image of the acl list */
	ino_t oldshadow;
	size_t acldatalen;
	off_t offset;
	int shadow;
	int err;
	int refcnt;
	int usecnt;
	int signature;
	int resid;
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct fs *fs = ufsvfsp->vfs_fs;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));
	ASSERT(ip->i_ufs_acl != sp);

	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
		return (ENOSYS);

	/*
	 * if there are only the three owner/group/other then do not
	 * create a shadow inode. If there is already a shadow with
	 * the file, remove it.
	 *
	 */
	if (!sp->ausers &&
	    !sp->agroups &&
	    !sp->downer &&
	    !sp->dgroup &&
	    !sp->dother &&
	    sp->dclass.acl_ismask == 0 &&
	    !sp->dusers &&
	    !sp->dgroups) {
		if (ip->i_ufs_acl)
			err = ufs_si_free(ip->i_ufs_acl, ITOV(ip)->v_vfsp, cr);
		/*
		 * NOTE(review): the ufs_si_free() result above is never
		 * examined; the trivial-acl path succeeds regardless —
		 * confirm this is intentional.
		 */
		ip->i_ufs_acl = NULL;
		ip->i_shadow = 0;
		ip->i_flag |= IMOD | IACC;
		/* Fold the owner/group(mask)/other entries back into i_mode */
		ip->i_mode = (ip->i_smode & ~0777) |
		    ((sp->aowner->acl_ic_perm & 07) << 6) |
		    (MASK2MODE(sp)) |
		    (sp->aother->acl_ic_perm & 07);
		TRANS_INODE(ip->i_ufsvfs, ip);
		ufs_iupdat(ip, 1);
		ufs_si_free_mem(sp);
		return (0);
	}

loop:

	/*
	 * Check cache. If in cache, use existing shadow inode.
	 * Increment the shadow link count, then attach to the
	 * cached ufs_acl_entry struct, and increment it's reference
	 * count. Then discard the passed-in ufs_acl_entry and
	 * return.
	 */
	if (si_cachea_get(ip, sp, &csp) == 0) {
		ASSERT(RW_WRITE_HELD(&csp->s_lock));
		if (ip->i_ufs_acl == csp) {
			/* Already attached to this exact acl; nothing to do */
			rw_exit(&csp->s_lock);
			(void) ufs_si_free_mem(sp);
			return (0);
		}
		vfsp = ITOV(ip)->v_vfsp;
		ASSERT(csp->s_shadow <= INT_MAX);
		shadow = (int)csp->s_shadow;
		/*
		 * We can't call ufs_iget while holding the csp locked,
		 * because we might deadlock.  So we drop the
		 * lock on csp, then go search the si_cache again
		 * to see if the csp is still there.
		 */
		rw_exit(&csp->s_lock);
		if ((err = ufs_iget(vfsp, shadow, &sip, cr)) != 0) {
			(void) ufs_si_free_mem(sp);
			return (EIO);
		}
		rw_enter(&sip->i_contents, RW_WRITER);
		/* Shadow vanished or was reused while unlocked: retry */
		if ((sip->i_mode & IFMT) != IFSHAD || sip->i_nlink <= 0) {
			rw_exit(&sip->i_contents);
			VN_RELE(ITOV(sip));
			goto loop;
		}
		/* Get the csp again */
		if (si_cachea_get(ip, sp, &csp) != 0) {
			rw_exit(&sip->i_contents);
			VN_RELE(ITOV(sip));
			goto loop;
		}
		ASSERT(RW_WRITE_HELD(&csp->s_lock));
		/* See if we got the right shadow */
		if (csp->s_shadow != shadow) {
			rw_exit(&csp->s_lock);
			rw_exit(&sip->i_contents);
			VN_RELE(ITOV(sip));
			goto loop;
		}
		ASSERT(RW_WRITE_HELD(&sip->i_contents));
		ASSERT(sip->i_dquot == 0);
		/* Increment link count */
		ASSERT(sip->i_nlink > 0);
		sip->i_nlink++;
		TRANS_INODE(ufsvfsp, sip);
		/* Keep the cache entry's use count in step with i_nlink */
		csp->s_use = sip->i_nlink;
		csp->s_ref++;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		sip->i_flag |= ICHG | IMOD;
		sip->i_seq++;
		ITIMES_NOLOCK(sip);
		/*
		 * Always release s_lock before both releasing i_contents
		 * and calling VN_RELE.
		 */
		rw_exit(&csp->s_lock);
		rw_exit(&sip->i_contents);
		VN_RELE(ITOV(sip));
		(void) ufs_si_free_mem(sp);
		sp = csp;
		si_cachehit++;
		goto switchshadows;
	}

	/* Alloc a shadow inode and fill it in */
	err = ufs_ialloc(ip, ip->i_number, (mode_t)IFSHAD, &sip, cr);
	if (err) {
		(void) ufs_si_free_mem(sp);
		return (err);
	}
	rw_enter(&sip->i_contents, RW_WRITER);
	sip->i_flag |= IACC | IUPD | ICHG;
	sip->i_seq++;
	sip->i_mode = (o_mode_t)IFSHAD;
	ITOV(sip)->v_type = VREG;
	ufs_reset_vnode(ITOV(sip));
	sip->i_nlink = 1;
	sip->i_uid = crgetuid(cr);
	/* Wide uids/gids that don't fit the on-disk short field */
	sip->i_suid = (ulong_t)sip->i_uid > (ulong_t)USHRT_MAX ?
	    UID_LONG : sip->i_uid;
	sip->i_gid = crgetgid(cr);
	sip->i_sgid = (ulong_t)sip->i_gid > (ulong_t)USHRT_MAX ?
	    GID_LONG : sip->i_gid;
	sip->i_shadow = 0;
	TRANS_INODE(ufsvfsp, sip);
	sip->i_ufs_acl = NULL;
	ASSERT(sip->i_size == 0);

	sp->s_shadow = sip->i_number;

	/* Serialize the in-core acl into its on-disk (fsd) form */
	if ((err = ufs_sectobuf(sp, &acldata, &acldatalen)) != 0)
		goto errout;
	offset = 0;

	/*
	 * We don't actually care about the residual count upon failure,
	 * but giving ufs_rdwri() the pointer means it won't translate
	 * all failures to EIO.  Our caller needs to know when ENOSPC
	 * gets hit.
	 */
	resid = 0;
	if (((err = ufs_rdwri(UIO_WRITE, FWRITE|FSYNC, sip, acldata,
	    acldatalen, (offset_t)0, UIO_SYSSPACE, &resid, cr)) != 0) ||
	    (resid != 0)) {
		kmem_free(acldata, acldatalen);
		if ((resid != 0) && (err == 0))
			err = ENOSPC;
		goto errout;
	}

	offset += acldatalen;
	/* Track the largest acl seen so readers can size their buffers */
	if ((acldatalen + fs->fs_bsize) > ufsvfsp->vfs_maxacl)
		ufsvfsp->vfs_maxacl = acldatalen + fs->fs_bsize;

	kmem_free(acldata, acldatalen);
	/* Sync & free the shadow inode */
	ufs_iupdat(sip, 1);
	rw_exit(&sip->i_contents);
	VN_RELE(ITOV(sip));

	/* We're committed to using this sp */
	sp->s_use = 1;
	sp->s_ref = 1;

	/* Now put the new acl stuff in the cache */
	/* XXX Might make a duplicate */
	si_cache_put(sp);
	si_cachemiss++;

switchshadows:
	/* Now switch the parent inode to use the new shadow inode */
	ASSERT(RW_WRITE_HELD(&ip->i_contents));
	rw_enter(&sp->s_lock, RW_READER);
	oldsp = ip->i_ufs_acl;
	oldshadow = ip->i_shadow;
	ip->i_ufs_acl = sp;
	ASSERT(sp->s_shadow <= INT_MAX);
	ip->i_shadow = (int32_t)sp->s_shadow;
	ASSERT(oldsp != sp);
	ASSERT(oldshadow != ip->i_number);
	ASSERT(ip->i_number != ip->i_shadow);
	/*
	 * Change the mode bits to follow the acl list
	 *
	 * NOTE: a directory is not required to have a "regular" acl
	 * bug id's 1238908,  1257173, 1263171 and 1263188
	 *
	 * but if a "regular" acl is present, it must contain
	 * an "owner", "group", and "other" acl
	 *
	 * If an ACL mask exists, the effective group rights are
	 * set to the mask.  Otherwise, the effective group rights
	 * are set to the object group bits.
	 */
	if (sp->aowner) {				/* Owner */
		ip->i_mode &= ~0700;			/* clear Owner */
		ip->i_mode |= (sp->aowner->acl_ic_perm & 07) << 6;
		ip->i_uid = sp->aowner->acl_ic_who;
	}

	if (sp->agroup) {				/* Group */
		ip->i_mode &= ~0070;			/* clear Group */
		ip->i_mode |= MASK2MODE(sp);		/* apply mask */
		ip->i_gid = sp->agroup->acl_ic_who;
	}

	if (sp->aother) {				/* Other */
		ip->i_mode &= ~0007;			/* clear Other */
		ip->i_mode |= (sp->aother->acl_ic_perm & 07);
	}

	if (sp->aclass.acl_ismask)
		ip->i_mode = (ip->i_mode & ~070) |
		    (((sp->aclass.acl_maskbits & 07) << 3) &
		    ip->i_mode);

	TRANS_INODE(ufsvfsp, ip);
	rw_exit(&sp->s_lock);
	ip->i_flag |= ICHG;
	ip->i_seq++;
	/*
	 * when creating a file there is no need to push the inode, it
	 * is pushed later
	 */
	if (puship == 1)
		ufs_iupdat(ip, 1);

	/*
	 * Decrement link count on the old shadow inode,
	 * and decrement reference count on the old aclp,
	 */
	if (oldshadow) {
		/* Get the shadow inode */
		ASSERT(RW_WRITE_HELD(&ip->i_contents));
		vfsp = ITOV(ip)->v_vfsp;
		if ((err = ufs_iget_alloced(vfsp, oldshadow, &sip, cr)) != 0) {
			return (EIO);
		}
		/* Decrement link count */
		rw_enter(&sip->i_contents, RW_WRITER);
		if (oldsp)
			rw_enter(&oldsp->s_lock, RW_WRITER);
		ASSERT(sip->i_dquot == 0);
		ASSERT(sip->i_nlink > 0);
		usecnt = --sip->i_nlink;
		ufs_setreclaim(sip);
		TRANS_INODE(ufsvfsp, sip);
		sip->i_flag |= ICHG | IMOD;
		sip->i_seq++;
		ITIMES_NOLOCK(sip);
		if (oldsp) {
			oldsp->s_use = usecnt;
			refcnt = --oldsp->s_ref;
			signature = oldsp->s_signature;
			/*
			 * Always release s_lock before both releasing
			 * i_contents and calling VN_RELE.
			 */
			rw_exit(&oldsp->s_lock);
		}
		rw_exit(&sip->i_contents);
		VN_RELE(ITOV(sip));
		/* refcnt/signature are only set when oldsp != NULL */
		if (oldsp && (refcnt == 0))
			si_cache_del(oldsp, signature);
	}
	return (0);

errout:
	/* Throw the newly alloc'd inode away */
	sip->i_nlink = 0;
	ufs_setreclaim(sip);
	TRANS_INODE(ufsvfsp, sip);
	ITIMES_NOLOCK(sip);
	rw_exit(&sip->i_contents);
	VN_RELE(ITOV(sip));
	ASSERT(!sp->s_use && !sp->s_ref && !(sp->s_flags & SI_CACHED));
	(void) ufs_si_free_mem(sp);
	return (err);
}
396 396
397 397 /*
398 398 * Load the acls for inode ip either from disk (adding to the cache),
399 399 * or search the cache and attach the cache'd acl list to the ip.
400 400 * In either case, maintain the proper reference count on the cached entry.
401 401 *
402 402 * Parameters:
403 403 * ip - Ptr to the inode which needs the acl list loaded
404 404 * cr - Ptr to credentials
405 405 *
406 406 * Returns: 0 - Success
407 407 * N - From errno.h
408 408 */
int
ufs_si_load(struct inode *ip, cred_t *cr)
/*
 * ip	parent inode in
 * cr	credentials in
 */
{
	struct vfs *vfsp;
	struct inode *sip;	/* shadow inode holding the on-disk acl */
	ufs_fsd_t *fsdp;	/* cursor over on-disk fsd records */
	si_t *sp;
	vsecattr_t vsecattr = {
		(uint_t)0,
		(int)0,
		(void *)NULL,
		(int)0,
		(void *)NULL};
	aclent_t *aclp;
	ufs_acl_t *ufsaclp;
	caddr_t acldata = NULL;
	ino_t maxino;
	int err;
	size_t acldatalen;
	int numacls;
	int shadow;
	int usecnt;
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
	struct fs *fs = ufsvfsp->vfs_fs;

	ASSERT(ip != NULL);
	ASSERT(RW_WRITE_HELD(&ip->i_contents));
	ASSERT(ip->i_shadow && ip->i_ufs_acl == NULL);
	ASSERT((ip->i_mode & IFMT) != IFSHAD);

	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
		return (ENOSYS);

	/* A self-referential shadow is corrupt metadata */
	if (ip->i_shadow == ip->i_number)
		return (EIO);

	/* Reject shadow inode numbers outside the filesystem's range */
	maxino = (ino_t)(ITOF(ip)->fs_ncg * ITOF(ip)->fs_ipg);
	if (ip->i_shadow < UFSROOTINO || ip->i_shadow > maxino)
		return (EIO);

	/*
	 * XXX Check cache. If in cache, link to it and increment
	 * the reference count, then return.
	 */
	if (si_cachei_get(ip, &sp) == 0) {
		ASSERT(RW_WRITE_HELD(&sp->s_lock));
		ip->i_ufs_acl = sp;
		sp->s_ref++;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		rw_exit(&sp->s_lock);
		si_cachehit++;
		return (0);
	}

	/* Get the shadow inode */
	vfsp = ITOV(ip)->v_vfsp;
	shadow = ip->i_shadow;
	if ((err = ufs_iget_alloced(vfsp, shadow, &sip, cr)) != 0) {
		return (err);
	}
	rw_enter(&sip->i_contents, RW_WRITER);

	if ((sip->i_mode & IFMT) != IFSHAD) {
		rw_exit(&sip->i_contents);
		err = EINVAL;
		goto alldone;
	}

	ASSERT(sip->i_dquot == 0);
	usecnt = sip->i_nlink;
	/* Record an access time update unless atime is suppressed */
	if ((!ULOCKFS_IS_NOIACC(&ufsvfsp->vfs_ulockfs)) &&
	    (!(sip)->i_ufsvfs->vfs_noatime)) {
		sip->i_flag |= IACC;
	}
	/* Reading from here on only needs the lock as reader */
	rw_downgrade(&sip->i_contents);

	ASSERT(sip->i_size <= MAXOFF_T);
	/* Read the acl's and other stuff from disk */
	acldata = kmem_zalloc((size_t)sip->i_size, KM_SLEEP);
	acldatalen = sip->i_size;

	err = ufs_rdwri(UIO_READ, FREAD, sip, acldata, acldatalen, (offset_t)0,
	    UIO_SYSSPACE, (int *)0, cr);

	rw_exit(&sip->i_contents);

	if (err)
		goto alldone;

	/*
	 * Convert from disk format
	 * Result is a vsecattr struct which we then convert to the
	 * si struct.
	 */
	bzero((caddr_t)&vsecattr, sizeof (vsecattr_t));
	for (fsdp = (ufs_fsd_t *)acldata;
	    fsdp < (ufs_fsd_t *)(acldata + acldatalen);
	    fsdp = (ufs_fsd_t *)((caddr_t)fsdp +
	    FSD_RECSZ(fsdp, fsdp->fsd_size))) {
		/* A non-positive record size would make no forward progress */
		if (fsdp->fsd_size <= 0)
			break;
		switch (fsdp->fsd_type) {
		case FSD_ACL:
			numacls = vsecattr.vsa_aclcnt =
			    (int)((fsdp->fsd_size - 2 * sizeof (int)) /
			    sizeof (ufs_acl_t));
			aclp = vsecattr.vsa_aclentp =
			    kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
			for (ufsaclp = (ufs_acl_t *)fsdp->fsd_data;
			    numacls; ufsaclp++) {
				aclp->a_type = ufsaclp->acl_tag;
				aclp->a_id = ufsaclp->acl_who;
				aclp->a_perm = ufsaclp->acl_perm;
				aclp++;
				numacls--;
			}
			break;
		case FSD_DFACL:
			numacls = vsecattr.vsa_dfaclcnt =
			    (int)((fsdp->fsd_size - 2 * sizeof (int)) /
			    sizeof (ufs_acl_t));
			aclp = vsecattr.vsa_dfaclentp =
			    kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
			for (ufsaclp = (ufs_acl_t *)fsdp->fsd_data;
			    numacls; ufsaclp++) {
				aclp->a_type = ufsaclp->acl_tag;
				aclp->a_id = ufsaclp->acl_who;
				aclp->a_perm = ufsaclp->acl_perm;
				aclp++;
				numacls--;
			}
			break;
		}
	}
	/* Sort the lists */
	if (vsecattr.vsa_aclentp) {
		ksort((caddr_t)vsecattr.vsa_aclentp, vsecattr.vsa_aclcnt,
		    sizeof (aclent_t), cmp2acls);
		if ((err = acl_validate(vsecattr.vsa_aclentp,
		    vsecattr.vsa_aclcnt, ACL_CHECK)) != 0) {
			goto alldone;
		}
	}
	if (vsecattr.vsa_dfaclentp) {
		ksort((caddr_t)vsecattr.vsa_dfaclentp, vsecattr.vsa_dfaclcnt,
		    sizeof (aclent_t), cmp2acls);
		if ((err = acl_validate(vsecattr.vsa_dfaclentp,
		    vsecattr.vsa_dfaclcnt, DEF_ACL_CHECK)) != 0) {
			goto alldone;
		}
	}

	/* ignore shadow inodes without ACLs */
	if (!vsecattr.vsa_aclentp && !vsecattr.vsa_dfaclentp) {
		err = 0;
		goto alldone;
	}

	/* Convert from vsecattr struct to ufs_acl_entry struct */
	if ((err = vsecattr2aclentry(&vsecattr, &sp)) != 0) {
		goto alldone;
	}

	/* These aren't filled in by vsecattr2aclentry */
	sp->s_shadow = ip->i_shadow;
	sp->s_dev = ip->i_dev;
	sp->s_use = usecnt;
	sp->s_ref = 1;
	ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);

	/* XXX Might make a duplicate */
	si_cache_put(sp);

	/* Signal anyone waiting on this shadow to be loaded */
	ip->i_ufs_acl = sp;
	err = 0;
	si_cachemiss++;
	if ((acldatalen + fs->fs_bsize) > ufsvfsp->vfs_maxacl)
		ufsvfsp->vfs_maxacl = acldatalen + fs->fs_bsize;
alldone:
	/*
	 * Common exit point. Mark shadow inode as ISTALE
	 * if we detect an internal inconsistency, to
	 * prevent stray inodes appearing in the cache.
	 */
	if (err) {
		rw_enter(&sip->i_contents, RW_READER);
		mutex_enter(&sip->i_tlock);
		sip->i_flag |= ISTALE;
		mutex_exit(&sip->i_tlock);
		rw_exit(&sip->i_contents);
	}
	VN_RELE(ITOV(sip));

	/*
	 * Cleanup of data structures allocated
	 * on the fly.
	 */
	if (acldata)
		kmem_free(acldata, acldatalen);

	if (vsecattr.vsa_aclentp)
		kmem_free(vsecattr.vsa_aclentp,
		    vsecattr.vsa_aclcnt * sizeof (aclent_t));
	if (vsecattr.vsa_dfaclentp)
		kmem_free(vsecattr.vsa_dfaclentp,
		    vsecattr.vsa_dfaclcnt * sizeof (aclent_t));
	return (err);
}
622 622
623 623 /*
624 624 * Check the inode's ACL's to see if this mode of access is
625 625 * allowed; return 0 if allowed, EACCES if not.
626 626 *
627 627 * We follow the procedure defined in Sec. 3.3.5, ACL Access
628 628 * Check Algorithm, of the POSIX 1003.6 Draft Standard.
629 629 */
630 630 int
631 631 ufs_acl_access(struct inode *ip, int mode, cred_t *cr)
632 632 /*
633 633 * ip parent inode
634 634 * mode mode of access read, write, execute/examine
635 635 * cr credentials
636 636 */
637 637 {
638 638 ufs_ic_acl_t *acl;
639 639 int ismask, mask = 0;
640 640 int gperm = 0;
641 641 int ngroup = 0;
642 642 si_t *sp = NULL;
643 643 uid_t uid = crgetuid(cr);
644 644 uid_t owner;
645 645
646 646 ASSERT(ip->i_ufs_acl != NULL);
647 647 ASSERT(RW_LOCK_HELD(&ip->i_contents));
648 648
649 649 sp = ip->i_ufs_acl;
650 650
651 651 ismask = sp->aclass.acl_ismask ?
652 652 sp->aclass.acl_ismask : NULL;
653 653
654 654 if (ismask)
655 655 mask = sp->aclass.acl_maskbits;
656 656 else
657 657 mask = -1;
658 658
659 659 /*
660 660 * (1) If user owns the file, obey user mode bits
661 661 */
662 662 owner = sp->aowner->acl_ic_who;
663 663 if (uid == owner) {
664 664 return (MODE_CHECK(owner, mode, (sp->aowner->acl_ic_perm << 6),
665 665 cr, ip));
666 666 }
667 667
668 668 /*
669 669 * (2) Obey any matching ACL_USER entry
670 670 */
671 671 if (sp->ausers)
672 672 for (acl = sp->ausers; acl != NULL; acl = acl->acl_ic_next) {
673 673 if (acl->acl_ic_who == uid) {
674 674 return (MODE_CHECK(owner, mode,
675 675 (mask & acl->acl_ic_perm) << 6, cr, ip));
676 676 }
677 677 }
678 678
679 679 /*
680 680 * (3) If user belongs to file's group, obey group mode bits
681 681 * if no ACL mask is defined; if there is an ACL mask, we look
682 682 * at both the group mode bits and any ACL_GROUP entries.
683 683 */
684 684 if (groupmember((uid_t)sp->agroup->acl_ic_who, cr)) {
685 685 ngroup++;
686 686 gperm = (sp->agroup->acl_ic_perm);
687 687 if (!ismask)
688 688 return (MODE_CHECK(owner, mode, (gperm << 6), cr, ip));
689 689 }
690 690
691 691 /*
692 692 * (4) Accumulate the permissions in matching ACL_GROUP entries
693 693 */
694 694 if (sp->agroups)
695 695 for (acl = sp->agroups; acl != NULL; acl = acl->acl_ic_next)
696 696 {
697 697 if (groupmember(acl->acl_ic_who, cr)) {
698 698 ngroup++;
699 699 gperm |= acl->acl_ic_perm;
700 700 }
701 701 }
702 702
703 703 if (ngroup != 0)
704 704 return (MODE_CHECK(owner, mode, ((gperm & mask) << 6), cr, ip));
705 705
706 706 /*
707 707 * (5) Finally, use the "other" mode bits
708 708 */
709 709 return (MODE_CHECK(owner, mode, sp->aother->acl_ic_perm << 6, cr, ip));
710 710 }
711 711
712 712 /*ARGSUSED2*/
713 713 int
714 714 ufs_acl_get(struct inode *ip, vsecattr_t *vsap, int flag, cred_t *cr)
715 715 {
716 716 aclent_t *aclentp;
717 717
718 718 ASSERT(RW_LOCK_HELD(&ip->i_contents));
719 719
720 720 /* XXX Range check, sanity check, shadow check */
721 721 /* If an ACL is present, get the data from the shadow inode info */
722 722 if (ip->i_ufs_acl)
723 723 return (aclentry2vsecattr(ip->i_ufs_acl, vsap));
724 724
725 725 /*
726 726 * If no ACLs are present, fabricate one from the mode bits.
727 727 * This code is almost identical to fs_fab_acl(), but we
728 728 * already have the mode bits handy, so we'll avoid going
729 729 * through VOP_GETATTR() again.
730 730 */
731 731
732 732 vsap->vsa_aclcnt = 0;
↓ open down ↓ |
732 lines elided |
↑ open up ↑ |
733 733 vsap->vsa_aclentp = NULL;
734 734 vsap->vsa_dfaclcnt = 0; /* Default ACLs are not fabricated */
735 735 vsap->vsa_dfaclentp = NULL;
736 736
737 737 if (vsap->vsa_mask & (VSA_ACLCNT | VSA_ACL))
738 738 vsap->vsa_aclcnt = 4; /* USER, GROUP, OTHER, and CLASS */
739 739
740 740 if (vsap->vsa_mask & VSA_ACL) {
741 741 vsap->vsa_aclentp = kmem_zalloc(4 * sizeof (aclent_t),
742 742 KM_SLEEP);
743 - if (vsap->vsa_aclentp == NULL)
744 - return (ENOMEM);
743 +
745 744 aclentp = vsap->vsa_aclentp;
746 745
747 746 /* Owner */
748 747 aclentp->a_type = USER_OBJ;
749 748 aclentp->a_perm = ((ushort_t)(ip->i_mode & 0700)) >> 6;
750 749 aclentp->a_id = ip->i_uid; /* Really undefined */
751 750 aclentp++;
752 751
753 752 /* Group */
754 753 aclentp->a_type = GROUP_OBJ;
755 754 aclentp->a_perm = ((ushort_t)(ip->i_mode & 0070)) >> 3;
756 755 aclentp->a_id = ip->i_gid; /* Really undefined */
757 756 aclentp++;
758 757
759 758 /* Other */
760 759 aclentp->a_type = OTHER_OBJ;
761 760 aclentp->a_perm = ip->i_mode & 0007;
762 761 aclentp->a_id = 0; /* Really undefined */
763 762 aclentp++;
764 763
765 764 /* Class */
766 765 aclentp->a_type = CLASS_OBJ;
767 766 aclentp->a_perm = ((ushort_t)(ip->i_mode & 0070)) >> 3;
768 767 aclentp->a_id = 0; /* Really undefined */
769 768 ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
770 769 sizeof (aclent_t), cmp2acls);
771 770 }
772 771
773 772 return (0);
774 773 }
775 774
776 775 /*ARGSUSED2*/
777 776 int
778 777 ufs_acl_set(struct inode *ip, vsecattr_t *vsap, int flag, cred_t *cr)
779 778 {
780 779 si_t *sp;
781 780 int err;
782 781
783 782 ASSERT(RW_WRITE_HELD(&ip->i_contents));
784 783
785 784 if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
786 785 return (ENOSYS);
787 786
788 787 /*
789 788 * only the owner of the file or privileged users can change the ACLs
790 789 */
791 790 if (secpolicy_vnode_setdac(cr, ip->i_uid) != 0)
792 791 return (EPERM);
793 792
794 793 /* Convert from vsecattr struct to ufs_acl_entry struct */
795 794 if ((err = vsecattr2aclentry(vsap, &sp)) != 0)
796 795 return (err);
797 796 sp->s_dev = ip->i_dev;
798 797
799 798 /*
800 799 * Make the user & group objs in the acl list follow what's
801 800 * in the inode.
802 801 */
803 802 #ifdef DEBUG
804 803 if (vsap->vsa_mask == VSA_ACL) {
805 804 ASSERT(sp->aowner);
806 805 ASSERT(sp->agroup);
807 806 ASSERT(sp->aother);
808 807 }
809 808 #endif /* DEBUG */
810 809
811 810 if (sp->aowner)
812 811 sp->aowner->acl_ic_who = ip->i_uid;
813 812 if (sp->agroup)
814 813 sp->agroup->acl_ic_who = ip->i_gid;
815 814
816 815 /*
817 816 * Write and cache the new acl list
818 817 */
819 818 err = ufs_si_store(ip, sp, 1, cr);
820 819
821 820 return (err);
822 821 }
823 822
824 823 /*
825 824 * XXX Scan sorted array of acl's, checking for:
826 825 * 1) Any duplicate/conflicting entries (same type and id)
827 826 * 2) More than 1 of USER_OBJ, GROUP_OBJ, OTHER_OBJ, CLASS_OBJ
828 827 * 3) More than 1 of DEF_USER_OBJ, DEF_GROUP_OBJ, DEF_OTHER_OBJ, DEF_CLASS_OBJ
829 828 *
830 829 * Parameters:
831 830 * aclentp - ptr to sorted list of acl entries.
832 831 * nentries - # acl entries on the list
833 832 * flag - Bitmap (ACL_CHECK and/or DEF_ACL_CHECK) indicating whether the
834 833 * list contains regular acls, default acls, or both.
835 834 *
836 835 * Returns: 0 - Success
837 836 * EINVAL - Invalid list (dups or multiple entries of type USER_OBJ, etc)
838 837 */
839 838 static int
840 839 acl_validate(aclent_t *aclentp, int nentries, int flag)
841 840 {
842 841 int i;
843 842 int nuser_objs = 0;
844 843 int ngroup_objs = 0;
845 844 int nother_objs = 0;
846 845 int nclass_objs = 0;
847 846 int ndef_user_objs = 0;
848 847 int ndef_group_objs = 0;
849 848 int ndef_other_objs = 0;
850 849 int ndef_class_objs = 0;
851 850 int nusers = 0;
852 851 int ngroups = 0;
853 852 int ndef_users = 0;
854 853 int ndef_groups = 0;
855 854 int numdefs = 0;
856 855
857 856 /* Null list or list of one */
858 857 if (aclentp == NULL)
859 858 return (0);
860 859
861 860 if (nentries <= 0)
862 861 return (EINVAL);
863 862
864 863 for (i = 1; i < nentries; i++) {
865 864 if (((aclentp[i - 1].a_type == aclentp[i].a_type) &&
866 865 (aclentp[i - 1].a_id == aclentp[i].a_id)) ||
867 866 (aclentp[i - 1].a_perm > 07)) {
868 867 return (EINVAL);
869 868 }
870 869 }
871 870
872 871 if (flag == 0 || (flag != ACL_CHECK && flag != DEF_ACL_CHECK))
873 872 return (EINVAL);
874 873
875 874 /* Count types */
876 875 for (i = 0; i < nentries; i++) {
877 876 switch (aclentp[i].a_type) {
878 877 case USER_OBJ: /* Owner */
879 878 nuser_objs++;
880 879 break;
881 880 case GROUP_OBJ: /* Group */
882 881 ngroup_objs++;
883 882 break;
884 883 case OTHER_OBJ: /* Other */
885 884 nother_objs++;
886 885 break;
887 886 case CLASS_OBJ: /* Mask */
888 887 nclass_objs++;
889 888 break;
890 889 case DEF_USER_OBJ: /* Default Owner */
891 890 ndef_user_objs++;
892 891 break;
893 892 case DEF_GROUP_OBJ: /* Default Group */
894 893 ndef_group_objs++;
895 894 break;
896 895 case DEF_OTHER_OBJ: /* Default Other */
897 896 ndef_other_objs++;
898 897 break;
899 898 case DEF_CLASS_OBJ: /* Default Mask */
900 899 ndef_class_objs++;
901 900 break;
902 901 case USER: /* Users */
903 902 nusers++;
904 903 break;
905 904 case GROUP: /* Groups */
906 905 ngroups++;
907 906 break;
908 907 case DEF_USER: /* Default Users */
909 908 ndef_users++;
910 909 break;
911 910 case DEF_GROUP: /* Default Groups */
912 911 ndef_groups++;
913 912 break;
914 913 default: /* Unknown type */
915 914 return (EINVAL);
916 915 }
917 916 }
918 917
919 918 /*
920 919 * For normal acl's, we require there be one (and only one)
921 920 * USER_OBJ, GROUP_OBJ and OTHER_OBJ. There is either zero
922 921 * or one CLASS_OBJ.
923 922 */
924 923 if (flag & ACL_CHECK) {
925 924 if (nuser_objs != 1 || ngroup_objs != 1 ||
926 925 nother_objs != 1 || nclass_objs > 1) {
927 926 return (EINVAL);
928 927 }
929 928 /*
930 929 * If there are ANY group acls, there MUST be a
931 930 * class_obj(mask) acl (1003.6/D12 p. 29 lines 75-80).
932 931 */
933 932 if (ngroups && !nclass_objs) {
934 933 return (EINVAL);
935 934 }
936 935 if (nuser_objs + ngroup_objs + nother_objs + nclass_objs +
937 936 ngroups + nusers > MAX_ACL_ENTRIES)
938 937 return (EINVAL);
939 938 }
940 939
941 940 /*
942 941 * For default acl's, we require that there be either one (and only one)
943 942 * DEF_USER_OBJ, DEF_GROUP_OBJ and DEF_OTHER_OBJ
944 943 * or there be none of them.
945 944 */
946 945 if (flag & DEF_ACL_CHECK) {
947 946 if (ndef_other_objs > 1 || ndef_user_objs > 1 ||
948 947 ndef_group_objs > 1 || ndef_class_objs > 1) {
949 948 return (EINVAL);
950 949 }
951 950
952 951 numdefs = ndef_other_objs + ndef_user_objs + ndef_group_objs;
953 952
954 953 if (numdefs != 0 && numdefs != 3) {
955 954 return (EINVAL);
956 955 }
957 956 /*
958 957 * If there are ANY def_group acls, there MUST be a
959 958 * def_class_obj(mask) acl (1003.6/D12 P. 29 lines 75-80).
960 959 * XXX(jimh) This is inferred.
961 960 */
962 961 if (ndef_groups && !ndef_class_objs) {
963 962 return (EINVAL);
964 963 }
965 964 if ((ndef_users || ndef_groups) &&
966 965 ((numdefs != 3) && !ndef_class_objs)) {
967 966 return (EINVAL);
968 967 }
969 968 if (ndef_user_objs + ndef_group_objs + ndef_other_objs +
970 969 ndef_class_objs + ndef_users + ndef_groups >
971 970 MAX_ACL_ENTRIES)
972 971 return (EINVAL);
973 972 }
974 973 return (0);
975 974 }
976 975
977 976 static int
978 977 formacl(ufs_ic_acl_t **aclpp, aclent_t *aclentp)
979 978 {
980 979 ufs_ic_acl_t *uaclp;
981 980
982 981 uaclp = kmem_alloc(sizeof (ufs_ic_acl_t), KM_SLEEP);
983 982 uaclp->acl_ic_perm = aclentp->a_perm;
984 983 uaclp->acl_ic_who = aclentp->a_id;
985 984 uaclp->acl_ic_next = *aclpp;
986 985 *aclpp = uaclp;
987 986 return (0);
988 987 }
989 988
990 989 /*
991 990 * XXX - Make more efficient
992 991 * Convert from the vsecattr struct, used by the VOP interface, to
993 992 * the ufs_acl_entry struct used for in-core storage of acl's.
994 993 *
995 994 * Parameters:
996 995 * vsap - Ptr to array of security attributes.
997 996 * spp - Ptr to ptr to si struct for the results
998 997 *
999 998 * Returns: 0 - Success
1000 999 * N - From errno.h
1001 1000 */
static int
vsecattr2aclentry(vsecattr_t *vsap, si_t **spp)
{
	aclent_t *aclentp, *aclp;
	si_t *sp;
	int err;
	int i;

	/* Sort & validate the lists on the vsap */
	ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
	    sizeof (aclent_t), cmp2acls);
	ksort((caddr_t)vsap->vsa_dfaclentp, vsap->vsa_dfaclcnt,
	    sizeof (aclent_t), cmp2acls);
	if ((err = acl_validate(vsap->vsa_aclentp,
	    vsap->vsa_aclcnt, ACL_CHECK)) != 0)
		return (err);
	if ((err = acl_validate(vsap->vsa_dfaclentp,
	    vsap->vsa_dfaclcnt, DEF_ACL_CHECK)) != 0)
		return (err);

	/* Create new si struct and hang acl's off it */
	sp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
	rw_init(&sp->s_lock, NULL, RW_DEFAULT, NULL);

	/*
	 * Process acl list.  Walk the sorted array from its last entry
	 * back to the first; formacl() pushes each entry at the head of
	 * its chain, so the in-core lists come out in ascending (sorted)
	 * order.  CLASS_OBJ is not chained: it is stored inline in
	 * sp->aclass as a mask flag plus mask bits.
	 */
	aclp = (aclent_t *)vsap->vsa_aclentp;
	aclentp = aclp + vsap->vsa_aclcnt - 1;
	for (i = 0; i < vsap->vsa_aclcnt; i++) {
		switch (aclentp->a_type) {
		case USER_OBJ:		/* Owner */
			if (err = formacl(&sp->aowner, aclentp))
				goto error;
			break;
		case GROUP_OBJ:		/* Group */
			if (err = formacl(&sp->agroup, aclentp))
				goto error;
			break;
		case OTHER_OBJ:		/* Other */
			if (err = formacl(&sp->aother, aclentp))
				goto error;
			break;
		case USER:
			if (err = formacl(&sp->ausers, aclentp))
				goto error;
			break;
		case CLASS_OBJ:		/* Mask */
			sp->aclass.acl_ismask = 1;
			sp->aclass.acl_maskbits = aclentp->a_perm;
			break;
		case GROUP:
			if (err = formacl(&sp->agroups, aclentp))
				goto error;
			break;
		default:
			break;
		}
		aclentp--;
	}

	/* Process default acl list (same back-to-front scheme as above) */
	aclp = (aclent_t *)vsap->vsa_dfaclentp;
	aclentp = aclp + vsap->vsa_dfaclcnt - 1;
	for (i = 0; i < vsap->vsa_dfaclcnt; i++) {
		switch (aclentp->a_type) {
		case DEF_USER_OBJ:	/* Default Owner */
			if (err = formacl(&sp->downer, aclentp))
				goto error;
			break;
		case DEF_GROUP_OBJ:	/* Default Group */
			if (err = formacl(&sp->dgroup, aclentp))
				goto error;
			break;
		case DEF_OTHER_OBJ:	/* Default Other */
			if (err = formacl(&sp->dother, aclentp))
				goto error;
			break;
		case DEF_USER:
			if (err = formacl(&sp->dusers, aclentp))
				goto error;
			break;
		case DEF_CLASS_OBJ:	/* Default Mask */
			sp->dclass.acl_ismask = 1;
			sp->dclass.acl_maskbits = aclentp->a_perm;
			break;
		case DEF_GROUP:
			if (err = formacl(&sp->dgroups, aclentp))
				goto error;
			break;
		default:
			break;
		}
		aclentp--;
	}
	*spp = sp;
	return (0);

error:
	/* tear down the partially built si and all chains hung off it */
	ufs_si_free_mem(sp);
	return (err);
}
1102 1101
1103 1102 void
1104 1103 formvsec(int obj_type, ufs_ic_acl_t *aclp, aclent_t **aclentpp)
1105 1104 {
1106 1105 for (; aclp; aclp = aclp->acl_ic_next) {
1107 1106 (*aclentpp)->a_type = obj_type;
1108 1107 (*aclentpp)->a_perm = aclp->acl_ic_perm;
1109 1108 (*aclentpp)->a_id = aclp->acl_ic_who;
1110 1109 (*aclentpp)++;
1111 1110 }
1112 1111 }
1113 1112
1114 1113 /*
1115 1114 * XXX - Make more efficient
1116 1115 * Convert from the ufs_acl_entry struct used for in-core storage of acl's
1117 1116 * to the vsecattr struct, used by the VOP interface.
1118 1117 *
1119 1118 * Parameters:
1120 1119 * sp - Ptr to si struct with the acls
1121 1120 * vsap - Ptr to a vsecattr struct which will take the results.
1122 1121 *
1123 1122 * Returns: 0 - Success
1124 1123 * N - From errno table
1125 1124 */
static int
aclentry2vsecattr(si_t *sp, vsecattr_t *vsap)
{
	aclent_t *aclentp;
	int numacls = 0;
	int err;

	vsap->vsa_aclentp = vsap->vsa_dfaclentp = NULL;

	/*
	 * Count the regular entries first so a single exact-size
	 * allocation can hold them all; the inline mask (aclass)
	 * contributes one extra entry when present.
	 */
	numacls = acl_count(sp->aowner) +
	    acl_count(sp->agroup) +
	    acl_count(sp->aother) +
	    acl_count(sp->ausers) +
	    acl_count(sp->agroups);
	if (sp->aclass.acl_ismask)
		numacls++;

	if (vsap->vsa_mask & (VSA_ACLCNT | VSA_ACL))
		vsap->vsa_aclcnt = numacls;

	if (numacls == 0)
		goto do_defaults;

	if (vsap->vsa_mask & VSA_ACL) {
		vsap->vsa_aclentp = kmem_zalloc(numacls * sizeof (aclent_t),
		    KM_SLEEP);
		aclentp = vsap->vsa_aclentp;

		/* each formvsec() call advances aclentp past what it wrote */
		formvsec(USER_OBJ, sp->aowner, &aclentp);
		formvsec(USER, sp->ausers, &aclentp);
		formvsec(GROUP_OBJ, sp->agroup, &aclentp);
		formvsec(GROUP, sp->agroups, &aclentp);
		formvsec(OTHER_OBJ, sp->aother, &aclentp);

		if (sp->aclass.acl_ismask) {
			aclentp->a_type = CLASS_OBJ;	/* Mask */
			aclentp->a_perm = sp->aclass.acl_maskbits;
			aclentp->a_id = 0;
			aclentp++;
		}

		/* Sort the acl list */
		ksort((caddr_t)vsap->vsa_aclentp, vsap->vsa_aclcnt,
		    sizeof (aclent_t), cmp2acls);
		/* Check the acl list; on failure free the array we built */
		if ((err = acl_validate(vsap->vsa_aclentp,
		    vsap->vsa_aclcnt, ACL_CHECK)) != 0) {
			kmem_free(vsap->vsa_aclentp,
			    numacls * sizeof (aclent_t));
			vsap->vsa_aclentp = NULL;
			return (err);
		}

	}
do_defaults:
	/* Process Defaults */

	numacls = acl_count(sp->downer) +
	    acl_count(sp->dgroup) +
	    acl_count(sp->dother) +
	    acl_count(sp->dusers) +
	    acl_count(sp->dgroups);
	if (sp->dclass.acl_ismask)
		numacls++;

	if (vsap->vsa_mask & (VSA_DFACLCNT | VSA_DFACL))
		vsap->vsa_dfaclcnt = numacls;

	if (numacls == 0)
		goto do_others;

	if (vsap->vsa_mask & VSA_DFACL) {
		vsap->vsa_dfaclentp =
		    kmem_zalloc(numacls * sizeof (aclent_t), KM_SLEEP);
		aclentp = vsap->vsa_dfaclentp;
		formvsec(DEF_USER_OBJ, sp->downer, &aclentp);
		formvsec(DEF_USER, sp->dusers, &aclentp);
		formvsec(DEF_GROUP_OBJ, sp->dgroup, &aclentp);
		formvsec(DEF_GROUP, sp->dgroups, &aclentp);
		formvsec(DEF_OTHER_OBJ, sp->dother, &aclentp);

		if (sp->dclass.acl_ismask) {
			aclentp->a_type = DEF_CLASS_OBJ;	/* Mask */
			aclentp->a_perm = sp->dclass.acl_maskbits;
			aclentp->a_id = 0;
			aclentp++;
		}

		/* Sort the default acl list */
		ksort((caddr_t)vsap->vsa_dfaclentp, vsap->vsa_dfaclcnt,
		    sizeof (aclent_t), cmp2acls);
		/* on failure both arrays (regular and default) are freed */
		if ((err = acl_validate(vsap->vsa_dfaclentp,
		    vsap->vsa_dfaclcnt, DEF_ACL_CHECK)) != 0) {
			if (vsap->vsa_aclentp != NULL)
				kmem_free(vsap->vsa_aclentp,
				    vsap->vsa_aclcnt * sizeof (aclent_t));
			kmem_free(vsap->vsa_dfaclentp,
			    vsap->vsa_dfaclcnt * sizeof (aclent_t));
			vsap->vsa_aclentp = vsap->vsa_dfaclentp = NULL;
			return (err);
		}
	}

do_others:
	return (0);
}
1232 1231
1233 1232 static void
1234 1233 acl_free(ufs_ic_acl_t *aclp)
1235 1234 {
1236 1235 while (aclp != NULL) {
1237 1236 ufs_ic_acl_t *nextaclp = aclp->acl_ic_next;
1238 1237 kmem_free(aclp, sizeof (ufs_ic_acl_t));
1239 1238 aclp = nextaclp;
1240 1239 }
1241 1240 }
1242 1241
1243 1242 /*
1244 1243 * ufs_si_free_mem will discard the sp, and the acl hanging off of the
1245 1244 * sp. It is required that the sp not be locked, and not be in the
1246 1245 * cache.
1247 1246 *
1248 1247 * input: pointer to sp to discard.
1249 1248 *
1250 1249 * return - nothing.
1251 1250 *
1252 1251 */
1253 1252 static void
1254 1253 ufs_si_free_mem(si_t *sp)
1255 1254 {
1256 1255 ASSERT(!(sp->s_flags & SI_CACHED));
1257 1256 ASSERT(!RW_LOCK_HELD(&sp->s_lock));
1258 1257 /*
1259 1258 * remove from the cache
1260 1259 * free the acl entries
1261 1260 */
1262 1261 acl_free(sp->aowner);
1263 1262 acl_free(sp->agroup);
1264 1263 acl_free(sp->aother);
1265 1264 acl_free(sp->ausers);
1266 1265 acl_free(sp->agroups);
1267 1266
1268 1267 acl_free(sp->downer);
1269 1268 acl_free(sp->dgroup);
1270 1269 acl_free(sp->dother);
1271 1270 acl_free(sp->dusers);
1272 1271 acl_free(sp->dgroups);
1273 1272
1274 1273 rw_destroy(&sp->s_lock);
1275 1274 kmem_free(sp, sizeof (si_t));
1276 1275 }
1277 1276
/*
 * Duplicate the ACL chain saclp onto the destination anchored at daclp.
 *
 * Callers pass daclp as a cast of the address of a head-pointer field
 * (e.g. (ufs_ic_acl_t *)&tsp->aowner); the first append writes through
 * prev_aclp->acl_ic_next, which lands on that head pointer.
 * NOTE(review): this aliasing trick appears to rely on acl_ic_next
 * being the first member of ufs_ic_acl_t — confirm against ufs_acl.h.
 */
void
acl_cpy(ufs_ic_acl_t *saclp, ufs_ic_acl_t *daclp)
{
	ufs_ic_acl_t *aclp, *prev_aclp = NULL, *aclp1;

	if (saclp == NULL) {
		/*
		 * NOTE(review): this only clears the local copy of the
		 * parameter, not the caller's head pointer.  Harmless in
		 * practice here because every caller in this file passes
		 * a freshly kmem_zalloc'd (already-NULL) destination.
		 */
		daclp = NULL;
		return;
	}
	prev_aclp = daclp;

	/* append a copy of each source entry, preserving order */
	for (aclp = saclp; aclp != NULL; aclp = aclp->acl_ic_next) {
		aclp1 = kmem_alloc(sizeof (ufs_ic_acl_t), KM_SLEEP);
		aclp1->acl_ic_next = NULL;
		aclp1->acl_ic_who = aclp->acl_ic_who;
		aclp1->acl_ic_perm = aclp->acl_ic_perm;
		prev_aclp->acl_ic_next = aclp1;
		/* advance the "previous" cursor via the next-field alias */
		prev_aclp = (ufs_ic_acl_t *)&aclp1->acl_ic_next;
	}
}
1298 1297
1299 1298 /*
1300 1299 * ufs_si_inherit takes a parent acl structure (saclp) and the inode
1301 1300 * of the object that is inheriting an acl and returns the inode
1302 1301 * with the acl linked to it. It also writes the acl to disk if
1303 1302 * it is a unique inode.
1304 1303 *
1305 1304 * ip - pointer to inode of object inheriting the acl (contents lock)
1306 1305 * tdp - parent inode (rw_lock and contents lock)
1307 1306 * mode - creation modes
1308 1307 * cr - credentials pointer
1309 1308 */
int
ufs_si_inherit(struct inode *ip, struct inode *tdp, o_mode_t mode, cred_t *cr)
{
	si_t *tsp, *sp = tdp->i_ufs_acl;
	int error;
	o_mode_t old_modes, old_uid, old_gid;
	int mask;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));
	ASSERT(RW_WRITE_HELD(&tdp->i_rwlock));
	ASSERT(RW_WRITE_HELD(&tdp->i_contents));

	/*
	 * if links/symbolic links, or other invalid acl objects are copied
	 * or moved to a directory with a default acl do not allow inheritance
	 * just return.
	 */
	if (!CHECK_ACL_ALLOWED(ip->i_mode & IFMT))
		return (0);

	/* lock the parent security information */
	rw_enter(&sp->s_lock, RW_READER);

	ASSERT(((tdp->i_mode & IFMT) == IFDIR) ||
	    ((tdp->i_mode & IFMT) == IFATTRDIR));

	/*
	 * Bitmask of which default-object chains the parent has:
	 * bit 0 = owner, bit 1 = group, bit 2 = other.
	 */
	mask = ((sp->downer != NULL) ? 1 : 0) |
	    ((sp->dgroup != NULL) ? 2 : 0) |
	    ((sp->dother != NULL) ? 4 : 0);

	/* no default ACL at all: nothing to inherit */
	if (mask == 0) {
		rw_exit(&sp->s_lock);
		return (0);
	}

	/* a partial default ACL (some but not all three) is malformed */
	if (mask != 7) {
		rw_exit(&sp->s_lock);
		return (EINVAL);
	}

	tsp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
	rw_init(&tsp->s_lock, NULL, RW_DEFAULT, NULL);

	/* copy the default acls */

	ASSERT(RW_READ_HELD(&sp->s_lock));
	acl_cpy(sp->downer, (ufs_ic_acl_t *)&tsp->aowner);
	acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&tsp->agroup);
	acl_cpy(sp->dother, (ufs_ic_acl_t *)&tsp->aother);
	acl_cpy(sp->dusers, (ufs_ic_acl_t *)&tsp->ausers);
	acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&tsp->agroups);
	tsp->aclass.acl_ismask = sp->dclass.acl_ismask;
	tsp->aclass.acl_maskbits = sp->dclass.acl_maskbits;

	/*
	 * set the owner, group, and other values from the master
	 * inode.
	 */

	MODE2ACL(tsp->aowner, (mode >> 6), ip->i_uid);
	MODE2ACL(tsp->agroup, (mode >> 3), ip->i_gid);
	MODE2ACL(tsp->aother, (mode), 0);

	/* the requested group bits further restrict the inherited mask */
	if (tsp->aclass.acl_ismask) {
		tsp->aclass.acl_maskbits &= mode >> 3;
	}


	/* copy default acl if necessary */

	if (((ip->i_mode & IFMT) == IFDIR) ||
	    ((ip->i_mode & IFMT) == IFATTRDIR)) {
		acl_cpy(sp->downer, (ufs_ic_acl_t *)&tsp->downer);
		acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&tsp->dgroup);
		acl_cpy(sp->dother, (ufs_ic_acl_t *)&tsp->dother);
		acl_cpy(sp->dusers, (ufs_ic_acl_t *)&tsp->dusers);
		acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&tsp->dgroups);
		tsp->dclass.acl_ismask = sp->dclass.acl_ismask;
		tsp->dclass.acl_maskbits = sp->dclass.acl_maskbits;
	}
	/*
	 * save the new 9 mode bits in the inode (ip->ic_smode) for
	 * ufs_getattr. Be sure the mode can be recovered if the store
	 * fails.
	 */
	old_modes = ip->i_mode;
	old_uid = ip->i_uid;
	old_gid = ip->i_gid;
	/*
	 * store the acl, and get back a new security anchor if
	 * it is a duplicate.
	 */
	rw_exit(&sp->s_lock);
	rw_enter(&ip->i_rwlock, RW_WRITER);

	/*
	 * Suppress out of inodes messages if instructed in the
	 * tdp inode.
	 */
	ip->i_flag |= tdp->i_flag & IQUIET;

	/* on store failure, roll the inode's mode/owner back */
	if ((error = ufs_si_store(ip, tsp, 0, cr)) != 0) {
		ip->i_mode = old_modes;
		ip->i_uid = old_uid;
		ip->i_gid = old_gid;
	}
	ip->i_flag &= ~IQUIET;
	rw_exit(&ip->i_rwlock);
	return (error);
}
1420 1419
/*
 * Make a private, fully independent copy of an si and every ACL chain
 * hanging off it.  The source must be held at least read-locked and be
 * live (referenced and in use).  The copy is returned unlocked and
 * uncached; ownership passes to the caller.
 */
si_t *
ufs_acl_cp(si_t *sp)
{

	si_t *dsp;

	ASSERT(RW_READ_HELD(&sp->s_lock));
	ASSERT(sp->s_ref && sp->s_use);

	dsp = kmem_zalloc(sizeof (si_t), KM_SLEEP);
	rw_init(&dsp->s_lock, NULL, RW_DEFAULT, NULL);

	/* regular chains (the casts feed acl_cpy's head-pointer alias) */
	acl_cpy(sp->aowner, (ufs_ic_acl_t *)&dsp->aowner);
	acl_cpy(sp->agroup, (ufs_ic_acl_t *)&dsp->agroup);
	acl_cpy(sp->aother, (ufs_ic_acl_t *)&dsp->aother);
	acl_cpy(sp->ausers, (ufs_ic_acl_t *)&dsp->ausers);
	acl_cpy(sp->agroups, (ufs_ic_acl_t *)&dsp->agroups);

	dsp->aclass.acl_ismask = sp->aclass.acl_ismask;
	dsp->aclass.acl_maskbits = sp->aclass.acl_maskbits;

	/* default chains */
	acl_cpy(sp->downer, (ufs_ic_acl_t *)&dsp->downer);
	acl_cpy(sp->dgroup, (ufs_ic_acl_t *)&dsp->dgroup);
	acl_cpy(sp->dother, (ufs_ic_acl_t *)&dsp->dother);
	acl_cpy(sp->dusers, (ufs_ic_acl_t *)&dsp->dusers);
	acl_cpy(sp->dgroups, (ufs_ic_acl_t *)&dsp->dgroups);

	dsp->dclass.acl_ismask = sp->dclass.acl_ismask;
	dsp->dclass.acl_maskbits = sp->dclass.acl_maskbits;

	return (dsp);

}
1454 1453
/*
 * Propagate AT_MODE/AT_UID/AT_GID attribute changes (chmod/chown/chgrp)
 * into the inode's ACL.  Works on a private copy of the si so the
 * shared, cached original is never modified in place; the copy is then
 * handed to ufs_si_store() to be written (or deduplicated).
 * Caller must hold i_contents as writer and have already updated
 * ip->i_mode / i_uid / i_gid, which are read here as the new values.
 */
int
ufs_acl_setattr(struct inode *ip, struct vattr *vap, cred_t *cr)
{

	si_t *sp;
	int mask = vap->va_mask;
	int error = 0;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	if (!(mask & (AT_MODE|AT_UID|AT_GID)))
		return (0);

	/*
	 * if no regular acl's, nothing to do, so let's get out
	 */
	if (!(ip->i_ufs_acl) || !(ip->i_ufs_acl->aowner))
		return (0);

	rw_enter(&ip->i_ufs_acl->s_lock, RW_READER);
	sp = ufs_acl_cp(ip->i_ufs_acl);
	ASSERT(sp != ip->i_ufs_acl);

	/*
	 * set the mask to the group permissions if a mask entry
	 * exists. Otherwise, set the group obj bits to the group
	 * permissions. Since non-trivial ACLs always have a mask,
	 * and the mask is the final arbiter of group permissions,
	 * setting the mask has the effect of changing the effective
	 * group permissions, even if the group_obj permissions in
	 * the ACL aren't changed. Posix P1003.1e states that when
	 * an ACL mask exists, chmod(2) must set the acl mask (NOT the
	 * group_obj permissions) to the requested group permissions.
	 */
	if (mask & AT_MODE) {
		sp->aowner->acl_ic_perm = (o_mode_t)(ip->i_mode & 0700) >> 6;
		if (sp->aclass.acl_ismask)
			sp->aclass.acl_maskbits =
			    (o_mode_t)(ip->i_mode & 070) >> 3;
		else
			sp->agroup->acl_ic_perm =
			    (o_mode_t)(ip->i_mode & 070) >> 3;
		sp->aother->acl_ic_perm = (o_mode_t)(ip->i_mode & 07);
	}

	if (mask & AT_UID) {
		/* Caller has verified our privileges */
		sp->aowner->acl_ic_who = ip->i_uid;
	}

	if (mask & AT_GID) {
		sp->agroup->acl_ic_who = ip->i_gid;
	}

	rw_exit(&ip->i_ufs_acl->s_lock);
	/* store (and possibly dedupe) the modified copy */
	error = ufs_si_store(ip, sp, 0, cr);
	return (error);
}
1513 1512
1514 1513 static int
1515 1514 acl_count(ufs_ic_acl_t *p)
1516 1515 {
1517 1516 ufs_ic_acl_t *acl;
1518 1517 int count;
1519 1518
1520 1519 for (count = 0, acl = p; acl; acl = acl->acl_ic_next, count++)
1521 1520 ;
1522 1521 return (count);
1523 1522 }
1524 1523
1525 1524 /*
1526 1525 * Takes as input a security structure and generates a buffer
1527 1526 * with fsd's in a form which be written to the shadow inode.
1528 1527 */
static int
ufs_sectobuf(si_t *sp, caddr_t *buf, size_t *len)
{
	size_t acl_size;
	size_t def_acl_size;
	caddr_t buffer;
	struct ufs_fsd *fsdp;
	ufs_acl_t *bufaclp;

	/*
	 * Calc size of buffer to hold all the acls
	 */
	acl_size = acl_count(sp->aowner) +	/* owner */
	    acl_count(sp->agroup) +		/* owner group */
	    acl_count(sp->aother) +		/* owner other */
	    acl_count(sp->ausers) +		/* acl list */
	    acl_count(sp->agroups);		/* group alcs */
	if (sp->aclass.acl_ismask)
		acl_size++;

	/* Convert to bytes */
	acl_size *= sizeof (ufs_acl_t);

	/*
	 * Add fsd header (the two ints presumably cover the fsd_type and
	 * fsd_size fields written below — confirm against struct ufs_fsd)
	 */
	if (acl_size)
		acl_size += 2 * sizeof (int);

	/*
	 * Calc size of buffer to hold all the default acls
	 */
	def_acl_size =
	    acl_count(sp->downer) +	/* def owner */
	    acl_count(sp->dgroup) +	/* def owner group */
	    acl_count(sp->dother) +	/* def owner other */
	    acl_count(sp->dusers) +	/* def users */
	    acl_count(sp->dgroups);	/* def group acls */
	if (sp->dclass.acl_ismask)
		def_acl_size++;

	/*
	 * Convert to bytes
	 */
	def_acl_size *= sizeof (ufs_acl_t);

	/*
	 * Add fsd header
	 */
	if (def_acl_size)
		def_acl_size += 2 * sizeof (int);

	/* nothing to serialize: *buf/*len are left untouched */
	if (acl_size + def_acl_size == 0)
		return (0);

	buffer = kmem_zalloc((acl_size + def_acl_size), KM_SLEEP);
	bufaclp = (ufs_acl_t *)buffer;

	if (acl_size == 0)
		goto wrtdefs;

	/* create fsd and copy acls */
	fsdp = (struct ufs_fsd *)bufaclp;
	fsdp->fsd_type = FSD_ACL;
	bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];

	/* ACL_MOVE serializes a chain and advances bufaclp */
	ACL_MOVE(sp->aowner, USER_OBJ, bufaclp);
	ACL_MOVE(sp->agroup, GROUP_OBJ, bufaclp);
	ACL_MOVE(sp->aother, OTHER_OBJ, bufaclp);
	ACL_MOVE(sp->ausers, USER, bufaclp);
	ACL_MOVE(sp->agroups, GROUP, bufaclp);

	if (sp->aclass.acl_ismask) {
		bufaclp->acl_tag = CLASS_OBJ;
		/* note: the who field carries the ismask flag, not a uid */
		bufaclp->acl_who = (uid_t)sp->aclass.acl_ismask;
		bufaclp->acl_perm = (o_mode_t)sp->aclass.acl_maskbits;
		bufaclp++;
	}
	ASSERT(acl_size <= INT_MAX);
	fsdp->fsd_size = (int)acl_size;

wrtdefs:
	if (def_acl_size == 0)
		goto alldone;

	/* if defaults exist then create fsd and copy default acls */
	fsdp = (struct ufs_fsd *)bufaclp;
	fsdp->fsd_type = FSD_DFACL;
	bufaclp = (ufs_acl_t *)&fsdp->fsd_data[0];

	ACL_MOVE(sp->downer, DEF_USER_OBJ, bufaclp);
	ACL_MOVE(sp->dgroup, DEF_GROUP_OBJ, bufaclp);
	ACL_MOVE(sp->dother, DEF_OTHER_OBJ, bufaclp);
	ACL_MOVE(sp->dusers, DEF_USER, bufaclp);
	ACL_MOVE(sp->dgroups, DEF_GROUP, bufaclp);
	if (sp->dclass.acl_ismask) {
		bufaclp->acl_tag = DEF_CLASS_OBJ;
		bufaclp->acl_who = (uid_t)sp->dclass.acl_ismask;
		bufaclp->acl_perm = (o_mode_t)sp->dclass.acl_maskbits;
		bufaclp++;
	}
	ASSERT(def_acl_size <= INT_MAX);
	fsdp->fsd_size = (int)def_acl_size;

alldone:
	/* caller owns the buffer and must kmem_free(*buf, *len) */
	*buf = buffer;
	*len = acl_size + def_acl_size;

	return (0);
}
1637 1636
1638 1637 /*
1639 1638 * free a shadow inode on disk and in memory
1640 1639 */
int
ufs_si_free(si_t *sp, struct vfs *vfsp, cred_t *cr)
{
	struct inode *sip;
	int shadow;
	int err = 0;
	int refcnt;
	int signature;

	ASSERT(vfsp);
	ASSERT(sp);

	/* snapshot the shadow inode number under the read lock */
	rw_enter(&sp->s_lock, RW_READER);
	ASSERT(sp->s_shadow <= INT_MAX);
	shadow = (int)sp->s_shadow;
	ASSERT(sp->s_ref);
	rw_exit(&sp->s_lock);

	/*
	 * Decrement link count on the shadow inode,
	 * and decrement reference count on the sip.
	 */
	if ((err = ufs_iget_alloced(vfsp, shadow, &sip, cr)) == 0) {
		rw_enter(&sip->i_contents, RW_WRITER);
		rw_enter(&sp->s_lock, RW_WRITER);
		ASSERT(sp->s_shadow == shadow);
		ASSERT(sip->i_dquot == 0);
		/* Decrement link count */
		ASSERT(sip->i_nlink > 0);
		/*
		 * bug #1264710 assertion failure below
		 */
		sp->s_use = --sip->i_nlink;
		ufs_setreclaim(sip);
		TRANS_INODE(sip->i_ufsvfs, sip);
		sip->i_flag |= ICHG | IMOD;
		sip->i_seq++;
		ITIMES_NOLOCK(sip);
		/* Dec ref counts on si referenced by this ip */
		refcnt = --sp->s_ref;
		signature = sp->s_signature;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		/*
		 * Release s_lock before calling VN_RELE
		 * (which may want to acquire i_contents).
		 */
		rw_exit(&sp->s_lock);
		rw_exit(&sip->i_contents);
		VN_RELE(ITOV(sip));
	} else {
		/*
		 * Could not get the shadow inode: still drop our
		 * reference on the si so it can be reclaimed.
		 */
		rw_enter(&sp->s_lock, RW_WRITER);
		/* Dec ref counts on si referenced by this ip */
		refcnt = --sp->s_ref;
		signature = sp->s_signature;
		ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
		rw_exit(&sp->s_lock);
	}

	/* last reference gone: remove the si from the cache and free it */
	if (refcnt == 0)
		si_cache_del(sp, signature);
	return (err);
}
1703 1702
1704 1703 /*
1705 1704 * Seach the si cache for an si structure by inode #.
1706 1705 * Returns a locked si structure.
1707 1706 *
1708 1707 * Parameters:
1709 1708 * ip - Ptr to an inode on this fs
1710 1709 * spp - Ptr to ptr to si struct for the results, if found.
1711 1710 *
1712 1711 * Returns: 0 - Success (results in spp)
1713 1712 * 1 - Failure (spp undefined)
1714 1713 */
1715 1714 static int
1716 1715 si_cachei_get(struct inode *ip, si_t **spp)
1717 1716 {
1718 1717 si_t *sp;
1719 1718
1720 1719 rw_enter(&si_cache_lock, RW_READER);
1721 1720 loop:
1722 1721 for (sp = si_cachei[SI_HASH(ip->i_shadow)]; sp; sp = sp->s_forw)
1723 1722 if (sp->s_shadow == ip->i_shadow && sp->s_dev == ip->i_dev)
1724 1723 break;
1725 1724
1726 1725 if (sp == NULL) {
1727 1726 /* Not in cache */
1728 1727 rw_exit(&si_cache_lock);
1729 1728 return (1);
1730 1729 }
1731 1730 /* Found it */
1732 1731 rw_enter(&sp->s_lock, RW_WRITER);
1733 1732 alldone:
1734 1733 rw_exit(&si_cache_lock);
1735 1734 *spp = sp;
1736 1735 return (0);
1737 1736 }
1738 1737
1739 1738 /*
1740 1739 * Seach the si cache by si structure (ie duplicate of the one passed in).
1741 1740 * In order for a match the signatures must be the same and
1742 1741 * the devices must be the same, the acls must match and
1743 1742 * link count of the cached shadow must be less than the
1744 1743 * size of ic_nlink - 1. MAXLINK - 1 is used to allow the count
1745 1744 * to be incremented one more time by the caller.
1746 1745 * Returns a locked si structure.
1747 1746 *
1748 1747 * Parameters:
1749 1748 * ip - Ptr to an inode on this fs
1750 1749 * spi - Ptr to si the struct we're searching the cache for.
1751 1750 * spp - Ptr to ptr to si struct for the results, if found.
1752 1751 *
1753 1752 * Returns: 0 - Success (results in spp)
1754 1753 * 1 - Failure (spp undefined)
1755 1754 */
1756 1755 static int
1757 1756 si_cachea_get(struct inode *ip, si_t *spi, si_t **spp)
1758 1757 {
1759 1758 si_t *sp;
1760 1759
1761 1760 spi->s_dev = ip->i_dev;
1762 1761 spi->s_signature = si_signature(spi);
1763 1762 rw_enter(&si_cache_lock, RW_READER);
1764 1763 loop:
1765 1764 for (sp = si_cachea[SI_HASH(spi->s_signature)]; sp; sp = sp->s_next) {
1766 1765 if (sp->s_signature == spi->s_signature &&
1767 1766 sp->s_dev == spi->s_dev &&
1768 1767 sp->s_use > 0 && /* deleting */
1769 1768 sp->s_use <= (MAXLINK - 1) && /* Too many links */
1770 1769 !si_cmp(sp, spi))
1771 1770 break;
1772 1771 }
1773 1772
1774 1773 if (sp == NULL) {
1775 1774 /* Cache miss */
1776 1775 rw_exit(&si_cache_lock);
1777 1776 return (1);
1778 1777 }
1779 1778 /* Found it */
1780 1779 rw_enter(&sp->s_lock, RW_WRITER);
1781 1780 alldone:
1782 1781 spi->s_shadow = sp->s_shadow; /* XXX For debugging */
1783 1782 rw_exit(&si_cache_lock);
1784 1783 *spp = sp;
1785 1784 return (0);
1786 1785 }
1787 1786
1788 1787 /*
1789 1788 * Place an si structure in the si cache. May cause duplicates.
1790 1789 *
1791 1790 * Parameters:
1792 1791 * sp - Ptr to the si struct to add to the cache.
1793 1792 *
1794 1793 * Returns: Nothing (void)
1795 1794 */
1796 1795 static void
1797 1796 si_cache_put(si_t *sp)
1798 1797 {
1799 1798 si_t **tspp;
1800 1799
1801 1800 ASSERT(sp->s_fore == NULL);
1802 1801 rw_enter(&si_cache_lock, RW_WRITER);
1803 1802 if (!sp->s_signature)
1804 1803 sp->s_signature = si_signature(sp);
1805 1804 sp->s_flags |= SI_CACHED;
1806 1805 sp->s_fore = NULL;
1807 1806
1808 1807 /* The 'by acl' chains */
1809 1808 tspp = &si_cachea[SI_HASH(sp->s_signature)];
1810 1809 sp->s_next = *tspp;
1811 1810 *tspp = sp;
1812 1811
1813 1812 /* The 'by inode' chains */
1814 1813 tspp = &si_cachei[SI_HASH(sp->s_shadow)];
1815 1814 sp->s_forw = *tspp;
1816 1815 *tspp = sp;
1817 1816
1818 1817 rw_exit(&si_cache_lock);
1819 1818 }
1820 1819
1821 1820 /*
1822 1821 * The sp passed in is a candidate for deletion from the cache. We acquire
1823 1822 * the cache lock first, so no cache searches can be done. Then we search
1824 1823 * for the acl in the cache, and if we find it we can lock it and check that
1825 1824 * nobody else attached to it while we were acquiring the locks. If the acl
1826 1825 * is in the cache and still has a zero reference count, then we remove it
1827 1826 * from the cache and deallocate it. If the reference count is non-zero or
1828 1827 * it is not found in the cache, then someone else attached to it or has
1829 1828 * already freed it, so we just return.
1830 1829 *
1831 1830 * Parameters:
1832 1831 * sp - Ptr to the sp struct which is the candicate for deletion.
1833 1832 * signature - the signature for the acl for lookup in the hash table
1834 1833 *
1835 1834 * Returns: Nothing (void)
1836 1835 */
void
si_cache_del(si_t *sp, int signature)
{
	si_t **tspp;
	int hash;
	int foundacl = 0;

	/*
	 * Unlink & free the sp from the other queues, then destroy it.
	 * Search the 'by acl' chain first, then the 'by inode' chain
	 * after the acl is locked.
	 */
	rw_enter(&si_cache_lock, RW_WRITER);
	hash = SI_HASH(signature);
	for (tspp = &si_cachea[hash]; *tspp; tspp = &(*tspp)->s_next) {
		if (*tspp == sp) {
			/*
			 * Wait to grab the acl lock until after the acl has
			 * been found in the cache. Otherwise it might try to
			 * grab a lock that has already been destroyed, or
			 * delete an acl that has already been freed.
			 */
			rw_enter(&sp->s_lock, RW_WRITER);
			/* See if someone else attached to it */
			if (sp->s_ref) {
				rw_exit(&sp->s_lock);
				rw_exit(&si_cache_lock);
				return;
			}
			ASSERT(sp->s_fore == NULL);
			ASSERT(sp->s_flags & SI_CACHED);
			foundacl = 1;
			/* unlink from the 'by acl' chain */
			*tspp = sp->s_next;
			break;
		}
	}

	/*
	 * If the acl was not in the cache, we assume another thread has
	 * deleted it already. This could happen if another thread attaches to
	 * the acl and then releases it after this thread has already found the
	 * reference count to be zero but has not yet taken the cache lock.
	 * Both threads end up seeing a reference count of zero, and call into
	 * si_cache_del. See bug 4244827 for details on the race condition.
	 */
	if (foundacl == 0) {
		rw_exit(&si_cache_lock);
		return;
	}

	/* Now check the 'by inode' chain */
	hash = SI_HASH(sp->s_shadow);
	for (tspp = &si_cachei[hash]; *tspp; tspp = &(*tspp)->s_forw) {
		if (*tspp == sp) {
			*tspp = sp->s_forw;
			break;
		}
	}

	/*
	 * At this point, we can unlock everything because this si
	 * is no longer in the cache, thus cannot be attached to.
	 */
	rw_exit(&sp->s_lock);
	rw_exit(&si_cache_lock);
	sp->s_flags &= ~SI_CACHED;
	(void) ufs_si_free_mem(sp);
}
1905 1904
1906 1905 /*
1907 1906 * Alloc the hash buckets for the si cache & initialize
1908 1907 * the unreferenced anchor and the cache lock.
1909 1908 */
1910 1909 void
1911 1910 si_cache_init(void)
1912 1911 {
1913 1912 rw_init(&si_cache_lock, NULL, RW_DEFAULT, NULL);
1914 1913
1915 1914 /* The 'by acl' headers */
1916 1915 si_cachea = kmem_zalloc(si_cachecnt * sizeof (si_t *), KM_SLEEP);
1917 1916 /* The 'by inode' headers */
1918 1917 si_cachei = kmem_zalloc(si_cachecnt * sizeof (si_t *), KM_SLEEP);
1919 1918 }
1920 1919
1921 1920 /*
1922 1921 * aclcksum takes an acl and generates a checksum. It takes as input
1923 1922 * the acl to start at.
1924 1923 *
1925 1924 * s_aclp - pointer to starting acl
1926 1925 *
1927 1926 * returns checksum
1928 1927 */
1929 1928 static int
1930 1929 aclcksum(ufs_ic_acl_t *s_aclp)
1931 1930 {
1932 1931 ufs_ic_acl_t *aclp;
1933 1932 int signature = 0;
1934 1933 for (aclp = s_aclp; aclp; aclp = aclp->acl_ic_next) {
1935 1934 signature += aclp->acl_ic_perm;
1936 1935 signature += aclp->acl_ic_who;
1937 1936 }
1938 1937 return (signature);
1939 1938 }
1940 1939
1941 1940 /*
1942 1941 * Generate a unique signature for an si structure. Used by the
1943 1942 * search routine si_cachea_get() to quickly identify candidates
1944 1943 * prior to calling si_cmp().
1945 1944 * Parameters:
1946 1945 * sp - Ptr to the si struct to generate the signature for.
1947 1946 *
1948 1947 * Returns: A signature for the si struct (really a checksum)
1949 1948 */
1950 1949 static int
1951 1950 si_signature(si_t *sp)
1952 1951 {
1953 1952 int signature = sp->s_dev;
1954 1953
1955 1954 signature += aclcksum(sp->aowner) + aclcksum(sp->agroup) +
1956 1955 aclcksum(sp->aother) + aclcksum(sp->ausers) +
1957 1956 aclcksum(sp->agroups) + aclcksum(sp->downer) +
1958 1957 aclcksum(sp->dgroup) + aclcksum(sp->dother) +
1959 1958 aclcksum(sp->dusers) + aclcksum(sp->dgroups);
1960 1959 if (sp->aclass.acl_ismask)
1961 1960 signature += sp->aclass.acl_maskbits;
1962 1961 if (sp->dclass.acl_ismask)
1963 1962 signature += sp->dclass.acl_maskbits;
1964 1963
1965 1964 return (signature);
1966 1965 }
1967 1966
1968 1967 /*
1969 1968 * aclcmp compares to acls to see if they are identical.
1970 1969 *
1971 1970 * sp1 is source
1972 1971 * sp2 is sourceb
1973 1972 *
1974 1973 * returns 0 if equal and 1 if not equal
1975 1974 */
1976 1975 static int
1977 1976 aclcmp(ufs_ic_acl_t *aclin1p, ufs_ic_acl_t *aclin2p)
1978 1977 {
1979 1978 ufs_ic_acl_t *aclp1;
1980 1979 ufs_ic_acl_t *aclp2;
1981 1980
1982 1981 /*
1983 1982 * if the starting pointers are equal then they are equal so
1984 1983 * just return.
1985 1984 */
1986 1985 if (aclin1p == aclin2p)
1987 1986 return (0);
1988 1987 /*
1989 1988 * check element by element
1990 1989 */
1991 1990 for (aclp1 = aclin1p, aclp2 = aclin2p; aclp1 && aclp2;
1992 1991 aclp1 = aclp1->acl_ic_next, aclp2 = aclp2->acl_ic_next) {
1993 1992 if (aclp1->acl_ic_perm != aclp2->acl_ic_perm ||
1994 1993 aclp1->acl_ic_who != aclp2->acl_ic_who)
1995 1994 return (1);
1996 1995 }
1997 1996 /*
1998 1997 * both must be zero (at the end of the acl)
1999 1998 */
2000 1999 if (aclp1 || aclp2)
2001 2000 return (1);
2002 2001
2003 2002 return (0);
2004 2003 }
2005 2004
2006 2005 /*
2007 2006 * Do extensive, field-by-field compare of two si structures. Returns
2008 2007 * 0 if they are exactly identical, 1 otherwise.
2009 2008 *
2010 2009 * Paramters:
2011 2010 * sp1 - Ptr to 1st si struct
2012 2011 * sp2 - Ptr to 2nd si struct
2013 2012 *
2014 2013 * Returns:
2015 2014 * 0 - Not identical
2016 2015 * 1 - Identical
2017 2016 */
2018 2017 static int
2019 2018 si_cmp(si_t *sp1, si_t *sp2)
2020 2019 {
2021 2020 if (sp1->s_dev != sp2->s_dev)
2022 2021 return (1);
2023 2022 if (aclcmp(sp1->aowner, sp2->aowner) ||
2024 2023 aclcmp(sp1->agroup, sp2->agroup) ||
2025 2024 aclcmp(sp1->aother, sp2->aother) ||
2026 2025 aclcmp(sp1->ausers, sp2->ausers) ||
2027 2026 aclcmp(sp1->agroups, sp2->agroups) ||
2028 2027 aclcmp(sp1->downer, sp2->downer) ||
2029 2028 aclcmp(sp1->dgroup, sp2->dgroup) ||
2030 2029 aclcmp(sp1->dother, sp2->dother) ||
2031 2030 aclcmp(sp1->dusers, sp2->dusers) ||
2032 2031 aclcmp(sp1->dgroups, sp2->dgroups))
2033 2032 return (1);
2034 2033 if (sp1->aclass.acl_ismask != sp2->aclass.acl_ismask)
2035 2034 return (1);
2036 2035 if (sp1->dclass.acl_ismask != sp2->dclass.acl_ismask)
2037 2036 return (1);
2038 2037 if (sp1->aclass.acl_ismask &&
2039 2038 sp1->aclass.acl_maskbits != sp2->aclass.acl_maskbits)
2040 2039 return (1);
2041 2040 if (sp1->dclass.acl_ismask &&
2042 2041 sp1->dclass.acl_maskbits != sp2->dclass.acl_maskbits)
2043 2042 return (1);
2044 2043
2045 2044 return (0);
2046 2045 }
2047 2046
2048 2047 /*
2049 2048 * Remove all acls associated with a device. All acls must have
2050 2049 * a reference count of zero.
2051 2050 *
2052 2051 * inputs:
2053 2052 * device - device to remove from the cache
2054 2053 *
2055 2054 * outputs:
2056 2055 * none
2057 2056 */
2058 2057 void
2059 2058 ufs_si_cache_flush(dev_t dev)
2060 2059 {
2061 2060 si_t *tsp, **tspp;
2062 2061 int i;
2063 2062
2064 2063 rw_enter(&si_cache_lock, RW_WRITER);
2065 2064 for (i = 0; i < si_cachecnt; i++) {
2066 2065 tspp = &si_cachea[i];
2067 2066 while (*tspp) {
2068 2067 if ((*tspp)->s_dev == dev) {
2069 2068 *tspp = (*tspp)->s_next;
2070 2069 } else {
2071 2070 tspp = &(*tspp)->s_next;
2072 2071 }
2073 2072 }
2074 2073 }
2075 2074 for (i = 0; i < si_cachecnt; i++) {
2076 2075 tspp = &si_cachei[i];
2077 2076 while (*tspp) {
2078 2077 if ((*tspp)->s_dev == dev) {
2079 2078 tsp = *tspp;
2080 2079 *tspp = (*tspp)->s_forw;
2081 2080 tsp->s_flags &= ~SI_CACHED;
2082 2081 ufs_si_free_mem(tsp);
2083 2082 } else {
2084 2083 tspp = &(*tspp)->s_forw;
2085 2084 }
2086 2085 }
2087 2086 }
2088 2087 rw_exit(&si_cache_lock);
2089 2088 }
2090 2089
2091 2090 /*
2092 2091 * ufs_si_del is used to unhook a sp from a inode in memory
2093 2092 *
2094 2093 * ip is the inode to remove the sp from.
2095 2094 */
2096 2095 void
2097 2096 ufs_si_del(struct inode *ip)
2098 2097 {
2099 2098 si_t *sp = ip->i_ufs_acl;
2100 2099 int refcnt;
2101 2100 int signature;
2102 2101
2103 2102 if (sp) {
2104 2103 rw_enter(&sp->s_lock, RW_WRITER);
2105 2104 refcnt = --sp->s_ref;
2106 2105 signature = sp->s_signature;
2107 2106 ASSERT(sp->s_ref >= 0 && sp->s_ref <= sp->s_use);
2108 2107 rw_exit(&sp->s_lock);
2109 2108 if (refcnt == 0)
2110 2109 si_cache_del(sp, signature);
2111 2110 ip->i_ufs_acl = NULL;
2112 2111 }
2113 2112 }
↓ open down ↓ |
1359 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX