Line data Source code
1 : /*
2 : * Generic pidhash and scalable, time-bounded PID allocator
3 : *
4 : * (C) 2002-2003 Nadia Yvette Chambers, IBM
5 : * (C) 2004 Nadia Yvette Chambers, Oracle
6 : * (C) 2002-2004 Ingo Molnar, Red Hat
7 : *
8 : * pid-structures are backing objects for tasks sharing a given ID to chain
9 : * against. There is very little to them aside from hashing them and
10 : * parking tasks using given IDs on a list.
11 : *
12 : * The hash is always changed with the tasklist_lock write-acquired,
13 : * and the hash is only accessed with the tasklist_lock at least
14 : * read-acquired, so there's no additional SMP locking needed here.
15 : *
16 : * We have a list of bitmap pages, whose bitmaps represent the PID space.
17 : * Allocating and freeing PIDs is completely lockless. The worst case,
18 : * when all but one of the 1 million possible PIDs are already
19 : * allocated, costs a scan of 32 list entries and at most PAGE_SIZE
20 : * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
21 : *
22 : * Pid namespaces:
23 : * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
24 : * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
25 : * Many thanks to Oleg Nesterov for comments and help
26 : *
27 : */
28 :
29 : #include <linux/mm.h>
30 : #include <linux/export.h>
31 : #include <linux/slab.h>
32 : #include <linux/init.h>
33 : #include <linux/rculist.h>
34 : #include <linux/bootmem.h>
35 : #include <linux/hash.h>
36 : #include <linux/pid_namespace.h>
37 : #include <linux/init_task.h>
38 : #include <linux/syscalls.h>
39 : #include <linux/proc_ns.h>
40 : #include <linux/proc_fs.h>
41 :
42 : #define pid_hashfn(nr, ns) \
43 : hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
44 : static struct hlist_head *pid_hash;
45 : static unsigned int pidhash_shift = 4;
46 : struct pid init_struct_pid = INIT_STRUCT_PID;
47 :
48 : int pid_max = PID_MAX_DEFAULT;
49 :
50 : #define RESERVED_PIDS 300
51 :
52 : int pid_max_min = RESERVED_PIDS + 1;
53 : int pid_max_max = PID_MAX_LIMIT;
54 :
55 : static inline int mk_pid(struct pid_namespace *pid_ns,
56 : struct pidmap *map, int off)
57 : {
58 425 : return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
59 : }
60 :
61 : #define find_next_offset(map, off) \
62 : find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
63 :
64 : /*
65 : * PID-map pages start out as NULL; they get allocated upon
66 : * first use and are never deallocated. This way a low pid_max
67 : * value does not cause lots of bitmaps to be allocated, but
68 : * the scheme scales up to 4 million PIDs at runtime.
69 : */
70 : struct pid_namespace init_pid_ns = {
71 : .kref = {
72 : .refcount = ATOMIC_INIT(2),
73 : },
74 : .pidmap = {
75 : [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
76 : },
77 : .last_pid = 0,
78 : .nr_hashed = PIDNS_HASH_ADDING,
79 : .level = 0,
80 : .child_reaper = &init_task,
81 : .user_ns = &init_user_ns,
82 : .ns.inum = PROC_PID_INIT_INO,
83 : #ifdef CONFIG_PID_NS
84 : .ns.ops = &pidns_operations,
85 : #endif
86 : };
87 : EXPORT_SYMBOL_GPL(init_pid_ns);
88 :
89 : /*
90 : * Note: disable interrupts while the pidmap_lock is held as an
91 : * interrupt might come in and do read_lock(&tasklist_lock).
92 : *
93 : * If we don't disable interrupts there is a nasty deadlock between
94 : * detach_pid()->free_pid() and another cpu that does
95 : * spin_lock(&pidmap_lock) followed by an interrupt routine that does
96 : * read_lock(&tasklist_lock);
97 : *
98 : * After we clean up the tasklist_lock and know there are no
99 : * irq handlers that take it, we can leave interrupts enabled.
100 : * For now it is easier to be safe than to prove it can't happen.
101 : */
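/*
 * A sketch of the interleaving the comment above guards against, assuming
 * pidmap_lock were taken without disabling interrupts:
 *
 *   CPU 0                                 CPU 1
 *   spin_lock(&pidmap_lock);              write_lock_irq(&tasklist_lock);
 *   <interrupt arrives>                   detach_pid() -> free_pid():
 *     read_lock(&tasklist_lock);            spin_lock(&pidmap_lock);
 *     spins: CPU 1 holds the write lock     spins: CPU 0 holds pidmap_lock
 *
 * CPU 0 cannot release pidmap_lock until its interrupt handler returns,
 * so neither lock owner can make progress.
 */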
102 :
103 : static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
104 :
105 2911 : static void free_pidmap(struct upid *upid)
106 : {
107 2911 : int nr = upid->nr;
108 2911 : struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
109 2911 : int offset = nr & BITS_PER_PAGE_MASK;
110 :
111 5822 : clear_bit(offset, map->page);
112 2911 : atomic_inc(&map->nr_free);
113 2911 : }
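/*
 * Worked example of the nr -> (map, offset) split above, assuming a 4K
 * PAGE_SIZE (BITS_PER_PAGE == 32768) and a pid_max raised beyond the
 * default: for nr == 40000,
 *   nr / BITS_PER_PAGE      == 1    -> second pidmap page
 *   nr & BITS_PER_PAGE_MASK == 7232 -> bit within that page
 * so the PID is returned by clearing bit 7232 of pidmap[1].
 */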
114 :
115 : /*
116 : * If we started walking pids at 'base', is 'a' seen before 'b'?
117 : */
118 : static int pid_before(int base, int a, int b)
119 : {
120 : /*
121 : * This is the same as saying
122 : *
123 : * (a - base + 2^32) % 2^32 < (b - base + 2^32) % 2^32
124 : * and that mapping orders 'a' and 'b' with respect to 'base'.
125 : */
126 1 : return (unsigned)(a - base) < (unsigned)(b - base);
127 : }
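/*
 * Worked example: with pid_max == 32768, allocation can wrap from 32767
 * back to RESERVED_PIDS. For base == 30000, a == 30001 (allocated before
 * the wrap) and b == 400 (allocated after it):
 *   (unsigned)(30001 - 30000) == 1
 *   (unsigned)(400 - 30000)   == 4294937696 (underflow wraps high)
 * so pid_before(30000, 30001, 400) is true, matching the walk order.
 */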
128 :
129 : /*
130 : * We might be racing with someone else trying to set pid_ns->last_pid
131 : * at pid allocation time (there's also a sysctl for this, but racing
132 : * with the sysctl is OK; see the comment in kernel/pid_namespace.c).
133 : * We want the winner to have the "later" value, because if the
134 : * "earlier" value prevails, then a pid may get reused immediately.
135 : *
136 : * Since pids rollover, it is not sufficient to just pick the bigger
137 : * value. We have to consider where we started counting from.
138 : *
139 : * 'base' is the value of pid_ns->last_pid that we observed when
140 : * we started looking for a pid.
141 : *
142 : * 'pid' is the pid that we eventually found.
143 : */
144 : static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
145 : {
146 : int prev;
147 : int last_write = base;
148 : do {
149 : prev = last_write;
150 5988 : last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
151 2995 : } while ((prev != last_write) && (pid_before(base, last_write, pid)));
152 : }
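/*
 * Example of the retry rule above: if base == 100, we allocated
 * pid == 105, and a racing CPU stored 103 first, the cmpxchg() fails
 * and returns 103; pid_before(100, 103, 105) is true, so we loop and
 * install 105 over 103. Had the racer stored 110 instead, 105 would be
 * the "earlier" value and we would keep 110.
 */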
153 :
154 2993 : static int alloc_pidmap(struct pid_namespace *pid_ns)
155 : {
156 2993 : int i, offset, max_scan, pid, last = pid_ns->last_pid;
157 : struct pidmap *map;
158 :
159 2993 : pid = last + 1;
160 2993 : if (pid >= pid_max)
161 : pid = RESERVED_PIDS;
162 2993 : offset = pid & BITS_PER_PAGE_MASK;
163 2993 : map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
164 : /*
165 : * If last_pid points into the middle of the map->page we
166 : * want to scan this bitmap block twice; the second time
167 : * we start with offset == 0 (or RESERVED_PIDS).
168 : */
169 2993 : max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
170 2993 : for (i = 0; i <= max_scan; ++i) {
171 2993 : if (unlikely(!map->page)) {
172 : void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
173 : /*
174 : * Free the page if someone raced with us
175 : * installing it:
176 : */
177 : spin_lock_irq(&pidmap_lock);
178 0 : if (!map->page) {
179 0 : map->page = page;
180 : page = NULL;
181 : }
182 : spin_unlock_irq(&pidmap_lock);
183 0 : kfree(page);
184 0 : if (unlikely(!map->page))
185 : break;
186 : }
187 2993 : if (likely(atomic_read(&map->nr_free))) {
188 : for ( ; ; ) {
189 2994 : if (!test_and_set_bit(offset, map->page)) {
190 2993 : atomic_dec(&map->nr_free);
191 : set_last_pid(pid_ns, last, pid);
192 : return pid;
193 : }
194 1 : offset = find_next_offset(map, offset);
195 1 : if (offset >= BITS_PER_PAGE)
196 : break;
197 : pid = mk_pid(pid_ns, map, offset);
198 1 : if (pid >= pid_max)
199 : break;
200 : }
201 : }
202 0 : if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
203 0 : ++map;
204 : offset = 0;
205 : } else {
206 0 : map = &pid_ns->pidmap[0];
207 : offset = RESERVED_PIDS;
208 0 : if (unlikely(last == offset))
209 : break;
210 : }
211 : pid = mk_pid(pid_ns, map, offset);
212 : }
213 : return -1;
214 : }
215 :
216 440 : int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
217 : {
218 : int offset;
219 : struct pidmap *map, *end;
220 :
221 440 : if (last >= PID_MAX_LIMIT)
222 : return -1;
223 :
224 440 : offset = (last + 1) & BITS_PER_PAGE_MASK;
225 440 : map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
226 440 : end = &pid_ns->pidmap[PIDMAP_ENTRIES];
227 456 : for (; map < end; map++, offset = 0) {
228 440 : if (unlikely(!map->page))
229 0 : continue;
230 440 : offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
231 440 : if (offset < BITS_PER_PAGE)
232 424 : return mk_pid(pid_ns, map, offset);
233 : }
234 : return -1;
235 : }
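/*
 * Illustrative: next_pidmap(ns, 300) returns the smallest pid greater
 * than 300 that is currently allocated in ns, or -1 if there is none.
 * find_ge_pid() further down uses this to let proc walk the pid space
 * in increasing order.
 */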
236 :
237 69887 : void put_pid(struct pid *pid)
238 : {
239 : struct pid_namespace *ns;
240 :
241 69887 : if (!pid)
242 69887 : return;
243 :
244 11544 : ns = pid->numbers[pid->level].ns;
245 20180 : if ((atomic_read(&pid->count) == 1) ||
246 8636 : atomic_dec_and_test(&pid->count)) {
247 2908 : kmem_cache_free(ns->pid_cachep, pid);
248 2908 : put_pid_ns(ns);
249 : }
250 : }
251 : EXPORT_SYMBOL_GPL(put_pid);
252 :
253 2911 : static void delayed_put_pid(struct rcu_head *rhp)
254 : {
255 2911 : struct pid *pid = container_of(rhp, struct pid, rcu);
256 2911 : put_pid(pid);
257 2911 : }
258 :
259 2911 : void free_pid(struct pid *pid)
260 : {
261 : /* We can be called with write_lock_irq(&tasklist_lock) held */
262 : int i;
263 : unsigned long flags;
264 :
265 2911 : spin_lock_irqsave(&pidmap_lock, flags);
266 5822 : for (i = 0; i <= pid->level; i++) {
267 2911 : struct upid *upid = pid->numbers + i;
268 2911 : struct pid_namespace *ns = upid->ns;
269 : hlist_del_rcu(&upid->pid_chain);
270 2911 : switch(--ns->nr_hashed) {
271 : case 2:
272 : case 1:
273 : /* When all that is left in the pid namespace
274 : * is the reaper, wake up the reaper. The reaper
275 : * may be sleeping in zap_pid_ns_processes().
276 : */
277 0 : wake_up_process(ns->child_reaper);
278 0 : break;
279 : case PIDNS_HASH_ADDING:
280 : /* Handle a fork failure of the first process */
281 : WARN_ON(ns->child_reaper);
282 0 : ns->nr_hashed = 0;
283 : /* fall through */
284 : case 0:
285 0 : schedule_work(&ns->proc_work);
286 : break;
287 : }
288 : }
289 : spin_unlock_irqrestore(&pidmap_lock, flags);
290 :
291 2911 : for (i = 0; i <= pid->level; i++)
292 2911 : free_pidmap(pid->numbers + i);
293 :
294 2911 : call_rcu(&pid->rcu, delayed_put_pid);
295 2911 : }
296 :
297 2993 : struct pid *alloc_pid(struct pid_namespace *ns)
298 : {
299 : struct pid *pid;
300 : enum pid_type type;
301 : int i, nr;
302 : struct pid_namespace *tmp;
303 : struct upid *upid;
304 :
305 2993 : pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
306 2993 : if (!pid)
307 : goto out;
308 :
309 : tmp = ns;
310 2993 : pid->level = ns->level;
311 5986 : for (i = ns->level; i >= 0; i--) {
312 2993 : nr = alloc_pidmap(tmp);
313 2993 : if (nr < 0)
314 : goto out_free;
315 :
316 2993 : pid->numbers[i].nr = nr;
317 2993 : pid->numbers[i].ns = tmp;
318 2993 : tmp = tmp->parent;
319 : }
320 :
321 2993 : if (unlikely(is_child_reaper(pid))) {
322 1 : if (pid_ns_prepare_proc(ns))
323 : goto out_free;
324 : }
325 :
326 : get_pid_ns(ns);
327 2993 : atomic_set(&pid->count, 1);
328 11972 : for (type = 0; type < PIDTYPE_MAX; ++type)
329 8979 : INIT_HLIST_HEAD(&pid->tasks[type]);
330 :
331 2993 : upid = pid->numbers + ns->level;
332 : spin_lock_irq(&pidmap_lock);
333 2993 : if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
334 : goto out_unlock;
335 2993 : for ( ; upid >= pid->numbers; --upid) {
336 5986 : hlist_add_head_rcu(&upid->pid_chain,
337 5986 : &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
338 2993 : upid->ns->nr_hashed++;
339 : }
340 : spin_unlock_irq(&pidmap_lock);
341 :
342 : out:
343 2993 : return pid;
344 :
345 : out_unlock:
346 : spin_unlock_irq(&pidmap_lock);
347 0 : put_pid_ns(ns);
348 :
349 : out_free:
350 0 : while (++i <= ns->level)
351 0 : free_pidmap(pid->numbers + i);
352 :
353 0 : kmem_cache_free(ns->pid_cachep, pid);
354 : pid = NULL;
355 0 : goto out;
356 : }
357 :
358 0 : void disable_pid_allocation(struct pid_namespace *ns)
359 : {
360 : spin_lock_irq(&pidmap_lock);
361 0 : ns->nr_hashed &= ~PIDNS_HASH_ADDING;
362 : spin_unlock_irq(&pidmap_lock);
363 0 : }
364 :
365 5215 : struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
366 : {
367 : struct upid *pnr;
368 :
369 10487 : hlist_for_each_entry_rcu(pnr,
370 : &pid_hash[pid_hashfn(nr, ns)], pid_chain)
371 4798 : if (pnr->nr == nr && pnr->ns == ns)
372 4741 : return container_of(pnr, struct pid,
373 : numbers[ns->level]);
374 :
375 : return NULL;
376 : }
377 : EXPORT_SYMBOL_GPL(find_pid_ns);
378 :
379 2738 : struct pid *find_vpid(int nr)
380 : {
381 5476 : return find_pid_ns(nr, task_active_pid_ns(current));
382 : }
383 : EXPORT_SYMBOL_GPL(find_vpid);
384 :
385 : /*
386 : * attach_pid() must be called with the tasklist_lock write-held.
387 : */
388 8964 : void attach_pid(struct task_struct *task, enum pid_type type)
389 : {
390 9197 : struct pid_link *link = &task->pids[type];
391 9197 : hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
392 8964 : }
393 :
394 8975 : static void __change_pid(struct task_struct *task, enum pid_type type,
395 : struct pid *new)
396 : {
397 : struct pid_link *link;
398 : struct pid *pid;
399 : int tmp;
400 :
401 8975 : link = &task->pids[type];
402 8975 : pid = link->pid;
403 :
404 : hlist_del_rcu(&link->node);
405 8975 : link->pid = new;
406 :
407 28223 : for (tmp = PIDTYPE_MAX; --tmp >= 0; )
408 32676 : if (!hlist_empty(&pid->tasks[tmp]))
409 8975 : return;
410 :
411 2910 : free_pid(pid);
412 : }
413 :
414 8742 : void detach_pid(struct task_struct *task, enum pid_type type)
415 : {
416 8742 : __change_pid(task, type, NULL);
417 8742 : }
418 :
419 233 : void change_pid(struct task_struct *task, enum pid_type type,
420 : struct pid *pid)
421 : {
422 233 : __change_pid(task, type, pid);
423 : attach_pid(task, type);
424 233 : }
425 :
426 : /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
427 0 : void transfer_pid(struct task_struct *old, struct task_struct *new,
428 : enum pid_type type)
429 : {
430 0 : new->pids[type].pid = old->pids[type].pid;
431 0 : hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
432 0 : }
433 :
434 5289 : struct task_struct *pid_task(struct pid *pid, enum pid_type type)
435 : {
436 : struct task_struct *result = NULL;
437 17296 : if (pid) {
438 : struct hlist_node *first;
439 17262 : first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
440 : lockdep_tasklist_lock_is_held());
441 17262 : if (first)
442 17176 : result = hlist_entry(first, struct task_struct, pids[(type)].node);
443 : }
444 5289 : return result;
445 : }
446 : EXPORT_SYMBOL(pid_task);
447 :
448 : /*
449 : * Must be called under rcu_read_lock().
450 : */
451 842 : struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
452 : {
453 : rcu_lockdep_assert(rcu_read_lock_held(),
454 : "find_task_by_pid_ns() needs rcu_read_lock()"
455 : " protection");
456 1684 : return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
457 : }
458 :
459 656 : struct task_struct *find_task_by_vpid(pid_t vnr)
460 : {
461 1312 : return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
462 : }
463 :
464 4610 : struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
465 : {
466 : struct pid *pid;
467 : rcu_read_lock();
468 4610 : if (type != PIDTYPE_PID)
469 0 : task = task->group_leader;
470 4610 : pid = get_pid(task->pids[type].pid);
471 : rcu_read_unlock();
472 4610 : return pid;
473 : }
474 : EXPORT_SYMBOL_GPL(get_task_pid);
475 :
476 11165 : struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
477 : {
478 : struct task_struct *result;
479 : rcu_read_lock();
480 : result = pid_task(pid, type);
481 11165 : if (result)
482 11165 : get_task_struct(result);
483 : rcu_read_unlock();
484 11165 : return result;
485 : }
486 : EXPORT_SYMBOL_GPL(get_pid_task);
487 :
488 1699 : struct pid *find_get_pid(pid_t nr)
489 : {
490 : struct pid *pid;
491 :
492 : rcu_read_lock();
493 1699 : pid = get_pid(find_vpid(nr));
494 : rcu_read_unlock();
495 :
496 1699 : return pid;
497 : }
498 : EXPORT_SYMBOL_GPL(find_get_pid);
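/*
 * Usage sketch (hypothetical caller, not from this file): the reference
 * taken via get_pid() must eventually be dropped with put_pid().
 *
 *   struct pid *pid = find_get_pid(nr);
 *   struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
 *   if (task) {
 *           ...
 *           put_task_struct(task);
 *   }
 *   put_pid(pid);
 */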
499 :
500 2201 : pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
501 : {
502 : struct upid *upid;
503 : pid_t nr = 0;
504 :
505 251168 : if (pid && ns->level <= pid->level) {
506 250885 : upid = &pid->numbers[ns->level];
507 250885 : if (upid->ns == ns)
508 250885 : nr = upid->nr;
509 : }
510 2201 : return nr;
511 : }
512 : EXPORT_SYMBOL_GPL(pid_nr_ns);
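/*
 * Illustrative values: a task in a child pid namespace (level 1) carries
 * two upids, e.g. numbers[0] == { .nr = 1042, .ns = &init_pid_ns } and
 * numbers[1] == { .nr = 7, .ns = child_ns }. pid_nr_ns() then yields
 * 1042 when asked from init_pid_ns, 7 when asked from child_ns, and 0
 * from any namespace that is not on the pid's ancestor chain.
 */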
513 :
514 235122 : pid_t pid_vnr(struct pid *pid)
515 : {
516 470244 : return pid_nr_ns(pid, task_active_pid_ns(current));
517 : }
518 : EXPORT_SYMBOL_GPL(pid_vnr);
519 :
520 12588 : pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
521 : struct pid_namespace *ns)
522 : {
523 : pid_t nr = 0;
524 :
525 : rcu_read_lock();
526 12588 : if (!ns)
527 6061 : ns = task_active_pid_ns(current);
528 12588 : if (likely(pid_alive(task))) {
529 12588 : if (type != PIDTYPE_PID)
530 1412 : task = task->group_leader;
531 12588 : nr = pid_nr_ns(task->pids[type].pid, ns);
532 : }
533 : rcu_read_unlock();
534 :
535 12588 : return nr;
536 : }
537 : EXPORT_SYMBOL(__task_pid_nr_ns);
538 :
539 1257 : pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
540 : {
541 1257 : return pid_nr_ns(task_tgid(tsk), ns);
542 : }
543 : EXPORT_SYMBOL(task_tgid_nr_ns);
544 :
545 10955 : struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
546 : {
547 10955 : return ns_of_pid(task_pid(tsk));
548 : }
549 : EXPORT_SYMBOL_GPL(task_active_pid_ns);
550 :
551 : /*
552 : * Used by proc to find the first pid that is greater than or equal to nr.
553 : *
554 : * If there is a pid at nr, this function is exactly the same as find_pid_ns.
555 : */
556 1211 : struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
557 : {
558 : struct pid *pid;
559 :
560 : do {
561 1635 : pid = find_pid_ns(nr, ns);
562 1635 : if (pid)
563 : break;
564 440 : nr = next_pidmap(ns, nr);
565 440 : } while (nr > 0);
566 :
567 1211 : return pid;
568 : }
569 :
570 : /*
571 : * The pid hash table is scaled according to the amount of memory in the
572 : * machine, from a minimum of 16 slots up to 4096 slots at one gigabyte or
573 : * more.
574 : */
575 1 : void __init pidhash_init(void)
576 : {
577 : unsigned int i, pidhash_size;
578 :
579 1 : pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
580 : HASH_EARLY | HASH_SMALL,
581 : &pidhash_shift, NULL,
582 : 0, 4096);
583 1 : pidhash_size = 1U << pidhash_shift;
584 :
585 2049 : for (i = 0; i < pidhash_size; i++)
586 2048 : INIT_HLIST_HEAD(&pid_hash[i]);
587 1 : }
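/*
 * Sizing example: pidhash_shift starts at 4 (16 slots) and
 * alloc_large_system_hash() grows it with available memory, capped by
 * the 4096-entry limit (shift 12). On the run recorded in this listing
 * the loop initialized 2048 buckets, i.e. pidhash_shift came back as 11.
 */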
588 :
589 1 : void __init pidmap_init(void)
590 : {
591 : /* Veryify no one has done anything silly */
592 : BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
593 :
594 : /* bump default and minimum pid_max based on number of cpus */
595 1 : pid_max = min(pid_max_max, max_t(int, pid_max,
596 : PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
597 1 : pid_max_min = max_t(int, pid_max_min,
598 : PIDS_PER_CPU_MIN * num_possible_cpus());
599 1 : pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
600 :
601 1 : init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
602 : /* Reserve PID 0. We never call free_pidmap(0) */
603 : set_bit(0, init_pid_ns.pidmap[0].page);
604 : atomic_dec(&init_pid_ns.pidmap[0].nr_free);
605 :
606 1 : init_pid_ns.pid_cachep = KMEM_CACHE(pid,
607 : SLAB_HWCACHE_ALIGN | SLAB_PANIC);
608 1 : }