patch-2.4.0-test7 linux/kernel/fork.c
- Lines: 133
- Date: Wed Aug 23 11:33:48 2000
- Orig file: v2.4.0-test6/linux/kernel/fork.c
- Orig date: Wed Aug 9 19:19:51 2000
diff -u --recursive --new-file v2.4.0-test6/linux/kernel/fork.c linux/kernel/fork.c
@@ -32,9 +32,6 @@
 unsigned long total_forks;	/* Handle normal Linux uptimes. */
 int last_pid;
 
-/* SLAB cache for mm_struct's. */
-kmem_cache_t *mm_cachep;
-
 struct task_struct *pidhash[PIDHASH_SZ];
 
 void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
@@ -314,18 +311,19 @@
 	tsk->mm = mm;
 	tsk->active_mm = mm;
 
-	/*
-	 * child gets a private LDT (if there was an LDT in the parent)
-	 */
-	copy_segments(tsk, mm);
-
 	down(&current->mm->mmap_sem);
 	retval = dup_mmap(mm);
 	up(&current->mm->mmap_sem);
 	if (retval)
 		goto free_pt;
 
-	init_new_context(tsk,mm);
+	/*
+	 * child gets a private LDT (if there was an LDT in the parent)
+	 */
+	copy_segments(tsk, mm);
+
+	if (init_new_context(tsk,mm))
+		goto free_pt;
 
 good_mm:
 	tsk->mm = mm;
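
Note: the hunk above reorders copy_mm(): dup_mmap() now runs first, copy_segments() only after it succeeds, and a failure return from init_new_context() is no longer silently ignored. A minimal sketch of the resulting control flow, simplified from the surrounding diff (the explicit retval assignment on the init_new_context() failure path is illustrative, not verbatim kernel source):

    /* Sketch: duplicate the address space first, then copy the LDT and
     * set up the MMU context, unwinding through one label on failure. */
    static int copy_mm_sketch(struct task_struct *tsk, struct mm_struct *mm)
    {
    	int retval;
    
    	down(&current->mm->mmap_sem);
    	retval = dup_mmap(mm);		/* copy the parent's VMAs */
    	up(&current->mm->mmap_sem);
    	if (retval)
    		goto free_pt;
    
    	/* child gets a private LDT (if there was an LDT in the parent) */
    	copy_segments(tsk, mm);
    
    	if (init_new_context(tsk, mm)) {
    		retval = -ENOMEM;	/* illustrative: report the failure */
    		goto free_pt;
    	}
    
    	tsk->mm = mm;
    	tsk->active_mm = mm;
    	return 0;
    
    free_pt:
    	mmput(mm);			/* drop the half-built mm */
    	return retval;
    }
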
@@ -340,7 +338,7 @@
 
 static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
 {
-	struct fs_struct *fs = kmalloc(sizeof(*old), GFP_KERNEL);
+	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
 	/* We don't need to lock fs - think why ;-) */
 	if (fs) {
 		atomic_set(&fs->count, 1);
@@ -506,7 +504,7 @@
 		atomic_inc(&current->sig->count);
 		return 0;
 	}
-	tsk->sig = kmalloc(sizeof(*tsk->sig), GFP_KERNEL);
+	tsk->sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
 	if (!tsk->sig)
 		return -1;
 	spin_lock_init(&tsk->sig->siglock);
@@ -553,8 +551,6 @@
 
 	*p = *current;
 
-	lock_kernel();
-
 	retval = -EAGAIN;
 	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur)
 		goto bad_fork_free;
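
Note: dropping the lock_kernel()/unlock_kernel() pair means do_fork() no longer serializes on the big kernel lock; the task-list update later in the function relies on tasklist_lock alone, roughly as sketched below (pattern taken from the surrounding diff context, abbreviated):

    /* Sketch of the locking that replaces the BKL here: the new task is
     * linked into the task list under the tasklist_lock writer lock. */
    write_lock_irq(&tasklist_lock);
    /* ... hash and link the new task_struct ... */
    nr_threads++;
    write_unlock_irq(&tasklist_lock);
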
@@ -671,11 +667,12 @@
 	nr_threads++;
 	write_unlock_irq(&tasklist_lock);
 
+	if (p->ptrace & PT_PTRACED)
+		send_sig(SIGSTOP, p, 1);
+
 	wake_up_process(p);		/* do this last */
 	++total_forks;
 
-bad_fork:
-	unlock_kernel();
 fork_out:
 	if ((clone_flags & CLONE_VFORK) && (retval > 0))
 		down(&sem);
@@ -696,5 +693,53 @@
 	free_uid(p->user);
 bad_fork_free:
 	free_task_struct(p);
-	goto bad_fork;
+	goto fork_out;
+}
+
+/* SLAB cache for signal_struct structures (tsk->sig) */
+kmem_cache_t *sigact_cachep;
+
+/* SLAB cache for files_struct structures (tsk->files) */
+kmem_cache_t *files_cachep;
+
+/* SLAB cache for fs_struct structures (tsk->fs) */
+kmem_cache_t *fs_cachep;
+
+/* SLAB cache for vm_area_struct structures */
+kmem_cache_t *vm_area_cachep;
+
+/* SLAB cache for mm_struct structures (tsk->mm) */
+kmem_cache_t *mm_cachep;
+
+void __init proc_caches_init(void)
+{
+	sigact_cachep = kmem_cache_create("signal_act",
+			sizeof(struct signal_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!sigact_cachep)
+		panic("Cannot create signal action SLAB cache");
+
+	files_cachep = kmem_cache_create("files_cache",
+			sizeof(struct files_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!files_cachep)
+		panic("Cannot create files SLAB cache");
+
+	fs_cachep = kmem_cache_create("fs_cache",
+			sizeof(struct fs_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!fs_cachep)
+		panic("Cannot create fs_struct SLAB cache");
+
+	vm_area_cachep = kmem_cache_create("vm_area_struct",
+			sizeof(struct vm_area_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if(!vm_area_cachep)
+		panic("vma_init: Cannot alloc vm_area_struct SLAB cache");
+
+	mm_cachep = kmem_cache_create("mm_struct",
+			sizeof(struct mm_struct), 0,
+			SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if(!mm_cachep)
+		panic("vma_init: Cannot alloc mm_struct SLAB cache");
 }
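
Note: proc_caches_init() gives each of these frequently allocated per-process structures its own slab cache, with SLAB_HWCACHE_ALIGN requesting cache-line alignment, replacing the generic kmalloc() calls patched out above. An object taken from a slab cache must be returned with kmem_cache_free() to the same cache rather than kfree(); a minimal sketch of the pairing for fs_struct (illustrative snippet, the real teardown lives on the corresponding exit paths):

    /* Sketch: allocate an fs_struct from its dedicated cache ... */
    struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
    if (fs)
    	atomic_set(&fs->count, 1);
    
    /* ... and, when the last reference is dropped, return it to the
     * same cache (a kfree() here would be wrong). */
    if (fs && atomic_dec_and_test(&fs->count))
    	kmem_cache_free(fs_cachep, fs);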