patch-2.4.0-test6 linux/kernel/fork.c

diff -u --recursive --new-file v2.4.0-test5/linux/kernel/fork.c linux/kernel/fork.c
@@ -37,125 +37,6 @@
 
 struct task_struct *pidhash[PIDHASH_SZ];
 
-/* UID task count cache, to prevent walking entire process list every
- * single fork() operation.
- */
-#define UIDHASH_SZ	(PIDHASH_SZ >> 2)
-
-static struct user_struct {
-	atomic_t count;
-	struct user_struct *next, **pprev;
-	unsigned int uid;
-} *uidhash[UIDHASH_SZ];
-
-spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
-
-kmem_cache_t *uid_cachep;
-
-#define uidhashfn(uid)	(((uid >> 8) ^ uid) & (UIDHASH_SZ - 1))
-
-/*
- * These routines must be called with the uidhash spinlock held!
- */
-static inline void uid_hash_insert(struct user_struct *up, unsigned int hashent)
-{
-	if((up->next = uidhash[hashent]) != NULL)
-		uidhash[hashent]->pprev = &up->next;
-	up->pprev = &uidhash[hashent];
-	uidhash[hashent] = up;
-}
-
-static inline void uid_hash_remove(struct user_struct *up)
-{
-	if(up->next)
-		up->next->pprev = up->pprev;
-	*up->pprev = up->next;
-}
-
-static inline struct user_struct *uid_hash_find(unsigned short uid, unsigned int hashent)
-{
-	struct user_struct *up, *next;
-
-	next = uidhash[hashent];
-	for (;;) {
-		up = next;
-		if (next) {
-			next = up->next;
-			if (up->uid != uid)
-				continue;
-			atomic_inc(&up->count);
-		}
-		break;
-	}
-	return up;
-}
-
-/*
- * For SMP, we need to re-test the user struct counter
- * after having acquired the spinlock. This allows us to do
- * the common case (not freeing anything) without having
- * any locking.
- */
-#ifdef CONFIG_SMP
-  #define uid_hash_free(up)	(!atomic_read(&(up)->count))
-#else
-  #define uid_hash_free(up)	(1)
-#endif
-
-void free_uid(struct task_struct *p)
-{
-	struct user_struct *up = p->user;
-
-	if (up) {
-		p->user = NULL;
-		if (atomic_dec_and_test(&up->count)) {
-			spin_lock(&uidhash_lock);
-			if (uid_hash_free(up)) {
-				uid_hash_remove(up);
-				kmem_cache_free(uid_cachep, up);
-			}
-			spin_unlock(&uidhash_lock);
-		}
-	}
-}
-
-int alloc_uid(struct task_struct *p)
-{
-	unsigned int hashent = uidhashfn(p->uid);
-	struct user_struct *up;
-
-	spin_lock(&uidhash_lock);
-	up = uid_hash_find(p->uid, hashent);
-	spin_unlock(&uidhash_lock);
-
-	if (!up) {
-		struct user_struct *new;
-
-		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
-		if (!new)
-			return -EAGAIN;
-		new->uid = p->uid;
-		atomic_set(&new->count, 1);
-
-		/*
-		 * Before adding this, check whether we raced
-		 * on adding the same user already..
-		 */
-		spin_lock(&uidhash_lock);
-		up = uid_hash_find(p->uid, hashent);
-		if (up) {
-			kmem_cache_free(uid_cachep, new);
-		} else {
-			uid_hash_insert(new, hashent);
-			up = new;
-		}
-		spin_unlock(&uidhash_lock);
-
-	}
-	p->user = up;
-	return 0;
-}
-
 void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 {
 	unsigned long flags;
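
The entire block removed above is the per-UID task-count cache: a small hash of reference-counted user_struct objects so that fork() can enforce RLIMIT_NPROC without walking the whole task list. It does not die with this patch; later hunks in this file switch to a reworked user_struct with separate __count and processes fields, and the hash itself moves out of fork.c (it lives in kernel/user.c in released 2.4 trees). The most instructive part of the removed code is alloc_uid()'s race handling: look up under the spinlock, drop the lock to allocate (allocation may sleep), then re-check under the lock before inserting, freeing the new object if another CPU won the race. A userspace sketch of that pattern follows; all names here (get_node, table_lock, and so on) are illustrative, not kernel identifiers.

#include <pthread.h>
#include <stdlib.h>

struct node {
	unsigned int key;
	int refs;			/* protected by table_lock */
	struct node *next;
};

#define HASH_SZ 64
#define hashfn(k)	((((k) >> 8) ^ (k)) & (HASH_SZ - 1))

static struct node *table[HASH_SZ];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Must be called with table_lock held (mirrors uid_hash_find). */
static struct node *find(unsigned int key)
{
	struct node *n;

	for (n = table[hashfn(key)]; n; n = n->next)
		if (n->key == key) {
			n->refs++;
			return n;
		}
	return NULL;
}

struct node *get_node(unsigned int key)
{
	struct node *n, *fresh;

	pthread_mutex_lock(&table_lock);
	n = find(key);
	pthread_mutex_unlock(&table_lock);
	if (n)
		return n;

	/* Allocate with no lock held -- allocation may block. */
	fresh = malloc(sizeof(*fresh));
	if (!fresh)
		return NULL;
	fresh->key = key;
	fresh->refs = 1;

	/* Re-check: another thread may have inserted key meanwhile. */
	pthread_mutex_lock(&table_lock);
	n = find(key);
	if (n) {
		free(fresh);	/* lost the race; use the winner's node */
	} else {
		fresh->next = table[hashfn(key)];
		table[hashfn(key)] = fresh;
		n = fresh;
	}
	pthread_mutex_unlock(&table_lock);
	return n;
}
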
@@ -185,17 +66,6 @@
 
 void __init fork_init(unsigned long mempages)
 {
-	int i;
-
-	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
-				       0,
-				       SLAB_HWCACHE_ALIGN, NULL, NULL);
-	if(!uid_cachep)
-		panic("Cannot create uid taskcount SLAB cache\n");
-
-	for(i = 0; i < UIDHASH_SZ; i++)
-		uidhash[i] = 0;
-
 	/*
 	 * The default maximum number of threads is set to a safe
 	 * value: the thread structures can take up at most half
@@ -258,10 +128,17 @@
 	struct vm_area_struct * mpnt, *tmp, **pprev;
 	int retval;
 
-	/* Kill me slowly. UGLY! FIXME! */
-	memcpy(&mm->start_code, &current->mm->start_code, 15*sizeof(unsigned long));
-
 	flush_cache_mm(current->mm);
+	mm->locked_vm = 0;
+	mm->mmap = NULL;
+	mm->mmap_avl = NULL;
+	mm->mmap_cache = NULL;
+	mm->map_count = 0;
+	mm->context = 0;
+	mm->cpu_vm_mask = 0;
+	mm->swap_cnt = 0;
+	mm->swap_address = 0;
+	mm->segments = NULL;
 	pprev = &mm->mmap;
 	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
 		struct file *file;
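
This hunk finally acts on the "Kill me slowly. UGLY! FIXME!" comment: instead of inheriting 15 consecutive unsigned longs with an offset-based memcpy from &current->mm->start_code, dup_mmap() now assigns exactly the fields that must not be carried over from the parent (the whole-struct copy happens in copy_mm(), a few hunks below). The old idiom is fragile because it encodes the field count and layout of mm_struct by hand. A reduced illustration with a hypothetical struct:

#include <string.h>

/* Hypothetical, reduced analogue of mm_struct for illustration. */
struct ctx {
	void *mmap;				/* must NOT be inherited */
	unsigned long start_code, end_code;	/* safe to inherit */
	unsigned long start_data, end_data;
};

/* The removed idiom: copy "N words starting at start_code". Correct
 * only while the field count and layout stay exactly as assumed. */
void copy_fragile(struct ctx *dst, const struct ctx *src)
{
	memcpy(&dst->start_code, &src->start_code,
	       4 * sizeof(unsigned long));
}

/* The replacement idiom: copy everything, then reset the members
 * that must stay private -- layout changes are harmless. */
void copy_explicit(struct ctx *dst, const struct ctx *src)
{
	*dst = *src;
	dst->mmap = NULL;
}

copy_explicit() keeps working across reordering or growth of the struct; copy_fragile() silently corrupts the first time someone inserts a pointer into the copied window.
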
@@ -319,6 +196,22 @@
 	return retval;
 }
 
+#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
+
+static struct mm_struct * mm_init(struct mm_struct * mm)
+{
+	atomic_set(&mm->mm_users, 1);
+	atomic_set(&mm->mm_count, 1);
+	init_MUTEX(&mm->mmap_sem);
+	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
+	mm->pgd = pgd_alloc();
+	if (mm->pgd)
+		return mm;
+	kmem_cache_free(mm_cachep, mm);
+	return NULL;
+}
+	
+
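
The two new helpers split mm_struct setup out of mm_alloc() so that copy_mm() can reuse it. Note the convention mm_init() adopts: on pgd_alloc() failure it frees the half-built mm itself and returns NULL, so a caller never has to unwind a partial initialization. A minimal userspace sketch of that consume-on-failure shape (names are illustrative):

#include <stdlib.h>

struct thing { void *buf; };

/* Consume-on-failure: on error the initializer releases 't' itself,
 * so callers never see a half-initialized object. */
static struct thing *thing_init(struct thing *t)
{
	t->buf = malloc(4096);
	if (t->buf)
		return t;
	free(t);
	return NULL;
}

struct thing *thing_alloc(void)
{
	struct thing *t = malloc(sizeof(*t));
	if (!t)
		return NULL;
	return thing_init(t);	/* NULL means fully cleaned up */
}
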
 /*
  * Allocate and initialize an mm_struct.
  */
@@ -326,17 +219,10 @@
 {
 	struct mm_struct * mm;
 
-	mm = kmem_cache_alloc(mm_cachep, SLAB_KERNEL);
+	mm = allocate_mm();
 	if (mm) {
 		memset(mm, 0, sizeof(*mm));
-		atomic_set(&mm->mm_users, 1);
-		atomic_set(&mm->mm_count, 1);
-		init_MUTEX(&mm->mmap_sem);
-		mm->page_table_lock = SPIN_LOCK_UNLOCKED;
-		mm->pgd = pgd_alloc();
-		if (mm->pgd)
-			return mm;
-		kmem_cache_free(mm_cachep, mm);
+		return mm_init(mm);
 	}
 	return NULL;
 }
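
Applying the hunk, mm_alloc() now reads as below (reconstructed from the context lines above; kernel code, not a standalone example):

struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		return mm_init(mm);	/* NULL on pgd_alloc() failure */
	}
	return NULL;
}
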
@@ -416,10 +302,15 @@
 	}
 
 	retval = -ENOMEM;
-	mm = mm_alloc();
+	mm = allocate_mm();
 	if (!mm)
 		goto fail_nomem;
 
+	/* Copy the current MM stuff.. */
+	memcpy(mm, current->mm, sizeof(*mm));
+	if (!mm_init(mm))
+		goto fail_nomem;
+
 	tsk->mm = mm;
 	tsk->active_mm = mm;
 
@@ -434,10 +325,11 @@
 	if (retval)
 		goto free_pt;
 
+	init_new_context(tsk,mm);
+
 good_mm:
 	tsk->mm = mm;
 	tsk->active_mm = mm;
-	init_new_context(tsk,mm);
 	return 0;
 
 free_pt:
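
Moving init_new_context() above the good_mm label makes it run only when a brand-new mm was just built. good_mm is the target of the CLONE_VM shortcut earlier in copy_mm(), where the child simply shares current's mm; with the old placement, that path re-initialized the MMU context of an mm already in use. The tail of the function now reads (reconstructed from the hunk):

	retval = dup_mmap(mm);
	if (retval)
		goto free_pt;

	init_new_context(tsk,mm);	/* fresh mm only */

good_mm:				/* CLONE_VM sharers jump in here */
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;
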
@@ -664,11 +556,10 @@
 	lock_kernel();
 
 	retval = -EAGAIN;
-	if (p->user) {
-		if (atomic_read(&p->user->count) >= p->rlim[RLIMIT_NPROC].rlim_cur)
-			goto bad_fork_free;
-		atomic_inc(&p->user->count);
-	}
+	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur)
+		goto bad_fork_free;
+	atomic_inc(&p->user->__count);
+	atomic_inc(&p->user->processes);
 
 	/*
 	 * Counter increases are protected by
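
The fork-time RLIMIT_NPROC check changes shape with the reworked user_struct. Previously a single count did double duty as both reference count and per-user process count, and p->user could legitimately be NULL (the removed code guarded for it). Now every task holds a reference, and the two roles are split. The struct definition is outside this file; judging only from the fields used in this hunk, it looks roughly like the sketch below (layout assumed, other members omitted):

struct user_struct {
	atomic_t __count;	/* lifetime: references to the struct */
	atomic_t processes;	/* accounting: live processes, compared
				 * against RLIMIT_NPROC at fork time */
	/* hash-chain linkage, uid, ... (not visible in this hunk) */
};
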
@@ -801,8 +692,8 @@
 	if (p->binfmt && p->binfmt->module)
 		__MOD_DEC_USE_COUNT(p->binfmt->module);
 bad_fork_cleanup_count:
-	if (p->user)
-		free_uid(p);
+	atomic_dec(&p->user->processes);
+	free_uid(p->user);
 bad_fork_free:
 	free_task_struct(p);
 	goto bad_fork;
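
The cleanup path mirrors the two increments at fork entry exactly, and free_uid() now takes the user_struct directly instead of digging it out of a task_struct. Side by side, excerpted from the two hunks above:

	/* on entry, after passing the RLIMIT_NPROC check: */
	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);

	/* at bad_fork_cleanup_count, if fork fails later: */
	atomic_dec(&p->user->processes);
	free_uid(p->user);	/* drops __count; presumably frees on
				 * the last reference, as the old code did */
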
