patch-2.4.0-test3 linux/include/linux/sched.h

diff -u --recursive --new-file v2.4.0-test2/linux/include/linux/sched.h linux/include/linux/sched.h
@@ -248,7 +248,9 @@
 struct user_struct;
 
 struct task_struct {
-/* these are hardcoded - don't touch */
+	/*
+	 * offsets of these are hardcoded elsewhere - touch with care
+	 */
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	unsigned long flags;	/* per process flags, defined below */
 	int sigpending;
@@ -258,21 +260,29 @@
 					 */
 	struct exec_domain *exec_domain;
 	volatile long need_resched;
+	unsigned long ptrace;
+
+	int lock_depth;		/* Lock depth */
 
-	cycles_t avg_slice;
-	int lock_depth;		/* Lock depth. We can context switch in and out of holding a syscall kernel lock... */	
-/* begin intel cache line */
+/*
+ * offset 32 begins here on 32-bit platforms. We keep
+ * all fields in a single cacheline that are needed for
+ * the goodness() loop in schedule().
+ */
 	long counter;
-	long priority;
+	long nice;
 	unsigned long policy;
-/* memory management info */
-	struct mm_struct *mm, *active_mm;
-	int has_cpu;
-	int processor;
-	unsigned long ptrace;
+	struct mm_struct *mm;
+	int has_cpu, processor;
+	unsigned long cpus_allowed;
+	/*
+	 * (only the 'next' pointer fits into the cacheline, but
+	 * that's just fine.)
+	 */
 	struct list_head run_list;
+
 	struct task_struct *next_task, *prev_task;
-	int last_processor;
+	struct mm_struct *active_mm;
 
 /* task state */
 	struct linux_binfmt *binfmt;
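
The reshuffling above is about cache locality: every field read by the scheduler's
per-task weighting loop now sits in the cacheline that starts at offset 32 on 32-bit
platforms. The sketch below illustrates the kind of calculation the new comment refers
to; it is loosely modelled on the goodness() routine in kernel/sched.c, but the name
weight_sketch is made up and the affinity bonus and constants are simplified, so treat
it as an illustration rather than the kernel's exact code.

    /*
     * Illustrative weighting function: counter, nice, policy, mm,
     * has_cpu and processor -- everything touched here -- now share
     * a single cacheline in struct task_struct.
     */
    static inline int weight_sketch(struct task_struct *p, int this_cpu,
                                    struct mm_struct *this_mm)
    {
            int weight;

            if (p->policy != SCHED_OTHER)           /* real-time classes always win */
                    return 1000 + p->rt_priority;

            weight = p->counter;                    /* ticks left in the time slice */
            if (!weight)
                    return 0;                       /* slice exhausted */

            if (p->processor == this_cpu)           /* simplified cache-affinity bonus */
                    weight += 1;
            if (p->mm == this_mm || !p->mm)         /* no mm switch needed */
                    weight += 1;

            return weight + 20 - p->nice;           /* nicer tasks weigh less */
    }
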
@@ -380,7 +390,9 @@
  */
 #define _STK_LIM	(8*1024*1024)
 
-#define DEF_PRIORITY	(20*HZ/100)	/* 200 ms time slices */
+#define DEF_COUNTER	(10*HZ/100)	/* 100 ms time slice */
+#define MAX_COUNTER	(20*HZ/100)
+#define DEF_NICE	(0)
 
 /*
  *  INIT_TASK is used to set up the first task table, touch at
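
The single DEF_PRIORITY, which previously served as both the default counter value and
the default priority (see the INIT_TASK hunk below), is split into a default time slice
(DEF_COUNTER), a separate MAX_COUNTER, and a default nice value. With HZ = 100, the
usual x86 setting of the day, the arithmetic works out to:

    DEF_COUNTER = 10*HZ/100  ->  10 ticks  =  100 ms
    MAX_COUNTER = 20*HZ/100  ->  20 ticks  =  200 ms   (the old DEF_PRIORITY value)
    DEF_NICE    = 0                                    (standard nice range is -20..19)

On a platform with a faster tick, e.g. Alpha's HZ = 1024, the same macros give 102 and
204 ticks, i.e. still roughly 100 ms and 200 ms of wall-clock time.
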
@@ -394,11 +406,12 @@
     addr_limit:		KERNEL_DS,					\
     exec_domain:	&default_exec_domain,				\
     lock_depth:		-1,						\
-    counter:		DEF_PRIORITY,					\
-    priority:		DEF_PRIORITY,					\
+    counter:		DEF_COUNTER,					\
+    nice:		DEF_NICE,					\
     policy:		SCHED_OTHER,					\
     mm:			NULL,						\
     active_mm:		&init_mm,					\
+    cpus_allowed:	-1,						\
     run_list:		LIST_HEAD_INIT(tsk.run_list),			\
     next_task:		&tsk,						\
     prev_task:		&tsk,						\
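
The new cpus_allowed field is a plain bitmask with one bit per processor, so
initialising it to -1 in INIT_TASK (all bits set in an unsigned long) means the first
task may be scheduled on any CPU. A test against the mask looks something like the
sketch below; the helper name is made up, and the real check lives in kernel/sched.c.

    /* Illustrative only: may task p run on the given CPU? */
    static inline int allowed_on(struct task_struct *p, int cpu)
    {
            return (p->cpus_allowed & (1UL << cpu)) != 0;
    }
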
@@ -848,6 +861,25 @@
 static inline void task_unlock(struct task_struct *p)
 {
 	spin_unlock(&p->alloc_lock);
+}
+
+/* write full pathname into buffer and return start of pathname */
+static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
+				char *buf, int buflen)
+{
+	char *res;
+	struct vfsmount *rootmnt;
+	struct dentry *root;
+	read_lock(&current->fs->lock);
+	rootmnt = mntget(current->fs->rootmnt);
+	root = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+	spin_lock(&dcache_lock);
+	res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
+	spin_unlock(&dcache_lock);
+	dput(root);
+	mntput(rootmnt);
+	return res;
 }
 
 #endif /* __KERNEL__ */
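
The new inline d_path() snapshots the calling process's root directory and root mount
under fs->lock, takes references on both, builds the pathname with __d_path() under
dcache_lock, and then drops the references. Callers hand it a scratch buffer and get
back a pointer somewhere inside that buffer, since the name is assembled from the end.
A hypothetical caller, assuming a struct file *file with valid f_dentry and f_vfsmnt,
would look roughly like this:

    char *page = (char *) __get_free_page(GFP_KERNEL);
    if (page) {
            char *path = d_path(file->f_dentry, file->f_vfsmnt, page, PAGE_SIZE);
            printk(KERN_DEBUG "file is %s\n", path);
            free_page((unsigned long) page);
    }
    /* (Error handling kept minimal for the sake of the example.) */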
