patch-2.4.21 linux-2.4.21/arch/x86_64/kernel/sys_x86_64.c

diff -urN linux-2.4.20/arch/x86_64/kernel/sys_x86_64.c linux-2.4.21/arch/x86_64/kernel/sys_x86_64.c
@@ -65,23 +65,34 @@
 	return error;
 }
 
+
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct vm_area_struct *vma;
 	unsigned long end = TASK_SIZE;
 
-	if (current->thread.flags & THREAD_IA32)
-		flags |= MAP_32BIT; 
-	if (flags & MAP_32BIT)
-		end = 0xffffffff-1;
-	if (len > end)
-		return -ENOMEM;
-	if (!addr) { 
-		addr = TASK_UNMAPPED_64;
-		if (flags & MAP_32BIT) {
+	if (current->thread.flags & THREAD_IA32) {
+		if (!addr) 
 			addr = TASK_UNMAPPED_32;
+		end = 0xffff0000;
+	} else if (flags & MAP_32BIT) { 
+		/* This is usually needed to map code in small
+		   model: it needs to be in the first 31bit. Limit it
+		   to that.  This means we need to move the unmapped
+		   base down for this case.  This may give conflicts
+		   with the heap, but we assume that malloc falls back
+		   to mmap. Give it 1GB of playground for now. -AK */ 
+		if (!addr) 
+			addr = 0x40000000; 
+		end = 0x80000000;		
+	} else { 
+		if (!addr) 
+			addr = TASK_UNMAPPED_64; 
+		end = TASK_SIZE; 
 		}
-	} 
+
+	if (len > end)
+		return -ENOMEM;
 	addr = PAGE_ALIGN(addr);
 
 	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
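As a quick sanity check of the new MAP_32BIT path, here is a minimal userspace sketch. It is not part of the patch, and it assumes an x86_64 glibc that exposes MAP_32BIT and MAP_ANONYMOUS under _GNU_SOURCE. It requests an anonymous mapping with MAP_32BIT and reports whether the kernel placed it below the 0x80000000 limit that the hunk above enforces:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;

	/* Ask for an anonymous mapping constrained by MAP_32BIT. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* With the change above, the search for a free range starts at
	   0x40000000 and must end below 0x80000000 for MAP_32BIT. */
	printf("mapped at %p (%s the 2GB boundary)\n", p,
	       (unsigned long)p < 0x80000000UL ? "below" : "above");

	munmap(p, len);
	return EXIT_SUCCESS;
}

For a 64-bit process that does not pass MAP_32BIT, the same request would instead start the search at TASK_UNMAPPED_64 with end = TASK_SIZE, while an IA32 task is confined to addresses below 0xffff0000.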
