diff -urN -X dontdiff linux/drivers/char/mem.c vmalloc/drivers/char/mem.c
--- linux/drivers/char/mem.c	Thu Dec 16 21:59:38 1999
+++ vmalloc/drivers/char/mem.c	Wed Dec 22 09:02:18 1999
@@ -226,7 +226,8 @@
 {
 	unsigned long p = *ppos;
 	ssize_t read = 0;
-	ssize_t virtr;
+	ssize_t virtr = 0;
+	char *kbuf; /* kernel buffer: vread() holds vmlist_lock, so no user access */
 		
 	if (p < (unsigned long) high_memory) { 
 		read = count;
@@ -238,22 +239,45 @@
 		if (p < PAGE_SIZE && read > 0) {
 			size_t tmp = PAGE_SIZE - p;
 			if (tmp > read) tmp = read;
-			clear_user(buf, tmp);
+			if (clear_user(buf, tmp))
+				return -EFAULT;
 			buf += tmp;
 			p += tmp;
 			read -= tmp;
 			count -= tmp;
 		}
 #endif
-		copy_to_user(buf, (char *)p, read);
+		if (copy_to_user(buf, (char *)p, read))
+			return -EFAULT;
 		p += read;
 		buf += read;
 		count -= read;
 	}
 
-	virtr = vread(buf, (char *)p, count);
-	if (virtr < 0)
-		return virtr;
+	kbuf = (char *)get_free_page(GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+	while (count > 0) {
+		int len = count;
+
+		if (count > PAGE_SIZE)
+			len = PAGE_SIZE;
+
+		len = vread(kbuf, (char *)p, len);
+		if (len < 0) {
+			free_page((unsigned long)kbuf);
+			return len;
+		}
+		if (copy_to_user(buf, kbuf, len)) {
+			free_page((unsigned long)kbuf);
+			return -EFAULT;
+		}
+		count -= len;
+		buf += len;
+		virtr += len;
+		p += len;
+	}
+	free_page((unsigned long)kbuf);
-	*ppos += p + virtr;
+	*ppos = p;	/* p has advanced past both the physical and vmalloc reads */
 	return virtr + read;
 }
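
Annotation (drivers/char/mem.c): vread() now takes vmlist_lock, a spinlock,
for the whole duration of the copy, so it may no longer store through a
user-space pointer: copy_to_user()/put_user() can fault and sleep, which is
forbidden with a spinlock held. read_kmem() therefore bounces the data out
through one kernel page at a time. A minimal sketch of the general pattern
(the names some_lock, produce_data, kbuf and ubuf are illustrative, not part
of the patch):

	spin_lock(&some_lock);			/* or read_lock() on an rwlock */
	n = produce_data(kbuf, len);		/* must not sleep under the lock */
	spin_unlock(&some_lock);
	if (copy_to_user(ubuf, kbuf, n))	/* may fault/sleep: lock is dropped */
		return -EFAULT;

The explicit -EFAULT returns for clear_user()/copy_to_user() also fix the
silently ignored results in the old code.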
diff -urN -X dontdiff linux/fs/proc/kcore.c vmalloc/fs/proc/kcore.c
--- linux/fs/proc/kcore.c	Wed Dec 22 08:54:59 1999
+++ vmalloc/fs/proc/kcore.c	Wed Dec 22 09:02:18 1999
@@ -325,13 +325,12 @@
 	size_t elf_buflen;
 	int num_vma;
 
-	/* XXX we need to somehow lock vmlist between here
-	 * and after elf_kcore_store_hdr() returns.
-	 * For now assume that num_vma does not change (TA)
-	 */
+	read_lock(&vmlist_lock);
 	proc_root_kcore.size = size = get_kcore_size(&num_vma, &elf_buflen);
-	if (buflen == 0 || *fpos >= size)
+	if (buflen == 0 || *fpos >= size) {
+		read_unlock(&vmlist_lock);
 		return 0;
+	}
 
 	/* trim buflen to not go beyond EOF */
 	if (buflen > size - *fpos)
@@ -345,10 +344,13 @@
 		if (buflen < tsz)
 			tsz = buflen;
 		elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
-		if (!elf_buf)
+		if (!elf_buf) {
+			read_unlock(&vmlist_lock);
 			return -ENOMEM;
+		}
 		memset(elf_buf, 0, elf_buflen);
 		elf_kcore_store_hdr(elf_buf, num_vma, elf_buflen);
+		read_unlock(&vmlist_lock);
 		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
 			kfree(elf_buf);
 			return -EFAULT;
@@ -362,7 +364,8 @@
 		/* leave now if filled buffer already */
 		if (buflen == 0)
 			return acc;
-	}
+	} else
+		read_unlock(&vmlist_lock);
 
 	/* where page 0 not mapped, write zeros into buffer */
 #if defined (__i386__) || defined (__mc68000__)
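
Annotation (fs/proc/kcore.c): the read lock must span both get_kcore_size()
and elf_kcore_store_hdr(), because num_vma is derived from one walk of
vmlist and then used to lay out the ELF header; this is exactly the race the
removed XXX comment described. Note that the kmalloc() in this region
already uses GFP_ATOMIC, which is what holding a spinlock across it
requires. Condensed view of the locking (surrounding code unchanged):

	read_lock(&vmlist_lock);
	size = get_kcore_size(&num_vma, &elf_buflen);		/* walks vmlist */
	/* every early return between here and the unlock must drop the lock */
	elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);		/* no sleeping under a spinlock */
	elf_kcore_store_hdr(elf_buf, num_vma, elf_buflen);	/* num_vma still valid */
	read_unlock(&vmlist_lock);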
diff -urN -X dontdiff linux/include/linux/vmalloc.h vmalloc/include/linux/vmalloc.h
--- linux/include/linux/vmalloc.h	Tue Dec 21 00:01:18 1999
+++ vmalloc/include/linux/vmalloc.h	Wed Dec 22 09:05:56 1999
@@ -3,6 +3,7 @@
 
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/spinlock.h>
 
 #include <asm/pgtable.h>
 
@@ -24,6 +25,10 @@
 void vmfree_area_pages(unsigned long address, unsigned long size);
 int vmalloc_area_pages(unsigned long address, unsigned long size);
 
+/* vmlist_lock is a read-write spinlock that protects vmlist.
+ * Currently (2.3.34) used in mm/vmalloc.c and fs/proc/kcore.c.
+ */
+extern rwlock_t vmlist_lock;
 extern struct vm_struct * vmlist;
 #endif
 
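
Annotation (include/linux/vmalloc.h): an rwlock is the right flavor here;
readers (vread(), the /proc/kcore header code) may run concurrently with one
another and only exclude writers (vmalloc()/vfree() editing the list). A
sketch of the two usage patterns the rest of the patch follows (illustrative
fragment, not new API):

	read_lock(&vmlist_lock);		/* traversal only */
	for (tmp = vmlist; tmp; tmp = tmp->next)
		;				/* look, don't modify */
	read_unlock(&vmlist_lock);

	write_lock(&vmlist_lock);		/* insert or unlink a vm_struct */
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);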
diff -urN -X dontdiff linux/ipc/util.c vmalloc/ipc/util.c
--- linux/ipc/util.c	Tue Dec 14 18:01:21 1999
+++ vmalloc/ipc/util.c	Wed Dec 22 11:03:35 1999
@@ -161,25 +161,19 @@
 void* ipc_alloc(int size)
 {
 	void* out;
-	if(size > PAGE_SIZE) {
-		lock_kernel();
+	if (size > PAGE_SIZE)
 		out = vmalloc(size);
-		unlock_kernel();
-	} else {
+	else
 		out = kmalloc(size, GFP_KERNEL);
-	}
 	return out;
 }
 
 void ipc_free(void* ptr, int size)
 {
-	if(size > PAGE_SIZE) {
-		lock_kernel();
+	if (size > PAGE_SIZE)
 		vfree(ptr);
-		unlock_kernel();
-	} else {
+	else
 		kfree(ptr);
-	}
 }
 
 /* 
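
Annotation (ipc/util.c): the lock_kernel()/unlock_kernel() pairs existed
only to serialize the otherwise unprotected vmlist; with vmlist_lock doing
that job inside mm/vmalloc.c, the BKL wrapping is dead weight. Callers are
unaffected; as before, the size passed to ipc_free() must match the one
given to ipc_alloc() so the same allocator is selected:

	void *p = ipc_alloc(2 * PAGE_SIZE);	/* > PAGE_SIZE, so vmalloc() */
	if (p) {
		/* ... use the buffer ... */
		ipc_free(p, 2 * PAGE_SIZE);	/* same size, so vfree() */
	}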
diff -urN -X dontdiff linux/mm/vmalloc.c vmalloc/mm/vmalloc.c
--- linux/mm/vmalloc.c	Sat Nov 20 18:09:05 1999
+++ vmalloc/mm/vmalloc.c	Wed Dec 22 09:03:36 1999
@@ -3,14 +3,17 @@
  *
  *  Copyright (C) 1993  Linus Torvalds
  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ *  SMP-threaded vmalloc/vfree/ioremap, Tigran Aivazian, Dec 1999
  */
 
 #include <linux/malloc.h>
 #include <linux/vmalloc.h>
+#include <linux/spinlock.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
+rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
 struct vm_struct * vmlist = NULL;
 
 static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
@@ -162,11 +165,13 @@
 	if (!area)
 		return NULL;
 	addr = VMALLOC_START;
+	write_lock(&vmlist_lock);
 	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
 		if (size + addr < (unsigned long) tmp->addr)
 			break;
 		addr = tmp->size + (unsigned long) tmp->addr;
 		if (addr > VMALLOC_END-size) {
+			write_unlock(&vmlist_lock);
 			kfree(area);
 			return NULL;
 		}
@@ -176,6 +181,7 @@
 	area->size = size + PAGE_SIZE;
 	area->next = *p;
 	*p = area;
+	write_unlock(&vmlist_lock);
 	return area;
 }
 
@@ -189,14 +195,17 @@
 		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
 		return;
 	}
+	write_lock(&vmlist_lock);
 	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 		if (tmp->addr == addr) {
 			*p = tmp->next;
 			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
+			write_unlock(&vmlist_lock);
 			kfree(tmp);
 			return;
 		}
 	}
+	write_unlock(&vmlist_lock);
 	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
 }
 
@@ -224,16 +233,17 @@
 	return addr;
 }
 
-long vread(char *buf, char *addr, unsigned long count)
+long vread(char *kbuf, char *addr, unsigned long count)
 {
 	struct vm_struct *tmp;
-	char *vaddr, *buf_start = buf;
+	char *vaddr, *buf_start = kbuf;
 	unsigned long n;
 
 	/* Don't allow overflow */
 	if ((unsigned long) addr + count < count)
 		count = -(unsigned long) addr;
 
+	read_lock(&vmlist_lock);
 	for (tmp = vmlist; tmp; tmp = tmp->next) {
 		vaddr = (char *) tmp->addr;
 		if (addr >= vaddr + tmp->size - PAGE_SIZE)
@@ -241,8 +251,8 @@
 		while (addr < vaddr) {
 			if (count == 0)
 				goto finished;
-			put_user('\0', buf);
-			buf++;
+			*kbuf = '\0';
+			kbuf++;
 			addr++;
 			count--;
 		}
@@ -250,12 +260,13 @@
 		do {
 			if (count == 0)
 				goto finished;
-			put_user(*addr, buf);
-			buf++;
+			*kbuf = *addr;
+			kbuf++;
 			addr++;
 			count--;
 		} while (--n > 0);
 	}
 finished:
-	return buf - buf_start;
+	read_unlock(&vmlist_lock);
+	return kbuf - buf_start;
 }
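
Annotation (mm/vmalloc.c): two details are worth spelling out. First,
vfree() keeps vmfree_area_pages() inside the write lock deliberately:
vread() dereferences the mapped pages under the read lock, so page-table
teardown must be fully excluded, not just the list unlink; only the final
kfree(tmp) can safely run after the unlock. Second, vread()'s contract
changes: it now writes only to a kernel buffer (put_user() is gone, since a
fault taken with vmlist_lock held could sleep in the fault handler), so a
caller that wants the data in user space must stage it, as read_kmem() above
now does. Sketch of the new calling convention (ubuf, vmalloc_addr and err
are illustrative; error handling abbreviated):

	char *kbuf = (char *)get_free_page(GFP_KERNEL);
	long n;

	if (!kbuf)
		return -ENOMEM;
	n = vread(kbuf, vmalloc_addr, PAGE_SIZE);	/* no user access inside */
	if (n > 0 && copy_to_user(ubuf, kbuf, n))	/* copy after the lock is dropped */
		err = -EFAULT;
	free_page((unsigned long)kbuf);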
