patch-2.4.0-test6 linux/mm/slab.c
- Lines: 303
- Date: Mon Aug 7 21:01:36 2000
- Orig file: v2.4.0-test5/linux/mm/slab.c
- Orig date: Fri Jul 14 12:12:16 2000
diff -u --recursive --new-file v2.4.0-test5/linux/mm/slab.c linux/mm/slab.c
@@ -43,16 +43,13 @@
* of the entries in the array are given back into the global cache.
* This reduces the number of spinlock operations.
*
- * The c_cpuarray can be changed with a smp_call_function call,
- * it may not be read with enabled local interrupts.
+ * The c_cpuarray may not be read with enabled local interrupts.
*
* SMP synchronization:
* constructors and destructors are called without any locking.
* Several members in kmem_cache_t and slab_t never change, they
* are accessed without any locking.
* The per-cpu arrays are never accessed from the wrong cpu, no locking.
- * smp_call_function() is used if one cpu must flush the arrays from
- * other cpus.
* The non-constant members are protected with a per-cache irq spinlock.
*
* Further notes from the original documentation:
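For context, the lockless rule in this comment is what makes the per-CPU fast path work. A simplified sketch of that path follows (cc_data() and cc_entry() are real macros from this file; the condensed body is an illustration, not the verbatim allocator):

	/* Per-CPU fast path: local interrupts are off, so this CPU's
	 * array cannot change underneath us, and no other CPU ever
	 * touches it -- hence no spinlock is needed. */
	local_irq_save(flags);
	cc = cc_data(cachep);		/* this CPU's private array */
	if (cc && cc->avail)
		objp = cc_entry(cc)[--cc->avail];
	local_irq_restore(flags);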
@@ -372,7 +369,6 @@
*/
static int g_cpucache_up;
-static void drain_cache (void *__cachep);
static void enable_cpucache (kmem_cache_t *cachep);
static void enable_all_cpucaches (void);
#endif
@@ -463,14 +459,17 @@
} while (sizes->cs_size);
}
-void __init kmem_cpucache_init(void)
+int __init kmem_cpucache_init(void)
{
#ifdef CONFIG_SMP
g_cpucache_up = 1;
enable_all_cpucaches();
#endif
+ return 0;
}
+__initcall(kmem_cpucache_init);
+
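The signature change above is forced by the initcall mechanism: entries in the .initcall.init section must return int. Roughly how __initcall() worked in 2.4-era <linux/init.h> (reconstructed from memory, so treat the details as an assumption):

	typedef int (*initcall_t)(void);

	#define __initcall(fn) \
		static initcall_t __initcall_##fn \
		__attribute__((unused, __section__(".initcall.init"))) = fn

	/* At boot, do_initcalls() walks the section and invokes each
	 * entry in link order, so kmem_cpucache_init() no longer needs
	 * an explicit call from init/main.c. */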
/* Interface to system's page allocator. No need to hold the cache-lock.
*/
static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
@@ -497,7 +496,7 @@
static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
{
unsigned long i = (1<<cachep->gfporder);
- struct page *page = mem_map + MAP_NR(addr);
+ struct page *page = virt_to_page(addr);
/* free_pages() does not clear the type bit - we do that.
* The pages have been unlinked from their cache-slab,
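The mem_map + MAP_NR() to virt_to_page() conversion, repeated throughout this patch, is behavior-preserving on flat-memory machines but lets other architectures supply their own mapping. On i386 of this era the macro was essentially the old expression wrapped up (sketch from <asm-i386/page.h>, quoted from memory):

	#define virt_to_page(kaddr)	(mem_map + (__pa(kaddr) >> PAGE_SHIFT))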
@@ -838,17 +837,50 @@
return ret;
}
+#ifdef CONFIG_SMP
+static DECLARE_MUTEX(cache_drain_sem);
+static kmem_cache_t *cache_to_drain = NULL;
+static DECLARE_WAIT_QUEUE_HEAD(cache_drain_wait);
+unsigned long slab_cache_drain_mask;
+
+static void drain_cpu_caches(kmem_cache_t *cachep)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long cpu_mask = 0;
+ int i;
+
+ for (i = 0; i < smp_num_cpus; i++)
+ cpu_mask |= (1UL << cpu_logical_map(i));
+
+ down(&cache_drain_sem);
+
+ cache_to_drain = cachep;
+ slab_cache_drain_mask = cpu_mask;
+
+ slab_drain_local_cache();
+
+ add_wait_queue(&cache_drain_wait, &wait);
+ current->state = TASK_UNINTERRUPTIBLE;
+ while (slab_cache_drain_mask != 0UL)
+ schedule();
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&cache_drain_wait, &wait);
+
+ cache_to_drain = NULL;
+
+ up(&cache_drain_sem);
+}
+#else
+#define drain_cpu_caches(cachep) do { } while (0)
+#endif
+
static int __kmem_cache_shrink(kmem_cache_t *cachep)
{
slab_t *slabp;
int ret;
-#ifdef CONFIG_SMP
- smp_call_function(drain_cache, cachep, 1, 1);
- local_irq_disable();
- drain_cache(cachep);
- local_irq_enable();
-#endif
+ drain_cpu_caches(cachep);
+
spin_lock_irq(&cachep->spinlock);
/* If the cache is growing, stop shrinking. */
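drain_cpu_caches() replaces the old smp_call_function() broadcast with a handshake driven by the per-CPU timer tick: the requester publishes the cache and a mask of CPUs, drains its own CPU synchronously via slab_drain_local_cache(), then sleeps until every CPU has cleared its bit. The consumer side lives outside this file; a plausible sketch of the hook (the exact call site is an assumption here):

	extern unsigned long slab_cache_drain_mask;

	/* Called on every CPU from its local timer interrupt. */
	void timer_tick_hook(int cpu)
	{
		if (test_bit(cpu, &slab_cache_drain_mask))
			slab_drain_local_cache();  /* drains, clears our
						      bit, wakes waiter */
	}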
@@ -1083,7 +1115,7 @@
/* Nasty!!!!!! I hope this is OK. */
i = 1 << cachep->gfporder;
- page = mem_map + MAP_NR(objp);
+ page = virt_to_page(objp);
do {
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
@@ -1289,9 +1321,9 @@
*/
#if DEBUG
-# define CHECK_NR(nr) \
+# define CHECK_NR(pg) \
do { \
- if (nr >= max_mapnr) { \
+ if (!VALID_PAGE(pg)) { \
printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
@@ -1299,6 +1331,7 @@
} while (0)
# define CHECK_PAGE(page) \
do { \
+ CHECK_NR(page); \
if (!PageSlab(page)) { \
printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
(unsigned long)objp); \
@@ -1307,23 +1340,21 @@
} while (0)
#else
-# define CHECK_NR(nr) do { } while (0)
-# define CHECK_PAGE(nr) do { } while (0)
+# define CHECK_PAGE(pg) do { } while (0)
#endif
static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
{
slab_t* slabp;
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
/* reduces memory footprint
*
if (OPTIMIZE(cachep))
slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
else
*/
- slabp = GET_PAGE_SLAB(mem_map + MAP_NR(objp));
+ slabp = GET_PAGE_SLAB(virt_to_page(objp));
#if DEBUG
if (cachep->flags & SLAB_DEBUG_INITIAL)
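With CHECK_NR() folded into CHECK_PAGE(), the range check is now expressed on the struct page pointer itself rather than on a map number. On i386 with a single flat mem_map, VALID_PAGE() was just the old max_mapnr comparison restated (sketch from <asm-i386/page.h>, quoted from memory):

	#define VALID_PAGE(page)	((page) - mem_map < max_mapnr)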
@@ -1420,8 +1451,7 @@
#ifdef CONFIG_SMP
cpucache_t *cc = cc_data(cachep);
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
if (cc) {
int batchcount;
if (cc->avail < cc->limit) {
@@ -1504,9 +1534,8 @@
{
unsigned long flags;
#if DEBUG
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
- if (cachep != GET_PAGE_CACHE(mem_map + MAP_NR(objp)))
+ CHECK_PAGE(virt_to_page(objp));
+ if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
BUG();
#endif
@@ -1530,9 +1559,8 @@
if (!objp)
return;
local_irq_save(flags);
- CHECK_NR(MAP_NR(objp));
- CHECK_PAGE(mem_map + MAP_NR(objp));
- c = GET_PAGE_CACHE(mem_map + MAP_NR(objp));
+ CHECK_PAGE(virt_to_page(objp));
+ c = GET_PAGE_CACHE(virt_to_page(objp));
__kmem_cache_free(c, (void*)objp);
local_irq_restore(flags);
}
@@ -1554,36 +1582,67 @@
}
#ifdef CONFIG_SMP
-/*
- * called with local interrupts disabled
- */
-static void drain_cache (void* __cachep)
+
+typedef struct ccupdate_struct_s
{
- kmem_cache_t *cachep = __cachep;
- cpucache_t *cc = cc_data(cachep);
+ kmem_cache_t *cachep;
+ cpucache_t *new[NR_CPUS];
+} ccupdate_struct_t;
- if (cc && cc->avail) {
- free_block(cachep, cc_entry(cc), cc->avail);
- cc->avail = 0;
+static ccupdate_struct_t *ccupdate_state = NULL;
+
+/* Called from per-cpu timer interrupt. */
+void slab_drain_local_cache(void)
+{
+ local_irq_disable();
+ if (ccupdate_state != NULL) {
+ ccupdate_struct_t *new = ccupdate_state;
+ cpucache_t *old = cc_data(new->cachep);
+
+ cc_data(new->cachep) = new->new[smp_processor_id()];
+ new->new[smp_processor_id()] = old;
+ } else {
+ kmem_cache_t *cachep = cache_to_drain;
+ cpucache_t *cc = cc_data(cachep);
+
+ if (cc && cc->avail) {
+ free_block(cachep, cc_entry(cc), cc->avail);
+ cc->avail = 0;
+ }
}
+ local_irq_enable();
+
+ clear_bit(smp_processor_id(), &slab_cache_drain_mask);
+ if (slab_cache_drain_mask == 0)
+ wake_up(&cache_drain_wait);
}
-typedef struct ccupdate_struct_s
+static void do_ccupdate(ccupdate_struct_t *data)
{
- kmem_cache_t* cachep;
- cpucache_t* new[NR_CPUS];
-} ccupdate_struct_t;
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long cpu_mask = 0;
+ int i;
-/*
- * called with local interrupts disabled
- */
-static void ccupdate_callback (void* __new)
-{
- ccupdate_struct_t* new = __new;
- cpucache_t *old = cc_data(new->cachep);
+ for (i = 0; i < smp_num_cpus; i++)
+ cpu_mask |= (1UL << cpu_logical_map(i));
+
+ down(&cache_drain_sem);
- cc_data(new->cachep) = new->new[smp_processor_id()];
- new->new[smp_processor_id()] = old;
+ ccupdate_state = data;
+ slab_cache_drain_mask = cpu_mask;
+
+ slab_drain_local_cache();
+
+ add_wait_queue(&cache_drain_wait, &wait);
+ current->state = TASK_UNINTERRUPTIBLE;
+ while (slab_cache_drain_mask != 0UL)
+ schedule();
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&cache_drain_wait, &wait);
+
+ ccupdate_state = NULL;
+
+ up(&cache_drain_sem);
}
/* called with cache_chain_sem acquired. */
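slab_drain_local_cache() now serves both operations: if ccupdate_state is set it swaps in this CPU's new array and hands the old one back through the same slot; otherwise it drains cache_to_drain. Both requesters then block with the classic 2.4 sleep/wake idiom, distilled below (generic condition, not tied to slab):

	/* Register on the queue *before* testing the condition, so a
	 * wake_up() from the last draining CPU cannot be lost between
	 * the test and schedule(). */
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&wq, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (!condition)
		schedule();
	current->state = TASK_RUNNING;
	remove_wait_queue(&wq, &wait);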
@@ -1624,10 +1683,7 @@
cachep->batchcount = batchcount;
spin_unlock_irq(&cachep->spinlock);
- smp_call_function(ccupdate_callback,&new,1,1);
- local_irq_disable();
- ccupdate_callback(&new);
- local_irq_enable();
+ do_ccupdate(&new);
for (i = 0; i < smp_num_cpus; i++) {
cpucache_t* ccold = new.new[cpu_logical_map(i)];