patch-2.2.13 linux/mm/page_alloc.c

diff -u --recursive --new-file v2.2.12/linux/mm/page_alloc.c linux/mm/page_alloc.c
@@ -35,17 +35,19 @@
 #else
 #define NR_MEM_LISTS 10
 #endif
+#define NR_MEM_TYPES 2		/* GFP_DMA vs not for now. */
 
 /* The start of this MUST match the start of "struct page" */
 struct free_area_struct {
 	struct page *next;
 	struct page *prev;
 	unsigned int * map;
+	unsigned long count;
 };
 
 #define memory_head(x) ((struct page *)(x))
 
-static struct free_area_struct free_area[NR_MEM_LISTS];
+static struct free_area_struct free_area[NR_MEM_TYPES][NR_MEM_LISTS];
 
 static inline void init_mem_queue(struct free_area_struct * head)
 {
@@ -61,6 +63,7 @@
 	entry->next = next;
 	next->prev = entry;
 	head->next = entry;
+	head->count++;
 }
 
 static inline void remove_mem_queue(struct page * entry)
@@ -90,9 +93,9 @@
  */
 spinlock_t page_alloc_lock = SPIN_LOCK_UNLOCKED;
 
-static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
+static inline void free_pages_ok(unsigned long map_nr, unsigned long order, unsigned type)
 {
-	struct free_area_struct *area = free_area + order;
+	struct free_area_struct *area = free_area[type] + order;
 	unsigned long index = map_nr >> (1 + order);
 	unsigned long mask = (~0UL) << order;
 	unsigned long flags;
@@ -106,6 +109,7 @@
 	while (mask + (1 << (NR_MEM_LISTS-1))) {
 		if (!test_and_change_bit(index, area->map))
 			break;
+		area->count--;
 		remove_mem_queue(list(map_nr ^ -mask));
 		mask <<= 1;
 		area++;
@@ -119,33 +123,28 @@
 	spin_unlock_irqrestore(&page_alloc_lock, flags);
 }
 
-void __free_page(struct page *page)
+static inline void __free_pages(struct page *page, unsigned long order)
 {
 	if (!PageReserved(page) && atomic_dec_and_test(&page->count)) {
 		if (PageSwapCache(page))
 			panic ("Freeing swap cache page");
 		page->flags &= ~(1 << PG_referenced);
-		free_pages_ok(page - mem_map, 0);
+		free_pages_ok(page - mem_map, order, PageDMA(page) ? 1 : 0);
 		return;
 	}
 }
 
+void __free_page(struct page *page)
+{
+	__free_pages(page, 0);
+}
+
 void free_pages(unsigned long addr, unsigned long order)
 {
 	unsigned long map_nr = MAP_NR(addr);
 
-	if (map_nr < max_mapnr) {
-		mem_map_t * map = mem_map + map_nr;
-		if (PageReserved(map))
-			return;
-		if (atomic_dec_and_test(&map->count)) {
-			if (PageSwapCache(map))
-				panic ("Freeing swap cache pages");
-			map->flags &= ~(1 << PG_referenced);
-			free_pages_ok(map_nr, order);
-			return;
-		}
-	}
+	if (map_nr < max_mapnr)
+		__free_pages(mem_map + map_nr, order);
 }
 
 /*
@@ -153,52 +152,9 @@
  */
 #define MARK_USED(index, order, area) \
 	change_bit((index) >> (1+(order)), (area)->map)
-#define CAN_DMA(x) (PageDMA(x))
 #define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
-#define RMQUEUE_DMA(order) \
-do { struct free_area_struct * area = free_area+order; \
-     unsigned long new_order = order; \
-	do { struct page *prev = memory_head(area), *ret = prev->next; \
-		while (memory_head(area) != ret) { \
-			if (CAN_DMA(ret)) { \
-				unsigned long map_nr; \
-				(prev->next = ret->next)->prev = prev; \
-				map_nr = ret - mem_map; \
-				MARK_USED(map_nr, new_order, area); \
-				nr_free_pages -= 1 << order; \
-				EXPAND(ret, map_nr, order, new_order, area); \
-				spin_unlock_irqrestore(&page_alloc_lock, flags); \
-				return ADDRESS(map_nr); \
-			} \
-			prev = ret; \
-			ret = ret->next; \
-		} \
-		new_order++; area++; \
-	} while (new_order < NR_MEM_LISTS); \
-} while (0)
-#define RMQUEUE_NODMA(order) \
-do { struct free_area_struct * area = free_area+order; \
-     unsigned long new_order = order; \
-	do { struct page *prev = memory_head(area), *ret = prev->next; \
-		while (memory_head(area) != ret) { \
-			if (!CAN_DMA(ret)) { \
-				unsigned long map_nr; \
-				(prev->next = ret->next)->prev = prev; \
-				map_nr = ret - mem_map; \
-				MARK_USED(map_nr, new_order, area); \
-				nr_free_pages -= 1 << order; \
-				EXPAND(ret, map_nr, order, new_order, area); \
-				spin_unlock_irqrestore(&page_alloc_lock, flags); \
-				return ADDRESS(map_nr); \
-			} \
-			prev = ret; \
-			ret = ret->next; \
-		} \
-		new_order++; area++; \
-	} while (new_order < NR_MEM_LISTS); \
-} while (0)
-#define RMQUEUE_ANY(order) \
-do { struct free_area_struct * area = free_area+order; \
+#define RMQUEUE_TYPE(order, type) \
+do { struct free_area_struct * area = free_area[type]+order; \
      unsigned long new_order = order; \
 	do { struct page *prev = memory_head(area), *ret = prev->next; \
 		if (memory_head(area) != ret) { \
@@ -207,10 +163,10 @@
 			map_nr = ret - mem_map; \
 			MARK_USED(map_nr, new_order, area); \
 			nr_free_pages -= 1 << order; \
+			area->count--; \
 			EXPAND(ret, map_nr, order, new_order, area); \
 			spin_unlock_irqrestore(&page_alloc_lock, flags); \
 			return ADDRESS(map_nr); \
-		 \
 		} \
 		new_order++; area++; \
 	} while (new_order < NR_MEM_LISTS); \
@@ -275,12 +231,10 @@
 	}
 ok_to_allocate:
 	spin_lock_irqsave(&page_alloc_lock, flags);
-	if (gfp_mask & __GFP_DMA)
-		RMQUEUE_DMA(order);
-	else {
-		RMQUEUE_NODMA(order);
-		RMQUEUE_ANY(order);
-	}
+	/* if it's not a dma request, try non-dma first */
+	if (!(gfp_mask & __GFP_DMA))
+		RMQUEUE_TYPE(order, 0);
+	RMQUEUE_TYPE(order, 1);
 	spin_unlock_irqrestore(&page_alloc_lock, flags);
 
 	/*
@@ -305,26 +259,27 @@
 void show_free_areas(void)
 {
  	unsigned long order, flags;
- 	unsigned long total = 0;
+	unsigned type;
 
+	spin_lock_irqsave(&page_alloc_lock, flags);
 	printk("Free pages:      %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
 	printk("Free: %d (%d %d %d)\n",
 		nr_free_pages,
 		freepages.min,
 		freepages.low,
 		freepages.high);
-	spin_lock_irqsave(&page_alloc_lock, flags);
- 	for (order=0 ; order < NR_MEM_LISTS; order++) {
-		struct page * tmp;
-		unsigned long nr = 0;
-		for (tmp = free_area[order].next ; tmp != memory_head(free_area+order) ; tmp = tmp->next) {
-			nr ++;
+	for (type = 0; type < NR_MEM_TYPES; type++) {
+ 		unsigned long total = 0;
+		printk("%sDMA: ", type ? "" : "Non");
+ 		for (order=0 ; order < NR_MEM_LISTS; order++) {
+			unsigned long nr = free_area[type][order].count;
+
+			total += nr * ((PAGE_SIZE>>10) << order);
+			printk("%lu*%lukB ", nr, (unsigned long)((PAGE_SIZE>>10) << order));
 		}
-		total += nr * ((PAGE_SIZE>>10) << order);
-		printk("%lu*%lukB ", nr, (unsigned long)((PAGE_SIZE>>10) << order));
+		printk("= %lukB)\n", total);
 	}
 	spin_unlock_irqrestore(&page_alloc_lock, flags);
-	printk("= %lukB)\n", total);
 #ifdef SWAP_CACHE_INFO
 	show_swap_cache_info();
 #endif	
@@ -341,8 +296,7 @@
 unsigned long __init free_area_init(unsigned long start_mem, unsigned long end_mem)
 {
 	mem_map_t * p;
-	unsigned long mask = PAGE_MASK;
-	unsigned long i;
+	unsigned long i, j;
 
 	/*
 	 * Select nr of pages we try to keep free for important stuff
@@ -369,17 +323,20 @@
 		p->flags = (1 << PG_DMA) | (1 << PG_reserved);
 	} while (p > mem_map);
 
-	for (i = 0 ; i < NR_MEM_LISTS ; i++) {
-		unsigned long bitmap_size;
-		init_mem_queue(free_area+i);
-		mask += mask;
-		end_mem = (end_mem + ~mask) & mask;
-		bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
-		bitmap_size = (bitmap_size + 7) >> 3;
-		bitmap_size = LONG_ALIGN(bitmap_size);
-		free_area[i].map = (unsigned int *) start_mem;
-		memset((void *) start_mem, 0, bitmap_size);
-		start_mem += bitmap_size;
+	for (j = 0; j < NR_MEM_TYPES; j++) {
+		unsigned long mask = PAGE_MASK;
+		for (i = 0 ; i < NR_MEM_LISTS ; i++) {
+			unsigned long bitmap_size;
+			init_mem_queue(free_area[j]+i);
+			mask += mask;
+			end_mem = (end_mem + ~mask) & mask;
+			bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
+			bitmap_size = (bitmap_size + 7) >> 3;
+			bitmap_size = LONG_ALIGN(bitmap_size);
+			free_area[j][i].map = (unsigned int *) start_mem;
+			memset((void *) start_mem, 0, bitmap_size);
+			start_mem += bitmap_size;
+		}
 	}
 	return start_mem;
 }

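In outline, this patch splits the buddy allocator's free lists into two sets indexed by memory type (non-DMA and DMA), adds a per-order count of free blocks to free_area_struct, and makes ordinary allocations try the non-DMA lists before falling back to the DMA lists, replacing the old RMQUEUE_DMA/RMQUEUE_NODMA/RMQUEUE_ANY macros with a single RMQUEUE_TYPE(order, type). The fragment below is a minimal, self-contained sketch of that lookup order only; the names and the simplified free_area layout are illustrative, not the 2.2.13 kernel structures, and a real allocator would also split larger blocks (EXPAND) and maintain the buddy bitmaps.

	/* Sketch of the allocation order introduced by this patch:
	 * two sets of per-order free lists (0 = non-DMA, 1 = DMA),
	 * with ordinary requests trying the non-DMA set first and
	 * GFP_DMA requests restricted to the DMA set. */
	#include <stdio.h>

	#define NR_MEM_LISTS 10
	#define NR_MEM_TYPES 2          /* non-DMA vs DMA, as in the patch */
	#define GFP_DMA      0x01       /* illustrative flag value */

	struct free_area_sketch {
		unsigned long count;    /* free blocks of this order and type */
	};

	static struct free_area_sketch free_area[NR_MEM_TYPES][NR_MEM_LISTS];

	/* Find the first non-empty list that can satisfy the request;
	 * returns 0 and fills in (type, order) on success, -1 otherwise. */
	static int find_block(unsigned long order, unsigned gfp_mask,
			      unsigned *type_out, unsigned long *order_out)
	{
		unsigned first_type = (gfp_mask & GFP_DMA) ? 1 : 0;

		for (unsigned type = first_type; type < NR_MEM_TYPES; type++) {
			for (unsigned long o = order; o < NR_MEM_LISTS; o++) {
				if (free_area[type][o].count) {
					*type_out = type;
					*order_out = o;
					return 0;
				}
			}
		}
		return -1;
	}

	int main(void)
	{
		unsigned type;
		unsigned long order;

		free_area[1][3].count = 1;  /* pretend one order-3 DMA block is free */

		if (find_block(0, 0, &type, &order) == 0)
			printf("order-0 request satisfied from type %u, order %lu\n",
			       type, order);
		return 0;
	}

A non-DMA request here walks type 0 and then type 1, mirroring the "try non-dma first" comment in the patched __get_free_pages(), while a GFP_DMA request starts at type 1 and never touches the non-DMA lists.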