patch-2.4.0-test3 linux/drivers/block/ll_rw_blk.c

diff -u --recursive --new-file v2.4.0-test2/linux/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
@@ -37,6 +37,8 @@
 extern int mac_floppy_init(void);
 #endif
 
+extern int lvm_init(void);
+
 /*
  * For the allocated request tables
  */
@@ -148,34 +150,40 @@
 	return ret;
 }
 
-/*
- * Hopefully the low level driver has finished any out standing requests
- * first...
- */
-void blk_cleanup_queue(request_queue_t * q)
+static int __block_cleanup_queue(struct list_head *head)
 {
 	struct list_head *entry;
 	struct request *rq;
-	int i = QUEUE_NR_REQUESTS;
+	int i = 0;
 
-	if (list_empty(&q->request_freelist))
-		return;
-
-	if (q->queue_requests)
-		BUG();
+	if (list_empty(head))
+		return 0;
 
-	entry = &q->request_freelist;
-	entry = entry->next;
+	entry = head->next;
 	do {
 		rq = list_entry(entry, struct request, table);
 		entry = entry->next;
 		list_del(&rq->table);
 		kmem_cache_free(request_cachep, rq);
-		i--;
-	} while (!list_empty(&q->request_freelist));
+		i++;
+	} while (!list_empty(head));
 
-	if (i)
-		printk("blk_cleanup_queue: leaked requests (%d)\n", i);
+	return i;
+}
+
+/*
+ * Hopefully the low-level driver has finished any outstanding requests
+ * first...
+ */
+void blk_cleanup_queue(request_queue_t * q)
+{
+	int count = QUEUE_NR_REQUESTS;
+
+	count -= __block_cleanup_queue(&q->request_freelist[READ]);
+	count -= __block_cleanup_queue(&q->request_freelist[WRITE]);
+
+	if (count)
+		printk("blk_cleanup_queue: leaked requests (%d)\n", count);
 
 	memset(q, 0, sizeof(*q));
 }
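
The hunk above factors the freelist teardown out of blk_cleanup_queue() into a helper that drains one list and reports how many requests it freed, so the caller can subtract both counts from QUEUE_NR_REQUESTS and warn about anything a driver failed to return. A standalone userspace sketch of the same drain-and-count pattern (the list helpers, the drain_freelist name, and malloc/free in place of the request_cachep slab are simplified stand-ins, not the kernel's implementation):

#include <stddef.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel's intrusive list primitives. */
struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *head)
{
	head->next = head->prev = head;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct request {
	struct list_head table;
	struct list_head *free_list;	/* back-pointer added by this patch */
};

/* Drain one freelist and report how many requests it held, as
 * __block_cleanup_queue() does. */
static int drain_freelist(struct list_head *head)
{
	int freed = 0;

	while (!list_empty(head)) {
		struct request *rq =
			list_entry(head->next, struct request, table);

		list_del(&rq->table);
		free(rq);
		freed++;
	}
	return freed;
}

int main(void)
{
	struct list_head freelist;
	int i;

	list_init(&freelist);
	for (i = 0; i < 4; i++) {
		struct request *rq = malloc(sizeof(*rq));

		if (!rq)
			return 1;
		rq->free_list = NULL;
		list_add(&rq->table, &freelist);
	}
	return drain_freelist(&freelist) == 4 ? 0 : 1;
}

Whatever is missing from the expected total after both lists are drained is a request some driver still holds: exactly the leak blk_cleanup_queue() now reports.
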
@@ -280,10 +288,9 @@
 	for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
 		rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL);
 		rq->rq_status = RQ_INACTIVE;
-		list_add(&rq->table, &q->request_freelist);
+		list_add(&rq->table, &q->request_freelist[i & 1]);
 	}
 
-	q->queue_requests = 0;
 	init_waitqueue_head(&q->wait_for_request);
 	spin_lock_init(&q->request_lock);
 }
@@ -291,7 +298,8 @@
 void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
 {
 	INIT_LIST_HEAD(&q->queue_head);
-	INIT_LIST_HEAD(&q->request_freelist);
+	INIT_LIST_HEAD(&q->request_freelist[READ]);
+	INIT_LIST_HEAD(&q->request_freelist[WRITE]);
 	elevator_init(&q->elevator, ELEVATOR_LINUS);
 	blk_init_free_list(q);
 	q->request_fn     	= rfn;
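
With this change the queue keeps two request freelists, indexed by READ (0) and WRITE (1), instead of one shared list guarded by a queue_requests counter. blk_init_free_list() spreads the preallocated requests across the pair with i & 1, so each direction starts with half the pool. A sketch of that initialisation, reusing the helpers from the first sketch (the QUEUE_NR_REQUESTS value below is a placeholder, not the kernel's constant):

#define READ	0
#define WRITE	1
#define QUEUE_NR_REQUESTS 64	/* placeholder; the real value comes
				 * from the kernel headers */

struct request_queue {
	struct list_head request_freelist[2];	/* [READ] and [WRITE] */
};

/* Mirror of blk_init_free_list(): even iterations (i & 1 == 0) feed
 * the READ list, odd ones the WRITE list, splitting the pool evenly. */
static int init_free_lists(struct request_queue *q)
{
	int i;

	list_init(&q->request_freelist[READ]);
	list_init(&q->request_freelist[WRITE]);

	for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
		struct request *rq = malloc(sizeof(*rq));

		if (!rq)
			return -1;
		rq->free_list = NULL;
		list_add(&rq->table, &q->request_freelist[i & 1]);
	}
	return 0;
}
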
@@ -342,25 +350,37 @@
  */
 static inline struct request *get_request(request_queue_t *q, int rw)
 {
-	register struct request *rq = NULL;
-
-	if (!list_empty(&q->request_freelist)) {
-		elevator_t *e = &q->elevator;
+	struct list_head *list = &q->request_freelist[rw];
+	struct request *rq;
 
-		if ((q->queue_requests > QUEUE_WRITES_MAX) && (rw == WRITE))
-			return NULL;
+	/*
+	 * Reads get preferential treatment and are allowed to steal
+	 * from the write free list if necessary.
+	 */
+	if (!list_empty(list)) {
+		rq = blkdev_free_rq(list);
+		goto got_rq;
+	}
 
-		rq = blkdev_free_rq(&q->request_freelist);
-		list_del(&rq->table);
-		rq->rq_status = RQ_ACTIVE;
-		rq->special = NULL;
-		rq->q = q;
-		if (rq->cmd == READ)
-			rq->elevator_sequence = e->read_latency;
-		else
-			rq->elevator_sequence = e->write_latency;
-		q->queue_requests++;
+	/*
+	 * if the WRITE list is non-empty, we know that rw is READ
+	 * and that the READ list is empty. allow reads to 'steal'
+	 * from the WRITE list.
+	 */
+	if (!list_empty(&q->request_freelist[WRITE])) {
+		list = &q->request_freelist[WRITE];
+		rq = blkdev_free_rq(list);
+		goto got_rq;
 	}
+
+	return NULL;
+
+got_rq:
+	list_del(&rq->table);
+	rq->free_list = list;
+	rq->rq_status = RQ_ACTIVE;
+	rq->special = NULL;
+	rq->q = q;
 	return rq;
 }
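
The rewritten get_request() encodes the read-preference policy purely in list selection: a READ that finds its own list empty may steal from the WRITE list, while a WRITE that finds its list empty simply fails, leaving the remaining READ requests untouched. A condensed sketch of that policy, reusing the types from the sketches above (free_list is the back-pointer this patch adds to struct request):

/* Allocation policy of the new get_request(): a READ may fall back to
 * the WRITE freelist, but a WRITE may never take a READ request. */
static struct request *get_request(struct request_queue *q, int rw)
{
	struct list_head *list = &q->request_freelist[rw];
	struct request *rq;

	if (list_empty(list)) {
		/* Only reads may steal from the other list; a write
		 * that finds its own list empty just fails. */
		if (rw != READ || list_empty(&q->request_freelist[WRITE]))
			return NULL;
		list = &q->request_freelist[WRITE];
	}

	rq = list_entry(list->next, struct request, table);
	list_del(&rq->table);
	rq->free_list = list;	/* remember which pool to refill */
	return rq;
}

Stamping rq->free_list at allocation time is what lets the release path below return a stolen request to the WRITE list it actually came from, keeping the two pools balanced.
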
 
@@ -492,9 +512,9 @@
 	/*
 	 * Request may not have originated from ll_rw_blk
 	 */
-	if (req->q) {
-		list_add(&req->table, &req->q->request_freelist);
-		req->q->queue_requests--;
+	if (req->free_list) {
+		list_add(&req->table, req->free_list);
+		req->free_list = NULL;
 		wake_up(&req->q->wait_for_request);
 	}
 }
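
Release is now symmetric with allocation: instead of decrementing a per-queue counter, the request carries its origin in free_list and is put back exactly there. A minimal sketch of the hunk above (the wake_up() of tasks sleeping in __get_request_wait() is elided):

static void release_request(struct request *rq)
{
	if (rq->free_list) {
		/* Return the request to the freelist it was taken
		 * from, and clear the back-pointer so a double
		 * release is harmless. */
		list_add(&rq->table, rq->free_list);
		rq->free_list = NULL;
	}
}

A request that did not originate from ll_rw_blk never had free_list set, so it is skipped just as the old if (req->q) test skipped it.
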
@@ -563,7 +583,7 @@
 	int max_segments = MAX_SEGMENTS;
 	struct request * req = NULL;
 	int rw_ahead, max_sectors, el_ret;
-	struct list_head *head = &q->queue_head;
+	struct list_head *head;
 	int latency;
 	elevator_t *elevator = &q->elevator;
 
@@ -608,12 +628,6 @@
 				goto end_io;	/* Hmmph! Nothing to write */
 			refile_buffer(bh);
 		do_write:
-			/*
-			 * We don't allow the write-requests to fill up the
-			 * queue completely:  we want some room for reads,
-			 * as they take precedence. The last third of the
-			 * requests are only for reads.
-			 */
 			kstat.pgpgout++;
 			break;
 		default:
@@ -652,6 +666,13 @@
 	spin_lock_irq(&io_request_lock);
 	elevator_default_debug(q, bh->b_rdev);
 
+	/*
+	 * skip first entry, for devices with active queue head
+	 */
+	head = &q->queue_head;
+	if (q->head_active && !q->plugged)
+		head = head->next;
+
 	if (list_empty(head)) {
 		q->plug_device_fn(q, bh->b_rdev); /* is atomic */
 		goto get_rq;
@@ -709,12 +730,12 @@
 
 		req = __get_request_wait(q, rw);
 		spin_lock_irq(&io_request_lock);
+
+		head = &q->queue_head;
+		if (q->head_active && !q->plugged)
+			head = head->next;
 	}
 
-	head = &q->queue_head;
-	if (q->head_active && !q->plugged)
-		head = head->next;
-
 /* fill up the request-info, and add it to the queue */
 	req->cmd = rw;
 	req->errors = 0;
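
The hunk above also moves the head recomputation inside the wait branch: __get_request_wait() sleeps with io_request_lock dropped, so a head pointer taken before the sleep may reference a request that has since been dispatched or freed. The general shape of the fix, in an illustrative pthread-based sketch (the add_request_sketch name and must_wait flag are stand-ins, not kernel API; plug_queue and insert_point come from the sketch above):

#include <pthread.h>

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;
static struct plug_queue queue;	/* assume queue_head was list_init()ed */

/* Any pointer derived from the queue before the lock is dropped must
 * be recomputed after the lock is re-taken, because other contexts
 * may have run the queue in the meantime. */
static void add_request_sketch(struct request *rq, int must_wait)
{
	struct list_head *head;

	pthread_mutex_lock(&io_lock);
	head = insert_point(&queue);

	if (must_wait) {
		pthread_mutex_unlock(&io_lock);
		/* ... sleep until a free request turns up, as
		 * __get_request_wait() does ... */
		pthread_mutex_lock(&io_lock);
		head = insert_point(&queue);	/* old value is stale */
	}

	list_add(&rq->table, head);
	pthread_mutex_unlock(&io_lock);
}
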
@@ -842,7 +863,6 @@
 sorry:
 	for (i = 0; i < nr; i++)
 		buffer_IO_error(bhs[i]);
-	return;
 }
 
 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
