Merge branch 'for-linus' of ../linux-2.6-block into block-for-2.6.39/core

This merge creates two sets of conflicts.  One is simple context
conflicts caused by removal of throtl_scheduled_delayed_work() in
for-linus and removal of throtl_shutdown_timer_wq() in
for-2.6.39/core.

The other is caused by commit 255bb490c8 (block: blk-flush shouldn't
call directly into q->request_fn() __blk_run_queue()) in for-linus
crashing with FLUSH reimplementation in for-2.6.39/core.  The conflict
isn't trivial but the resolution is straightforward.

* __blk_run_queue() calls in flush_end_io() and flush_data_end_io()
  should be called with @force_kblockd set to %true.

* elv_insert() in blk_kick_flush() should use
  %ELEVATOR_INSERT_REQUEUE.

Both changes are to avoid invoking ->request_fn() directly from
request completion path and closely match the changes in the commit
255bb490c8.

Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
Tejun Heo
2011-03-04 19:09:02 +01:00
209 changed files with 1842 additions and 735 deletions

View File

@@ -212,9 +212,14 @@ static void flush_end_io(struct request *flush_rq, int error)
queued |= blk_flush_complete_seq(rq, seq, error);
}
/* after populating an empty queue, kick it to avoid stall */
/*
* Moving a request silently to empty queue_head may stall the
* queue. Kick the queue in those cases. This function is called
* from request completion path and calling directly into
* request_fn may confuse the driver. Always use kblockd.
*/
if (queued && was_empty)
__blk_run_queue(q);
__blk_run_queue(q, true);
}
/**
@@ -257,7 +262,7 @@ static bool blk_kick_flush(struct request_queue *q)
q->flush_rq.end_io = flush_end_io;
q->flush_pending_idx ^= 1;
elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_FRONT);
elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
return true;
}
@@ -266,9 +271,12 @@ static void flush_data_end_io(struct request *rq, int error)
struct request_queue *q = rq->q;
bool was_empty = elv_queue_empty(q);
/* after populating an empty queue, kick it to avoid stall */
/*
* After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io().
*/
if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty)
__blk_run_queue(q);
__blk_run_queue(q, true);
}
/**