Merge branch 'for-4.5/core' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:
"We don't have a lot of core changes this time around, it's mostly in
drivers, which will come in a subsequent pull.
The core changes include:
- blk-mq
- Prep patch from Christoph, changing blk_mq_alloc_request() to
take flags instead of just using gfp_t for sleep/nosleep (a
usage sketch follows the summary).
- Doc patch from me, clarifying the difference between legacy
and blk-mq for timer usage.
- Fixes from Raghavendra for memory-less numa nodes, and a reuse
of CPU masks.
- Cleanup from Geliang Tang, using offset_in_page() instead of open
coding it (see the sketch after the shortlog).
- From Ilya, rename the request_queue slab cache so it reflects what
it holds, and a fix for proper use of bdgrab()/bdput().
- A real fix from Keith for the split across stripe boundaries. We
yanked a broken version of this from 4.4-rc final; this one works.
- From Mike Krinkin, emit a trace message when we split.
- From Wei Tang, two small cleanups, removing explicit clearing of
memory that is already cleared"
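
To make Christoph's interface change concrete, here is a minimal, hypothetical caller sketch assuming the post-series blk_mq_alloc_request(q, rw, flags) signature; the queue q is a placeholder and the error handling is illustrative only:

/*
 * Hypothetical caller, assuming the new flags-based API: passing
 * BLK_MQ_REQ_NOWAIT replaces the old "gfp_t without
 * __GFP_DIRECT_RECLAIM" idiom for a non-sleeping allocation.
 */
struct request *rq;

rq = blk_mq_alloc_request(q, READ, BLK_MQ_REQ_NOWAIT);
if (IS_ERR(rq))
        return PTR_ERR(rq);     /* allocation would have had to block */
/* ... set up and issue rq; release with blk_mq_free_request(rq) ... */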
* 'for-4.5/core' of git://git.kernel.dk/linux-block:
block: use bd{grab,put}() instead of open-coding
block: split bios to max possible length
block: add call to split trace point
blk-mq: Avoid memoryless numa node encoded in hctx numa_node
blk-mq: Reuse hardware context cpumask for tags
blk-mq: add a flags parameter to blk_mq_alloc_request
Revert "blk-flush: Queue through IO scheduler when flush not required"
block: clarify blk_add_timer() use case for blk-mq
bio: use offset_in_page macro
block: do not initialise statics to 0 or NULL
block: do not initialise globals to 0 or NULL
block: rename request_queue slab cache
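
As a before/after sketch for the "bio: use offset_in_page macro" cleanup above (ptr and offset are placeholder names, not taken from the patch):

/* before: open-coded page-offset masking */
offset = ((unsigned long) ptr) & ~PAGE_MASK;

/* after: the equivalent helper from <linux/mm.h> */
offset = offset_in_page(ptr);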
block/blk-core.c

@@ -51,7 +51,7 @@ DEFINE_IDA(blk_queue_ida);
 /*
  * For the allocated request tables
  */
-struct kmem_cache *request_cachep = NULL;
+struct kmem_cache *request_cachep;
 
 /*
  * For queue allocation
@@ -646,7 +646,7 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
-int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+int blk_queue_enter(struct request_queue *q, bool nowait)
 {
         while (true) {
                 int ret;
@@ -654,7 +654,7 @@ int blk_queue_enter(struct request_queue *q, gfp_t gfp)
                 if (percpu_ref_tryget_live(&q->q_usage_counter))
                         return 0;
 
-                if (!gfpflags_allow_blocking(gfp))
+                if (nowait)
                         return -EBUSY;
 
                 ret = wait_event_interruptible(q->mq_freeze_wq,
@@ -1292,7 +1292,9 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
         if (q->mq_ops)
-                return blk_mq_alloc_request(q, rw, gfp_mask, false);
+                return blk_mq_alloc_request(q, rw,
+                        (gfp_mask & __GFP_DIRECT_RECLAIM) ?
+                                0 : BLK_MQ_REQ_NOWAIT);
         else
                 return blk_old_get_request(q, rw, gfp_mask);
 }
@@ -2060,8 +2062,7 @@ blk_qc_t generic_make_request(struct bio *bio)
         do {
                 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-                if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) {
-
+                if (likely(blk_queue_enter(q, false) == 0)) {
                         ret = q->make_request_fn(q, bio);
 
                         blk_queue_exit(q);
@@ -3534,7 +3535,7 @@ int __init blk_dev_init(void)
         request_cachep = kmem_cache_create("blkdev_requests",
                         sizeof(struct request), 0, SLAB_PANIC, NULL);
 
-        blk_requestq_cachep = kmem_cache_create("blkdev_queue",
+        blk_requestq_cachep = kmem_cache_create("request_queue",
                         sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
         return 0;
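
For reference, a sketch of how a caller adapts to the new bool-based blk_queue_enter() shown in the hunks above; the error path is illustrative rather than lifted from the series:

/*
 * nowait = true asks blk_queue_enter() to fail with -EBUSY instead
 * of sleeping while the queue is frozen, replacing the old
 * gfpflags_allow_blocking() test on a gfp_t argument.
 */
if (blk_queue_enter(q, true))
        return -EBUSY;          /* illustrative: caller cannot block */

/* ... submit work against q ... */

blk_queue_exit(q);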