tree wide: use kvfree() rather than conditional kfree()/vfree()
There are many locations that do

  if (memory_was_allocated_by_vmalloc)
    vfree(ptr);
  else
    kfree(ptr);

but kvfree() can handle both kmalloc()ed memory and vmalloc()ed memory
using is_vmalloc_addr(). Unless callers have special reasons, we can
replace this branch with kvfree(). Please check and reply if you find
any problems.
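
For reference, kvfree() (in mm/util.c) is essentially the same branch,
keyed on the address itself rather than on caller-side bookkeeping:

  void kvfree(const void *addr)
  {
          if (is_vmalloc_addr(addr))
                  vfree(addr);
          else
                  kfree(addr);
  }

Because is_vmalloc_addr() can tell the two address ranges apart, callers
no longer need to remember which allocator produced the pointer.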
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Jan Kara <jack@suse.com>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Acked-by: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Acked-by: David Rientjes <rientjes@google.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 1d5cfdb076
parent eab95db69d
committed by Linus Torvalds
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -364,12 +364,9 @@ static void bm_free_pages(struct page **pages, unsigned long number)
 	}
 }
 
-static void bm_vk_free(void *ptr, int v)
+static inline void bm_vk_free(void *ptr)
 {
-	if (v)
-		vfree(ptr);
-	else
-		kfree(ptr);
+	kvfree(ptr);
 }
 
 /*
@@ -379,7 +376,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 {
 	struct page **old_pages = b->bm_pages;
 	struct page **new_pages, *page;
-	unsigned int i, bytes, vmalloced = 0;
+	unsigned int i, bytes;
 	unsigned long have = b->bm_number_of_pages;
 
 	BUG_ON(have == 0 && old_pages != NULL);
@@ -401,7 +398,6 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 				PAGE_KERNEL);
 		if (!new_pages)
 			return NULL;
-		vmalloced = 1;
 	}
 
 	if (want >= have) {
@@ -411,7 +407,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
 			if (!page) {
 				bm_free_pages(new_pages + have, i - have);
-				bm_vk_free(new_pages, vmalloced);
+				bm_vk_free(new_pages);
 				return NULL;
 			}
 			/* we want to know which page it is
@@ -427,11 +423,6 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 		*/
 	}
 
-	if (vmalloced)
-		b->bm_flags |= BM_P_VMALLOCED;
-	else
-		b->bm_flags &= ~BM_P_VMALLOCED;
-
 	return new_pages;
 }
 
@@ -469,7 +460,7 @@ void drbd_bm_cleanup(struct drbd_device *device)
 	if (!expect(device->bitmap))
 		return;
 	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
-	bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
+	bm_vk_free(device->bitmap->bm_pages);
 	kfree(device->bitmap);
 	device->bitmap = NULL;
 }
@@ -643,7 +634,6 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
 	unsigned long want, have, onpages; /* number of pages */
 	struct page **npages, **opages = NULL;
 	int err = 0, growing;
-	int opages_vmalloced;
 
 	if (!expect(b))
 		return -ENOMEM;
@@ -656,8 +646,6 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
 	if (capacity == b->bm_dev_capacity)
 		goto out;
 
-	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);
-
 	if (capacity == 0) {
 		spin_lock_irq(&b->bm_lock);
 		opages = b->bm_pages;
@@ -671,7 +659,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
 		b->bm_dev_capacity = 0;
 		spin_unlock_irq(&b->bm_lock);
 		bm_free_pages(opages, onpages);
-		bm_vk_free(opages, opages_vmalloced);
+		bm_vk_free(opages);
 		goto out;
 	}
 	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));
@@ -744,7 +732,7 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
 
 	spin_unlock_irq(&b->bm_lock);
 	if (opages != npages)
-		bm_vk_free(opages, opages_vmalloced);
+		bm_vk_free(opages);
 	if (!growing)
 		b->bm_set = bm_count_bits(b);
 	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -536,9 +536,6 @@ struct drbd_bitmap; /* opaque for drbd_device */
 
 /* definition of bits in bm_flags to be used in drbd_bm_lock
  * and drbd_bitmap_io and friends. */
 enum bm_flag {
-	/* do we need to kfree, or vfree bm_pages? */
-	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
-
 	/* currently locked for bulk operation */
 	BM_LOCKED_MASK = 0xf,
 
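
The drbd hunks above show the typical shape of this cleanup:
bm_realloc_pages() falls back to a vmalloc-based allocation when the
small-allocation path fails, and previously had to record that in a
"vmalloced" flag (and the BM_P_VMALLOCED bit) so the matching free
could be chosen later. A minimal sketch of the same fallback pattern
with kvfree() -- the helper names here are illustrative, not taken
from the drbd code:

  #include <linux/slab.h>
  #include <linux/vmalloc.h>
  #include <linux/mm.h>

  /* Try the slab allocator first; fall back to vmalloc() for requests
   * that are too large or too fragmented for kmalloc().  No flag is
   * recorded, because kvfree() distinguishes the two cases with
   * is_vmalloc_addr(). */
  static void *buf_alloc(size_t bytes)
  {
          void *p = kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);

          if (!p)
                  p = vmalloc(bytes);
          return p;
  }

  static void buf_free(void *p)
  {
          kvfree(p);      /* correct for both allocators */
  }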