@@ -972,7 +972,7 @@ static void disk_release(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
- kfree(disk->random);
+ _kfree(disk->random);
disk_replace_part_tbl(disk, NULL);
free_part_stats(&disk->part0);
kfree(disk);
@@ -2957,7 +2957,7 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->si_sm) {
if (new_smi->handlers)
new_smi->handlers->cleanup(new_smi->si_sm);
- kfree(new_smi->si_sm);
+ _kfree(new_smi->si_sm);
}
if (new_smi->addr_source_cleanup)
new_smi->addr_source_cleanup(new_smi);
@@ -3120,7 +3120,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
to_clean->handlers->cleanup(to_clean->si_sm);
- kfree(to_clean->si_sm);
+ _kfree(to_clean->si_sm);
if (to_clean->addr_source_cleanup)
to_clean->addr_source_cleanup(to_clean);
@@ -160,7 +160,7 @@ static int dmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct class dmi_class = {
.name = "dmi",
- .dev_release = (void(*)(struct device *)) kfree,
+ .dev_release = (void(*)(struct device *)) _kfree,
.dev_uevent = dmi_dev_uevent,
};
@@ -76,7 +76,7 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
drm_mode_connector_update_edid_property(&intel_output->base,
edid);
ret = drm_add_edid_modes(&intel_output->base, edid);
- kfree(edid);
+ _kfree(edid);
}
return ret;
@@ -736,7 +736,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
* directly here. (Probably a little bit obfuscating
* but legitime ...).
*/
- dev->release = (void (*)(struct device *))kfree;
+ dev->release = (void (*)(struct device *))_kfree;
} else
return -ENOMEM;
ret = device_register(dev);
@@ -1745,7 +1745,7 @@ static int netiucv_register_device(struct net_device *ndev)
* directly here. (Probably a little bit obfuscating
* but legitime ...).
*/
- dev->release = (void (*)(struct device *))kfree;
+ dev->release = (void (*)(struct device *))_kfree;
dev->driver = &netiucv_driver;
} else
return -ENOMEM;
@@ -215,7 +215,7 @@ static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes)
BUG_ON(p != argp->tmpp);
argp->tmpp = NULL;
}
- if (defer_free(argp, kfree, p)) {
+ if (defer_free(argp, _kfree, p)) {
kfree(p);
return NULL;
} else
@@ -292,7 +292,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, struct iattr *ia
host_err = -ENOMEM;
goto out_nfserr;
}
- defer_free(argp, kfree, *acl);
+ defer_free(argp, _kfree, *acl);
(*acl)->naces = nace;
for (ace = (*acl)->aces; ace < (*acl)->aces + nace; ace++) {
@@ -526,6 +526,11 @@ struct sysinfo {
aren't permitted). */
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
+/* If pointer x has the given pointer type, force a compile-time error;
+   otherwise evaluate to x unchanged */
+#define PTR_OR_BB_ON_TYPE(x, type) (&x[sizeof(char[0 - \
+ __builtin_types_compatible_p(typeof(x), type)])])
+
/* Trap pasters of __FUNCTION__ at compile-time */
#define __FUNCTION__ (__func__)
@@ -121,12 +121,15 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+/* Reject kfree() on a struct sk_buff * at compile time; use kfree_skb() instead */
+#define kfree(x) _kfree(PTR_OR_BB_ON_TYPE((x), struct sk_buff*))
+
/*
* Common kmalloc functions provided by all allocators
*/
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
+void _kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
@@ -1103,7 +1103,7 @@ NORET_TYPE void do_exit(long code)
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
if (unlikely(current->pi_state_cache))
- kfree(current->pi_state_cache);
+ _kfree(current->pi_state_cache);
#endif
/*
* Make sure we are holding no locks:
@@ -62,7 +62,7 @@ void kref_get(struct kref *kref)
int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
WARN_ON(release == NULL);
- WARN_ON(release == (void (*)(struct kref *))kfree);
+ WARN_ON(release == (void (*)(struct kref *))_kfree);
if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
@@ -3711,7 +3711,7 @@ EXPORT_SYMBOL(kmem_cache_free);
* Don't free memory not originally allocated by kmalloc()
* or you will run into trouble.
*/
-void kfree(const void *objp)
+void _kfree(const void *objp)
{
struct kmem_cache *c;
unsigned long flags;
@@ -3726,7 +3726,7 @@ void kfree(const void *objp)
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
-EXPORT_SYMBOL(kfree);
+EXPORT_SYMBOL(_kfree);
unsigned int kmem_cache_size(struct kmem_cache *cachep)
{
@@ -487,7 +487,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
}
EXPORT_SYMBOL(__kmalloc_node);
-void kfree(const void *block)
+void _kfree(const void *block)
{
struct slob_page *sp;
@@ -502,7 +502,7 @@ void kfree(const void *block)
} else
put_page(&sp->page);
}
-EXPORT_SYMBOL(kfree);
+EXPORT_SYMBOL(_kfree);
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
@@ -2738,7 +2738,7 @@ size_t ksize(const void *object)
}
EXPORT_SYMBOL(ksize);
-void kfree(const void *x)
+void _kfree(const void *x)
{
struct page *page;
void *object = (void *)x;
@@ -2754,7 +2754,7 @@ void kfree(const void *x)
}
slab_free(page->slab, page, object, _RET_IP_);
}
-EXPORT_SYMBOL(kfree);
+EXPORT_SYMBOL(_kfree);
/*
* kmem_cache_shrink removes empty slabs from the partial lists and sorts
@@ -2671,7 +2671,7 @@ void netif_napi_del(struct napi_struct *napi)
struct sk_buff *skb, *next;
list_del_init(&napi->dev_list);
- kfree(napi->skb);
+ kfree_skb(napi->skb);
for (skb = napi->gro_list; skb; skb = next) {
next = skb->next;
@@ -4720,7 +4720,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
if (!tx) {
printk(KERN_ERR "alloc_netdev: Unable to allocate "
"tx qdiscs.\n");
- kfree(p);
+ _kfree(p);
return NULL;
}
@@ -2862,7 +2862,7 @@ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
}
*sid_cache = sid;
- secattr->cache->free = kfree;
+ secattr->cache->free = _kfree;
secattr->cache->data = sid_cache;
secattr->flags |= NETLBL_SECATTR_CACHE;
}