treewide: kzalloc_node() -> kcalloc_node()
The kzalloc_node() function has a 2-factor argument form, kcalloc_node(). This
patch replaces cases of:
kzalloc_node(a * b, gfp, node)
with:
kcalloc_node(a, b, gfp, node)
as well as handling cases of:
kzalloc_node(a * b * c, gfp, node)
with:
kzalloc_node(array3_size(a, b, c), gfp, node)
as it's slightly less ugly than:
kcalloc_node(array_size(a, b), c, gfp, node)
This does, however, attempt to ignore constant size factors like:
kzalloc_node(4 * 1024, gfp, node)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc_node(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc_node(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc_node(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc_node(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc_node(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc_node(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc_node(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc_node(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc_node(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc_node(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc_node
+ kcalloc_node
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc_node
+ kcalloc_node
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc_node(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc_node(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc_node(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc_node(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc_node(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc_node(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc_node(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc_node(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc_node(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc_node(
- sizeof(TYPE1) * sizeof(TYPE2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc_node(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc_node(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc_node(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc_node(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc_node(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc_node(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc_node(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc_node(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc_node(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc_node(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc_node(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc_node(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc_node(C1 * C2 * C3, ...)
|
kzalloc_node(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc_node(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc_node(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc_node(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc_node(sizeof(THING) * C2, ...)
|
kzalloc_node(sizeof(TYPE) * C2, ...)
|
kzalloc_node(C1 * C2 * C3, ...)
|
kzalloc_node(C1 * C2, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc_node
+ kcalloc_node
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
This commit is contained in:
committed by
UtsavBalar1231
parent
41b77821cf
commit
99d27d6fb6
@@ -1815,7 +1815,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
|
||||
if (!tags)
|
||||
return NULL;
|
||||
|
||||
tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
|
||||
tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
|
||||
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
|
||||
node);
|
||||
if (!tags->rqs) {
|
||||
@@ -1823,9 +1823,9 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
|
||||
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
|
||||
node);
|
||||
tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
|
||||
GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
|
||||
node);
|
||||
if (!tags->static_rqs) {
|
||||
kfree(tags->rqs);
|
||||
blk_mq_free_tags(tags);
|
||||
@@ -2421,7 +2421,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
||||
/* init q->mq_kobj and sw queues' kobjects */
|
||||
blk_mq_sysfs_init(q);
|
||||
|
||||
q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
|
||||
q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
|
||||
GFP_KERNEL, set->numa_node);
|
||||
if (!q->queue_hw_ctx)
|
||||
goto err_percpu;
|
||||
@@ -2636,14 +2636,14 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
|
||||
if (set->nr_hw_queues > nr_cpu_ids)
|
||||
set->nr_hw_queues = nr_cpu_ids;
|
||||
|
||||
set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
|
||||
set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
|
||||
GFP_KERNEL, set->numa_node);
|
||||
if (!set->tags)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = -ENOMEM;
|
||||
set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
|
||||
GFP_KERNEL, set->numa_node);
|
||||
set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
|
||||
GFP_KERNEL, set->numa_node);
|
||||
if (!set->mq_map)
|
||||
goto out_free_tags;
|
||||
|
||||
|
||||
@@ -254,7 +254,7 @@ static int nitrox_enable_msix(struct nitrox_device *ndev)
|
||||
* Entry 192: NPS_CORE_INT_ACTIVE
|
||||
*/
|
||||
nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
|
||||
entries = kzalloc_node(nr_entries * sizeof(struct msix_entry),
|
||||
entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
|
||||
GFP_KERNEL, ndev->node);
|
||||
if (!entries)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -238,7 +238,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
|
||||
if (!accel_dev->pf.vf_info)
|
||||
msix_num_entries += hw_data->num_banks;
|
||||
|
||||
entries = kzalloc_node(msix_num_entries * sizeof(*entries),
|
||||
entries = kcalloc_node(msix_num_entries, sizeof(*entries),
|
||||
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
|
||||
if (!entries)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -373,7 +373,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
|
||||
|
||||
/* Why 3? outhdr + iv + inhdr */
|
||||
sg_total = src_nents + dst_nents + 3;
|
||||
sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
|
||||
sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_ATOMIC,
|
||||
dev_to_node(&vcrypto->vdev->dev));
|
||||
if (!sgs)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -174,7 +174,7 @@ int hfi1_create_kctxts(struct hfi1_devdata *dd)
|
||||
u16 i;
|
||||
int ret;
|
||||
|
||||
dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
|
||||
dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
|
||||
GFP_KERNEL, dd->node);
|
||||
if (!dd->rcd)
|
||||
return -ENOMEM;
|
||||
@@ -424,15 +424,14 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
|
||||
* The resulting value will be rounded down to the closest
|
||||
* multiple of dd->rcv_entries.group_size.
|
||||
*/
|
||||
rcd->egrbufs.buffers = kzalloc_node(
|
||||
rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
|
||||
GFP_KERNEL, numa);
|
||||
rcd->egrbufs.buffers = kcalloc_node(rcd->egrbufs.count,
|
||||
sizeof(*rcd->egrbufs.buffers),
|
||||
GFP_KERNEL, numa);
|
||||
if (!rcd->egrbufs.buffers)
|
||||
goto bail;
|
||||
rcd->egrbufs.rcvtids = kzalloc_node(
|
||||
rcd->egrbufs.count *
|
||||
sizeof(*rcd->egrbufs.rcvtids),
|
||||
GFP_KERNEL, numa);
|
||||
rcd->egrbufs.rcvtids = kcalloc_node(rcd->egrbufs.count,
|
||||
sizeof(*rcd->egrbufs.rcvtids),
|
||||
GFP_KERNEL, numa);
|
||||
if (!rcd->egrbufs.rcvtids)
|
||||
goto bail;
|
||||
rcd->egrbufs.size = eager_buffer_size;
|
||||
|
||||
@@ -876,8 +876,9 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
|
||||
* so head == tail can mean empty.
|
||||
*/
|
||||
sc->sr_size = sci->credits + 1;
|
||||
sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
|
||||
sc->sr_size, GFP_KERNEL, numa);
|
||||
sc->sr = kcalloc_node(sc->sr_size,
|
||||
sizeof(union pio_shadow_ring),
|
||||
GFP_KERNEL, numa);
|
||||
if (!sc->sr) {
|
||||
sc_free(sc);
|
||||
return NULL;
|
||||
@@ -2030,9 +2031,9 @@ int init_pervl_scs(struct hfi1_devdata *dd)
|
||||
hfi1_init_ctxt(dd->vld[15].sc);
|
||||
dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
|
||||
|
||||
dd->kernel_send_context = kzalloc_node(dd->num_send_contexts *
|
||||
sizeof(struct send_context *),
|
||||
GFP_KERNEL, dd->node);
|
||||
dd->kernel_send_context = kcalloc_node(dd->num_send_contexts,
|
||||
sizeof(struct send_context *),
|
||||
GFP_KERNEL, dd->node);
|
||||
if (!dd->kernel_send_context)
|
||||
goto freesc15;
|
||||
|
||||
|
||||
@@ -1680,8 +1680,8 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
|
||||
size = rcd->rcvegrbuf_size;
|
||||
if (!rcd->rcvegrbuf) {
|
||||
rcd->rcvegrbuf =
|
||||
kzalloc_node(chunk * sizeof(rcd->rcvegrbuf[0]),
|
||||
GFP_KERNEL, rcd->node_id);
|
||||
kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]),
|
||||
GFP_KERNEL, rcd->node_id);
|
||||
if (!rcd->rcvegrbuf)
|
||||
goto bail;
|
||||
}
|
||||
|
||||
@@ -837,11 +837,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
|
||||
RCU_INIT_POINTER(qp->next, NULL);
|
||||
if (init_attr->qp_type == IB_QPT_RC) {
|
||||
qp->s_ack_queue =
|
||||
kzalloc_node(
|
||||
sizeof(*qp->s_ack_queue) *
|
||||
rvt_max_atomic(rdi),
|
||||
GFP_KERNEL,
|
||||
rdi->dparms.node);
|
||||
kcalloc_node(rvt_max_atomic(rdi),
|
||||
sizeof(*qp->s_ack_queue),
|
||||
GFP_KERNEL,
|
||||
rdi->dparms.node);
|
||||
if (!qp->s_ack_queue)
|
||||
goto bail_qp;
|
||||
}
|
||||
|
||||
@@ -708,7 +708,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
|
||||
if (!p)
|
||||
return NULL;
|
||||
if (sw_size) {
|
||||
s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
|
||||
s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
|
||||
|
||||
if (!s) {
|
||||
dma_free_coherent(dev, len, p, *phys);
|
||||
|
||||
@@ -449,14 +449,14 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
|
||||
int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
|
||||
int i;
|
||||
|
||||
rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
|
||||
rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info),
|
||||
GFP_KERNEL, cpu_to_node(c->cpu));
|
||||
if (!rq->mpwqe.info)
|
||||
goto err_out;
|
||||
|
||||
/* We allocate more than mtt_sz as we will align the pointer */
|
||||
rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
|
||||
cpu_to_node(c->cpu));
|
||||
rq->mpwqe.mtt_no_align = kcalloc_node(wq_sz, mtt_alloc, GFP_KERNEL,
|
||||
cpu_to_node(c->cpu));
|
||||
if (unlikely(!rq->mpwqe.mtt_no_align))
|
||||
goto err_free_wqe_info;
|
||||
|
||||
@@ -629,7 +629,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
|
||||
break;
|
||||
default: /* MLX5_WQ_TYPE_LINKED_LIST */
|
||||
rq->wqe.frag_info =
|
||||
kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
|
||||
kcalloc_node(wq_sz, sizeof(*rq->wqe.frag_info),
|
||||
GFP_KERNEL, cpu_to_node(c->cpu));
|
||||
if (!rq->wqe.frag_info) {
|
||||
err = -ENOMEM;
|
||||
@@ -975,7 +975,7 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
|
||||
{
|
||||
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
|
||||
|
||||
sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
|
||||
sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di),
|
||||
GFP_KERNEL, numa);
|
||||
if (!sq->db.di) {
|
||||
mlx5e_free_xdpsq_db(sq);
|
||||
@@ -1033,7 +1033,7 @@ static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
|
||||
{
|
||||
u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
|
||||
|
||||
sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
|
||||
sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe),
|
||||
GFP_KERNEL, numa);
|
||||
if (!sq->db.ico_wqe)
|
||||
return -ENOMEM;
|
||||
@@ -1090,9 +1090,9 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
|
||||
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
|
||||
int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
|
||||
|
||||
sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
|
||||
sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo),
|
||||
GFP_KERNEL, numa);
|
||||
sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
|
||||
sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info),
|
||||
GFP_KERNEL, numa);
|
||||
if (!sq->db.dma_fifo || !sq->db.wqe_info) {
|
||||
mlx5e_free_txqsq_db(sq);
|
||||
|
||||
@@ -592,12 +592,12 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev,
|
||||
ndev->db_mask = ndev->db_valid_mask;
|
||||
|
||||
/* Try to set up msix irq */
|
||||
ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
|
||||
ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
|
||||
GFP_KERNEL, node);
|
||||
if (!ndev->vec)
|
||||
goto err_msix_vec_alloc;
|
||||
|
||||
ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
|
||||
ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
|
||||
GFP_KERNEL, node);
|
||||
if (!ndev->msix)
|
||||
goto err_msix_alloc;
|
||||
|
||||
@@ -463,12 +463,12 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev,
|
||||
|
||||
/* Try to set up msix irq */
|
||||
|
||||
ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
|
||||
ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
|
||||
GFP_KERNEL, node);
|
||||
if (!ndev->vec)
|
||||
goto err_msix_vec_alloc;
|
||||
|
||||
ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
|
||||
ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
|
||||
GFP_KERNEL, node);
|
||||
if (!ndev->msix)
|
||||
goto err_msix_alloc;
|
||||
|
||||
@@ -1097,7 +1097,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
|
||||
max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2;
|
||||
nt->mw_count = min(mw_count, max_mw_count_for_spads);
|
||||
|
||||
nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
|
||||
nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec),
|
||||
GFP_KERNEL, node);
|
||||
if (!nt->mw_vec) {
|
||||
rc = -ENOMEM;
|
||||
@@ -1143,7 +1143,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
|
||||
nt->qp_bitmap = qp_bitmap;
|
||||
nt->qp_bitmap_free = qp_bitmap;
|
||||
|
||||
nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
|
||||
nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec),
|
||||
GFP_KERNEL, node);
|
||||
if (!nt->qp_vec) {
|
||||
rc = -ENOMEM;
|
||||
|
||||
@@ -2352,7 +2352,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
if (!dev)
|
||||
return -ENOMEM;
|
||||
|
||||
dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(struct nvme_queue),
|
||||
dev->queues = kcalloc_node(num_possible_cpus() + 1,
|
||||
sizeof(struct nvme_queue),
|
||||
GFP_KERNEL, node);
|
||||
if (!dev->queues)
|
||||
goto free;
|
||||
|
||||
@@ -491,7 +491,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
|
||||
|
||||
/* allocate memory for scp_at_array (ptlrpc_at_array) */
|
||||
array->paa_reqs_array =
|
||||
kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS,
|
||||
kcalloc_node(size, sizeof(struct list_head), GFP_NOFS,
|
||||
cfs_cpt_spread_node(svc->srv_cptable, cpt));
|
||||
if (!array->paa_reqs_array)
|
||||
return -ENOMEM;
|
||||
@@ -500,7 +500,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
|
||||
INIT_LIST_HEAD(&array->paa_reqs_array[index]);
|
||||
|
||||
array->paa_reqs_count =
|
||||
kzalloc_node(sizeof(__u32) * size, GFP_NOFS,
|
||||
kcalloc_node(size, sizeof(__u32), GFP_NOFS,
|
||||
cfs_cpt_spread_node(svc->srv_cptable, cpt));
|
||||
if (!array->paa_reqs_count)
|
||||
goto free_reqs_array;
|
||||
@@ -2540,7 +2540,7 @@ int ptlrpc_hr_init(void)
|
||||
hrp->hrp_nthrs = 1;
|
||||
|
||||
hrp->hrp_thrs =
|
||||
kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
|
||||
kcalloc_node(hrp->hrp_nthrs, sizeof(*hrt), GFP_NOFS,
|
||||
cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
|
||||
i));
|
||||
if (!hrp->hrp_thrs) {
|
||||
|
||||
@@ -635,7 +635,8 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
|
||||
}
|
||||
}
|
||||
|
||||
rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
|
||||
rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
|
||||
node);
|
||||
if (!rb->aux_pages)
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
|
||||
return 0;
|
||||
}
|
||||
|
||||
sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
|
||||
sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
|
||||
if (!sb->map)
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user