Merge tag 'ASB-2025-04-05_11-5.4' of https://android.googlesource.com/kernel/common into android13-5.4-lahaina

https://source.android.com/docs/security/bulletin/2025-04-01
CVE-2024-50264
CVE-2024-53197
CVE-2024-56556
CVE-2024-53150

* tag 'ASB-2025-04-05_11-5.4' of https://android.googlesource.com/kernel/common:
  UPSTREAM: net: sched: Disallow replacing of child qdisc from one parent to another
  UPSTREAM: pfifo_tail_enqueue: Drop new packet when sch->limit == 0
  UPSTREAM: f2fs: compress: don't allow unaligned truncation on released compress inode
  UPSTREAM: net: core: reject skb_copy(_expand) for fraglist GSO skbs
  UPSTREAM: udp: prevent local UDP tunnel packets from being GROed
  UPSTREAM: udp: do not transition UDP GRO fraglist partial checksums to unnecessary
  UPSTREAM: udp: do not accept non-tunnel GSO skbs landing in a tunnel
  UPSTREAM: binder: Return EFAULT if we fail BINDER_ENABLE_ONEWAY_SPAM_DETECTION

Change-Id: If91ea6f68126e13b4dfc08471e94ced6d2d68ae9
This commit is contained in:
Michael Bestas
2025-04-23 17:49:35 +03:00
9 changed files with 82 additions and 28 deletions

View File

@@ -890,9 +890,14 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
ATTR_GID | ATTR_TIMES_SET))))
return -EPERM;
if ((attr->ia_valid & ATTR_SIZE) &&
!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
if ((attr->ia_valid & ATTR_SIZE)) {
if (!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
!IS_ALIGNED(attr->ia_size,
F2FS_BLK_TO_BYTES(F2FS_I(inode)->i_cluster_size)))
return -EINVAL;
}
err = setattr_prepare(dentry, attr);
if (err)

View File

@@ -131,6 +131,24 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
}
}
DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
#endif
/*
 * udp_encap_needed - report whether any UDP encapsulation (tunnel) receive
 * path may be active on this host.
 *
 * Returns true when the IPv4 encap static key is enabled, or — with
 * CONFIG_IPV6 — when the IPv6 encap static key is enabled.  Static branches
 * keep this check nearly free in the common no-tunnel case; the keys are
 * incremented by udp_encap_enable()/udpv6_encap_enable() when a tunnel
 * socket registers an encap_rcv handler.
 */
static inline bool udp_encap_needed(void)
{
if (static_branch_unlikely(&udp_encap_needed_key))
return true;
#if IS_ENABLED(CONFIG_IPV6)
if (static_branch_unlikely(&udpv6_encap_needed_key))
return true;
#endif
return false;
}
static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
{
if (!skb_is_gso(skb))
@@ -142,6 +160,16 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
return true;
/* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
* land in a tunnel as the socket check in udp_gro_receive cannot be
* foolproof.
*/
if (udp_encap_needed() &&
READ_ONCE(udp_sk(sk)->encap_rcv) &&
!(skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
return true;
return false;
}

View File

@@ -1527,11 +1527,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
int headerlen = skb_headroom(skb);
unsigned int size = skb_end_offset(skb) + skb->data_len;
struct sk_buff *n = __alloc_skb(size, gfp_mask,
skb_alloc_rx_flag(skb), NUMA_NO_NODE);
struct sk_buff *n;
unsigned int size;
int headerlen;
if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
return NULL;
headerlen = skb_headroom(skb);
size = skb_end_offset(skb) + skb->data_len;
n = __alloc_skb(size, gfp_mask,
skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
@@ -1803,12 +1809,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
/*
* Allocate the copy buffer
*/
struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
gfp_mask, skb_alloc_rx_flag(skb),
NUMA_NO_NODE);
int oldheadroom = skb_headroom(skb);
int head_copy_len, head_copy_off;
struct sk_buff *n;
int oldheadroom;
if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
return NULL;
oldheadroom = skb_headroom(skb);
n = __alloc_skb(newheadroom + skb->len + newtailroom,
gfp_mask, skb_alloc_rx_flag(skb),
NUMA_NO_NODE);
if (!n)
return NULL;

View File

@@ -540,6 +540,13 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
}
DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
EXPORT_SYMBOL(udp_encap_needed_key);
#if IS_ENABLED(CONFIG_IPV6)
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
EXPORT_SYMBOL(udpv6_encap_needed_key);
#endif
void udp_encap_enable(void)
{
static_branch_inc(&udp_encap_needed_key);

View File

@@ -512,11 +512,19 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
unsigned int off = skb_gro_offset(skb);
int flush = 1;
/* we can do L4 aggregation only if the packet can't land in a tunnel
* otherwise we could corrupt the inner stream
/* We can do L4 aggregation only if the packet can't land in a tunnel
* otherwise we could corrupt the inner stream. Detecting such packets
* cannot be foolproof and the aggregation might still happen in some
* cases. Such packets should be caught in udp_unexpected_gso later.
*/
NAPI_GRO_CB(skb)->is_flist = 0;
if (!sk || !udp_sk(sk)->gro_receive) {
/* If the packet was locally encapsulated in a UDP tunnel that
* wasn't detected above, do not GRO.
*/
if (skb->encapsulation)
goto out;
if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
@@ -660,13 +668,7 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
skb->csum_level++;
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = 0;
}
__skb_incr_checksum_unnecessary(skb);
return 0;
}

View File

@@ -413,7 +413,7 @@ csum_copy_err:
goto try_again;
}
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
static_branch_inc(&udpv6_encap_needed_key);

View File

@@ -156,13 +156,7 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
skb->csum_level++;
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = 0;
}
__skb_incr_checksum_unnecessary(skb);
return 0;
}

View File

@@ -1593,6 +1593,10 @@ replay:
q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q)
goto create_n_graft;
if (q->parent != tcm->tcm_parent) {
NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent");
return -EINVAL;
}
if (n->nlmsg_flags & NLM_F_EXCL) {
NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
return -EEXIST;

View File

@@ -38,6 +38,9 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
unsigned int prev_backlog;
if (unlikely(sch->limit == 0))
return qdisc_drop(skb, sch, to_free);
if (likely(sch->q.qlen < sch->limit))
return qdisc_enqueue_tail(skb, sch);