treewide: fix mismerges in f01fa25d84

Fixes the following mis-applied commits:

cbf743995a, 3c2ae48ece, fc0c9ea31f, 1969c2d11a, eb1e322c70, f114a36246
ce4b7e4bf4, 93934e5d46, 0797e5f145, 6c5bc69f72, b55f0a9f86, a821ee4968
27b028b9c3, 517b875dfb, 061f2aff69

Signed-off-by: Samuel Pascua <pascua.samuel.14@gmail.com>
This commit is contained in:
Samuel Pascua
2025-09-15 15:10:08 +08:00
parent 80d0f60662
commit 56033e818d
20 changed files with 188 additions and 133 deletions

View File

@@ -1051,6 +1051,16 @@ config ARM64_TAGGED_ADDR_ABI
to system calls as pointer arguments. For details, see
Documentation/arm64/tagged-address-abi.rst.
config MITIGATE_SPECTRE_BRANCH_HISTORY
bool "Mitigate Spectre style attacks against branch history" if EXPERT
default y
depends on HARDEN_BRANCH_PREDICTOR || !KVM
help
Speculation attacks against some high-performance processors can
make use of branch history to influence future speculation.
When taking an exception from user-space, a sequence of branches
or a firmware call overwrites the branch history.
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT

View File

@@ -273,7 +273,6 @@ CONFIG_USB_USBNET=y
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
# CONFIG_WLAN_VENDOR_QUANTENNA is not set
CONFIG_MAC80211_HWSIM=m
CONFIG_VIRT_WIFI=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
@@ -479,11 +478,11 @@ CONFIG_LSM_MMAP_MIN_ADDR=65536
CONFIG_HARDENED_USERCOPY=y
CONFIG_STATIC_USERMODEHELPER=y
CONFIG_SECURITY_SELINUX=y
CONFIG_INIT_STACK_ALL_ZERO=y
CONFIG_INIT_STACK_ALL=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_BLAKE2B=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_DEV_VIRTIO is not set
CONFIG_XZ_DEC=y

View File

@@ -196,7 +196,7 @@ static void show_data(unsigned long addr, int nbytes, const char *name)
* don't attempt to dump non-kernel addresses or
* values that are probably just small negative numbers
*/
if (addr < PAGE_OFFSET || addr > -256UL)
if (addr < KIMAGE_VADDR || addr > -256UL)
return;
printk("\n%s: %pS:\n", name, addr);

View File

@@ -123,6 +123,9 @@ static unsigned int __init parse_logical_bootcpu(u64 dt_phys)
* attempt at mapping the FDT in setup_machine()
*/
early_fixmap_init();
fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
if (!fdt)
return 0;
mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;

View File

@@ -12,6 +12,7 @@ config 64BIT
config SPARC
bool
default y
select ARCH_HAS_CPU_FINALIZE_INIT if !SMP
select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
select ARCH_MIGHT_HAVE_PC_SERIO
select OF

View File

@@ -1940,7 +1940,7 @@ static int loop_add(struct loop_device **l, int i)
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
BLK_MQ_F_NO_SCHED_BY_DEFAULT;
BLK_MQ_F_NO_SCHED_BY_DEFAULT | BLK_MQ_F_NO_SCHED;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);

View File

@@ -648,6 +648,30 @@ config RANDOM_TRUST_CPU
setting. Enabling this implies trusting that the CPU can supply high
quality and non-backdoored random numbers.
Say Y here unless you have reason to mistrust your CPU or believe
its RNG facilities may be faulty. This may also be configured at
boot time with "random.trust_cpu=on/off".
config RANDOM_TRUST_BOOTLOADER
bool "Initialize RNG using bootloader-supplied seed"
default y
help
Initialize the RNG using a seed supplied by the bootloader or boot
environment (e.g. EFI or a bootloader-generated device tree). This
seed is not used directly, but is rather hashed into the main input
pool, and this happens regardless of whether or not this option is
enabled. Instead, this option controls whether the seed is credited
and hence can initialize the RNG. Additionally, other sources of
randomness are always used, regardless of this setting. Enabling
this implies trusting that the bootloader can supply high quality and
non-backdoored seeds.
Say Y here unless you have reason to mistrust your bootloader or
believe its RNG facilities may be faulty. This may also be configured
at boot time with "random.trust_bootloader=on/off".
endmenu
config OKL4_PIPE
bool "OKL4 Pipe Driver"
depends on OKL4_GUEST
@@ -694,26 +718,3 @@ config VSERVICES_VTTY_COUNT
help
The maximum number of Virtual Services serial devices to support.
This limit applies to both the client and server.
Say Y here unless you have reason to mistrust your CPU or believe
its RNG facilities may be faulty. This may also be configured at
boot time with "random.trust_cpu=on/off".
config RANDOM_TRUST_BOOTLOADER
bool "Initialize RNG using bootloader-supplied seed"
default y
help
Initialize the RNG using a seed supplied by the bootloader or boot
environment (e.g. EFI or a bootloader-generated device tree). This
seed is not used directly, but is rather hashed into the main input
pool, and this happens regardless of whether or not this option is
enabled. Instead, this option controls whether the seed is credited
and hence can initialize the RNG. Additionally, other sources of
randomness are always used, regardless of this setting. Enabling
this implies trusting that the bootloader can supply high quality and
non-backdoored seeds.
Say Y here unless you have reason to mistrust your bootloader or
believe its RNG facilities may be faulty. This may also be configured
at boot time with "random.trust_bootloader=on/off".
endmenu

View File

@@ -46,6 +46,12 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
static struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
NULL,
};
struct clk_handoff_vdd {
struct list_head list;
struct clk_vdd_class *vdd_class;
@@ -2762,12 +2768,6 @@ static u32 debug_suspend;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);
static struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
NULL,
};
static struct hlist_head *orphan_list[] = {
&clk_orphan_list,
NULL,
@@ -4060,6 +4060,34 @@ static const struct clk_ops clk_nodrv_ops = {
.set_parent = clk_nodrv_set_parent,
};
/*
 * Recursively clear every cached parent pointer referring to @target,
 * starting at @root and descending through all of @root's children.
 * Used when @target is being unregistered so no clk keeps a stale
 * pointer to it in its parents[] cache.
 */
static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
struct clk_core *target)
{
int i;
struct clk_core *child;
/* Drop any cached reference to @target from @root's parent table. */
for (i = 0; i < root->num_parents; i++)
if (root->parents[i] == target)
root->parents[i] = NULL;
/* Depth-first walk over the whole subtree below @root. */
hlist_for_each_entry(child, &root->children, child_node)
clk_core_evict_parent_cache_subtree(child, target);
}
/* Remove this clk from all parent caches */
static void clk_core_evict_parent_cache(struct clk_core *core)
{
struct hlist_head **lists;
struct clk_core *root;
/* Walking every clk tree requires the global prepare lock. */
lockdep_assert_held(&prepare_lock);
/*
 * all_lists is the NULL-terminated array { &clk_root_list,
 * &clk_orphan_list, NULL }, so this visits every registered clk.
 */
for (lists = all_lists; *lists; lists++)
hlist_for_each_entry(root, *lists, child_node)
clk_core_evict_parent_cache_subtree(root, core);
}
/**
* clk_unregister - unregister a currently registered clock
* @clk: clock to unregister
@@ -4104,6 +4132,8 @@ void clk_unregister(struct clk *clk)
clk_core_set_parent(child, NULL);
}
clk_core_evict_parent_cache(clk->core);
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)

View File

@@ -98,53 +98,8 @@ out:
dentry->d_name.name, ret > 0 ? name : "");
}
/*
 * d_release callback for dma-buf dentries: frees the dma_buf once its
 * backing dentry goes away (installed via dma_buf_dentry_ops).
 */
static void dma_buf_release(struct dentry *dentry)
{
struct dma_buf *dmabuf;
/* The dma_buf is stashed in d_fsdata — presumably at file creation; verify. */
dmabuf = dentry->d_fsdata;
/* A live kernel vmap at release time is a caller bug. */
BUG_ON(dmabuf->vmapping_counter);
/*
 * Any fences that a dma-buf poll can wait on should be signaled
 * before releasing dma-buf. This is the responsibility of each
 * driver that uses the reservation objects.
 *
 * If you hit this BUG() it means someone dropped their ref to the
 * dma-buf while still having pending operation to the buffer.
 */
BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
/* Let the exporter tear down its backing storage first. */
dmabuf->ops->release(dmabuf);
/* resv was allocated inline right after the dma_buf when it equals &dmabuf[1]. */
if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
reservation_object_fini(dmabuf->resv);
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);
}
/*
 * file_operations .release hook: unlinks the dma_buf from the global
 * db_list when the last file reference is dropped. The memory itself
 * is freed later by the dentry d_release path.
 */
static int dma_buf_file_release(struct inode *inode, struct file *file)
{
struct dma_buf *dmabuf;
/* Reject files that were not created by the dma-buf core. */
if (!is_dma_buf_file(file))
return -EINVAL;
dmabuf = file->private_data;
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
return 0;
}
/* Dentry ops for dma-buf files: custom /proc name plus per-dentry release. */
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
};
static struct vfsmount *dma_buf_mnt;
@@ -162,6 +117,47 @@ static struct file_system_type dma_buf_fs_type = {
.kill_sb = kill_anon_super,
};
/*
 * Combined file release path: detaches the dentry, removes the buffer
 * from db_list, and frees all dma_buf resources in one place.
 */
static int dma_buf_release(struct inode *inode, struct file *file)
{
struct dma_buf *dmabuf;
struct dentry *dentry = file->f_path.dentry;
/* Reject files that were not created by the dma-buf core. */
if (!is_dma_buf_file(file))
return -EINVAL;
dmabuf = file->private_data;
/* Clear d_fsdata under d_lock so dentry users stop seeing this buffer. */
spin_lock(&dentry->d_lock);
dentry->d_fsdata = NULL;
spin_unlock(&dentry->d_lock);
/* A live kernel vmap at release time is a caller bug. */
BUG_ON(dmabuf->vmapping_counter);
/*
 * Any fences that a dma-buf poll can wait on should be signaled
 * before releasing dma-buf. This is the responsibility of each
 * driver that uses the reservation objects.
 *
 * If you hit this BUG() it means someone dropped their ref to the
 * dma-buf while still having pending operation to the buffer.
 */
BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
/* Exporter teardown before the core's own state is destroyed. */
dmabuf->ops->release(dmabuf);
/* NOTE(review): dma_buf_ref_destroy() looks like debug ref-tracking teardown — confirm. */
dma_buf_ref_destroy(dmabuf);
/* resv was allocated inline right after the dma_buf when it equals &dmabuf[1]. */
if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
reservation_object_fini(dmabuf->resv);
module_put(dmabuf->owner);
/* dmabuf_dent_put() presumably drops the last ref and frees dmabuf — verify. */
dmabuf_dent_put(dmabuf);
return 0;
}
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
@@ -491,7 +487,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
.release = dma_buf_file_release,
.release = dma_buf_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,

View File

@@ -617,13 +617,11 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
goto out;
}
if (drvdata->enable) {
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
ret = -EINVAL;
goto out;
}
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
ret = -EINVAL;
goto out;
}
/* Don't interfere if operated from Perf */

View File

@@ -3006,6 +3006,15 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
err = -EIO;
goto err_command;
}
/*
* The card should drive cmd and dat[0:3] low immediately
* after the response of cmd11, but wait 1 ms to be sure
*/
mmc_delay(1);
if (host->ops->card_busy && !host->ops->card_busy(host)) {
err = -EAGAIN;
goto power_cycle;
}
/*
* During a signal voltage level switch, the clock must be gated
* for 5 ms according to the SD spec

View File

@@ -101,13 +101,15 @@ static u32 pps_config_sub_second_increment(void __iomem *ioaddr,
if (!(value & PTP_TCR_TSCTRLSSR))
data = div_u64((data * 1000), 465);
data &= PTP_SSIR_SSINC_MASK;
if (data > PTP_SSIR_SSINC_MAX)
data = PTP_SSIR_SSINC_MAX;
reg_value = data;
if (gmac4)
reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
sns_inc &= PTP_SSIR_SNSINC_MASK;
if (sns_inc > PTP_SSIR_SNSINC_MAX)
sns_inc = PTP_SSIR_SNSINC_MAX;
reg_value2 = sns_inc;
if (gmac4)
reg_value2 <<= GMAC4_PTP_SSIR_SNSINC_SHIFT;

View File

@@ -68,7 +68,7 @@
/* SSIR defines */
#define PTP_SSIR_SSINC_MAX 0xff
#define GMAC4_PTP_SSIR_SSINC_SHIFT 16
#define PTP_SSIR_SNSINC_MASK 0xff
#define PTP_SSIR_SNSINC_MAX 0xff
#define GMAC4_PTP_SSIR_SNSINC_SHIFT 8
#endif /* __STMMAC_PTP_H__ */

View File

@@ -560,11 +560,17 @@ static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
/*
* TODO: Once clients remove their hacks where they assume kmap(ed)
* addresses are virtually contiguous implement this properly
*/
void *vaddr = ion_dma_buf_vmap(dmabuf);
struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;
if (!buffer->heap->ops->map_kernel) {
pr_err("%s: map kernel is not implemented by this heap.\n",
__func__);
return ERR_PTR(-ENOTTY);
}
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
mutex_unlock(&buffer->lock);
if (IS_ERR(vaddr))
return vaddr;
@@ -575,11 +581,13 @@ static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
void *ptr)
{
/*
* TODO: Once clients remove their hacks where they assume kmap(ed)
* addresses are virtually contiguous implement this properly
*/
ion_dma_buf_vunmap(dmabuf, ptr);
struct ion_buffer *buffer = dmabuf->priv;
if (buffer->heap->ops->map_kernel) {
mutex_lock(&buffer->lock);
ion_buffer_kmap_put(buffer);
mutex_unlock(&buffer->lock);
}
}
static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,

View File

@@ -838,8 +838,8 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (result)
goto remove_upper_file;
sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id);
snprintf(dev->weight_attr_name, sizeof(dev->weight_attr_name),
"cdev%d_weight", dev->id);
sysfs_attr_init(&dev->weight_attr.attr);
dev->weight_attr.attr.name = dev->weight_attr_name;
dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO;

View File

@@ -177,8 +177,6 @@ struct gadget_config_name {
struct list_head list;
};
#define MAX_USB_STRING_LEN 126
#define MAX_USB_STRING_WITH_NULL_LEN (MAX_USB_STRING_LEN+1)
#ifdef CONFIG_USB_TYPEC_MANAGER_NOTIFIER
int dwc3_gadget_get_cmply_link_state_wrapper(void)
{
@@ -203,6 +201,8 @@ int dwc3_gadget_get_cmply_link_state_wrapper(void)
EXPORT_SYMBOL(dwc3_gadget_get_cmply_link_state_wrapper);
#endif
#define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1)
static int usb_string_copy(const char *s, char **s_copy)
{
int ret;
@@ -215,7 +215,7 @@ static int usb_string_copy(const char *s, char **s_copy)
if (copy) {
str = copy;
} else {
str = kmalloc(MAX_USB_STRING_WITH_NULL_LEN, GFP_KERNEL);
str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL);
if (!str)
return -ENOMEM;
}

View File

@@ -3853,8 +3853,6 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->function.ssp_descriptors = NULL;
func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
ffs->setup_state, ffs->flags);
}

View File

@@ -1438,8 +1438,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (newly_dirty)
__mark_inode_dirty(inode, I_DIRTY_PAGES);
if (dirty & I_DIRTY_TIME)
mark_inode_dirty_sync(inode);
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & ~I_DIRTY_PAGES) {
int err = write_inode(inode, wbc);

View File

@@ -2413,6 +2413,20 @@ void drop_collected_mounts(struct vfsmount *mnt)
namespace_unlock();
}
/*
 * Return true if any mount directly under @mnt that lies within the
 * subtree rooted at @dentry is marked MNT_LOCKED. Used by
 * clone_private_mount() to refuse cloning trees with locked children.
 */
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
struct mount *child;
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
/* Ignore children mounted outside the @dentry subtree. */
if (!is_subdir(child->mnt_mountpoint, dentry))
continue;
if (child->mnt.mnt_flags & MNT_LOCKED)
return true;
}
return false;
}
/**
* clone_private_mount - create a private clone of a path
*
@@ -2434,6 +2448,9 @@ struct vfsmount *clone_private_mount(const struct path *path)
if (!check_mnt(old_mnt))
goto invalid;
if (has_locked_children(old_mnt, path->dentry))
goto invalid;
new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
up_read(&namespace_sem);
@@ -2784,23 +2801,6 @@ static int do_change_type(struct path *path, int ms_flags)
return err;
}
/*
 * Return true if any mount directly under @mnt that lies within the
 * subtree rooted at @dentry is marked MNT_LOCKED.
 * NOTE(review): under CONFIG_RKP_NS_PROT, child->mnt is dereferenced as
 * a pointer (child->mnt->mnt_flags) rather than an embedded struct —
 * presumably the RKP patches redefine struct mount; verify.
 */
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
struct mount *child;
list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
/* Ignore children mounted outside the @dentry subtree. */
if (!is_subdir(child->mnt_mountpoint, dentry))
continue;
#ifdef CONFIG_RKP_NS_PROT
if (child->mnt->mnt_flags & MNT_LOCKED)
#else
if (child->mnt.mnt_flags & MNT_LOCKED)
#endif
return true;
}
return false;
}
/*
* do loopback mount.
*/

View File

@@ -236,6 +236,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strscpy);
#endif
#ifndef __HAVE_ARCH_STPCPY
/**
* stpcpy - copy a string from src to dest returning a pointer to the new end
* of dest, including src's %NUL-terminator. May overrun dest.
@@ -244,14 +245,13 @@ EXPORT_SYMBOL(strscpy);
* @src: pointer to the beginning of string being copied from. Must not overlap
* dest.
*
* stpcpy differs from strcpy in a key way: the return value is a pointer
* to the new %NUL-terminating character in @dest. (For strcpy, the return
* value is a pointer to the start of @dest). This interface is considered
* unsafe as it doesn't perform bounds checking of the inputs. As such it's
* not recommended for usage. Instead, its definition is provided in case
* the compiler lowers other libcalls to stpcpy.
* stpcpy differs from strcpy in a key way: the return value is the new
* %NUL-terminated character. (for strcpy, the return value is a pointer to
* src. This interface is considered unsafe as it doesn't perform bounds
* checking of the inputs. As such it's not recommended for usage. Instead,
* its definition is provided in case the compiler lowers other libcalls to
* stpcpy.
*/
char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
{
while ((*dest++ = *src++) != '\0')
@@ -259,6 +259,7 @@ char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
return --dest;
}
EXPORT_SYMBOL(stpcpy);
#endif
/**
* strscpy_pad() - Copy a C-string into a sized buffer