Merge android12-5.10 into android12-5.10-lts
This merges the android12-5.10 branch into the -lts branch, catching it up with the latest changes in there. It contains the following commits:

* da5fb8337a Merge tag 'android12-5.10.238_r00' into android12-5.10
* 245d93138b ANDROID: fix kernelci build breaks for dcn_calcs
* d2a168c1f2 FROMGIT: f2fs: sysfs: export linear_lookup in features directory
* feadd4d974 FROMGIT: f2fs: sysfs: add encoding_flags entry
* 229ec4d838 FROMGIT: f2fs: support to disable linear lookup fallback
* 7402843c3f Revert "ANDROID: usb: Optimization the transfer rate of accessory mode in USB3.2 mode"
* d71b94aa8f BACKPORT: binder: Create safe versions of binder log files
* 8afc5b8d6b UPSTREAM: binder: Refactor binder_node print synchronization

Change-Id: Ic1e20944063cb680af3fb79cb052b3d3dcdd7dae
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
@@ -236,7 +236,7 @@ Description: Shows all enabled kernel features.
|
||||
inode_checksum, flexible_inline_xattr, quota_ino,
|
||||
inode_crtime, lost_found, verity, sb_checksum,
|
||||
casefold, readonly, compression, test_dummy_encryption_v2,
|
||||
atomic_write, pin_file, encrypted_casefold.
|
||||
atomic_write, pin_file, encrypted_casefold, linear_lookup.
|
||||
|
||||
What: /sys/fs/f2fs/<disk>/inject_rate
|
||||
Date: May 2016
|
||||
@@ -538,3 +538,16 @@ What: /sys/fs/f2fs/<disk>/last_age_weight
|
||||
Date: January 2023
|
||||
Contact: "Ping Xiong" <xiongping1@xiaomi.com>
|
||||
Description: When DATA SEPARATION is on, it controls the weight of last data block age.
|
||||
|
||||
What: /sys/fs/f2fs/<disk>/encoding_flags
|
||||
Date: April 2025
|
||||
Contact: "Chao Yu" <chao@kernel.org>
|
||||
Description: This is a read-only entry to show the value of sb.s_encoding_flags, the
|
||||
value is hexadecimal.
|
||||
|
||||
============================ ==========
|
||||
Flag_Name Flag_Value
|
||||
============================ ==========
|
||||
SB_ENC_STRICT_MODE_FL 0x00000001
|
||||
SB_ENC_NO_COMPAT_FALLBACK_FL 0x00000002
|
||||
============================ ==========
|
||||
|
||||
@@ -6378,10 +6378,10 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
|
||||
}
|
||||
|
||||
static void print_binder_work_ilocked(struct seq_file *m,
|
||||
struct binder_proc *proc,
|
||||
const char *prefix,
|
||||
const char *transaction_prefix,
|
||||
struct binder_work *w)
|
||||
struct binder_proc *proc,
|
||||
const char *prefix,
|
||||
const char *transaction_prefix,
|
||||
struct binder_work *w, bool hash_ptrs)
|
||||
{
|
||||
struct binder_node *node;
|
||||
struct binder_transaction *t;
|
||||
@@ -6404,9 +6404,15 @@ static void print_binder_work_ilocked(struct seq_file *m,
|
||||
break;
|
||||
case BINDER_WORK_NODE:
|
||||
node = container_of(w, struct binder_node, work);
|
||||
seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
|
||||
prefix, node->debug_id,
|
||||
(u64)node->ptr, (u64)node->cookie);
|
||||
if (hash_ptrs)
|
||||
seq_printf(m, "%snode work %d: u%p c%p\n",
|
||||
prefix, node->debug_id,
|
||||
(void *)(long)node->ptr,
|
||||
(void *)(long)node->cookie);
|
||||
else
|
||||
seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
|
||||
prefix, node->debug_id,
|
||||
(u64)node->ptr, (u64)node->cookie);
|
||||
break;
|
||||
case BINDER_WORK_DEAD_BINDER:
|
||||
seq_printf(m, "%shas dead binder\n", prefix);
|
||||
@@ -6431,7 +6437,7 @@ static void print_binder_work_ilocked(struct seq_file *m,
|
||||
|
||||
static void print_binder_thread_ilocked(struct seq_file *m,
|
||||
struct binder_thread *thread,
|
||||
int print_always)
|
||||
bool print_always, bool hash_ptrs)
|
||||
{
|
||||
struct binder_transaction *t;
|
||||
struct binder_work *w;
|
||||
@@ -6461,14 +6467,16 @@ static void print_binder_thread_ilocked(struct seq_file *m,
|
||||
}
|
||||
list_for_each_entry(w, &thread->todo, entry) {
|
||||
print_binder_work_ilocked(m, thread->proc, " ",
|
||||
" pending transaction", w);
|
||||
" pending transaction",
|
||||
w, hash_ptrs);
|
||||
}
|
||||
if (!print_always && m->count == header_pos)
|
||||
m->count = start_pos;
|
||||
}
|
||||
|
||||
static void print_binder_node_nilocked(struct seq_file *m,
|
||||
struct binder_node *node)
|
||||
struct binder_node *node,
|
||||
bool hash_ptrs)
|
||||
{
|
||||
struct binder_ref *ref;
|
||||
struct binder_work *w;
|
||||
@@ -6478,8 +6486,13 @@ static void print_binder_node_nilocked(struct seq_file *m,
|
||||
hlist_for_each_entry(ref, &node->refs, node_entry)
|
||||
count++;
|
||||
|
||||
seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
|
||||
node->debug_id, (u64)node->ptr, (u64)node->cookie,
|
||||
if (hash_ptrs)
|
||||
seq_printf(m, " node %d: u%p c%p", node->debug_id,
|
||||
(void *)(long)node->ptr, (void *)(long)node->cookie);
|
||||
else
|
||||
seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
|
||||
(u64)node->ptr, (u64)node->cookie);
|
||||
seq_printf(m, " pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
|
||||
node->sched_policy, node->min_priority,
|
||||
node->has_strong_ref, node->has_weak_ref,
|
||||
node->local_strong_refs, node->local_weak_refs,
|
||||
@@ -6493,7 +6506,8 @@ static void print_binder_node_nilocked(struct seq_file *m,
|
||||
if (node->proc) {
|
||||
list_for_each_entry(w, &node->async_todo, entry)
|
||||
print_binder_work_ilocked(m, node->proc, " ",
|
||||
" pending async transaction", w);
|
||||
" pending async transaction",
|
||||
w, hash_ptrs);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6509,8 +6523,54 @@ static void print_binder_ref_olocked(struct seq_file *m,
|
||||
binder_node_unlock(ref->node);
|
||||
}
|
||||
|
||||
static void print_binder_proc(struct seq_file *m,
|
||||
struct binder_proc *proc, int print_all)
|
||||
/**
|
||||
* print_next_binder_node_ilocked() - Print binder_node from a locked list
|
||||
* @m: struct seq_file for output via seq_printf()
|
||||
* @proc: struct binder_proc we hold the inner_proc_lock to (if any)
|
||||
* @node: struct binder_node to print fields of
|
||||
* @prev_node: struct binder_node we hold a temporary reference to (if any)
|
||||
* @hash_ptrs: whether to hash @node's binder_uintptr_t fields
|
||||
*
|
||||
* Helper function to handle synchronization around printing a struct
|
||||
* binder_node while iterating through @proc->nodes or the dead nodes list.
|
||||
* Caller must hold either @proc->inner_lock (for live nodes) or
|
||||
* binder_dead_nodes_lock. This lock will be released during the body of this
|
||||
* function, but it will be reacquired before returning to the caller.
|
||||
*
|
||||
* Return: pointer to the struct binder_node we hold a tmpref on
|
||||
*/
|
||||
static struct binder_node *
|
||||
print_next_binder_node_ilocked(struct seq_file *m, struct binder_proc *proc,
|
||||
struct binder_node *node,
|
||||
struct binder_node *prev_node, bool hash_ptrs)
|
||||
{
|
||||
/*
|
||||
* Take a temporary reference on the node so that isn't freed while
|
||||
* we print it.
|
||||
*/
|
||||
binder_inc_node_tmpref_ilocked(node);
|
||||
/*
|
||||
* Live nodes need to drop the inner proc lock and dead nodes need to
|
||||
* drop the binder_dead_nodes_lock before trying to take the node lock.
|
||||
*/
|
||||
if (proc)
|
||||
binder_inner_proc_unlock(proc);
|
||||
else
|
||||
spin_unlock(&binder_dead_nodes_lock);
|
||||
if (prev_node)
|
||||
binder_put_node(prev_node);
|
||||
binder_node_inner_lock(node);
|
||||
print_binder_node_nilocked(m, node, hash_ptrs);
|
||||
binder_node_inner_unlock(node);
|
||||
if (proc)
|
||||
binder_inner_proc_lock(proc);
|
||||
else
|
||||
spin_lock(&binder_dead_nodes_lock);
|
||||
return node;
|
||||
}
|
||||
|
||||
static void print_binder_proc(struct seq_file *m, struct binder_proc *proc,
|
||||
bool print_all, bool hash_ptrs)
|
||||
{
|
||||
struct binder_work *w;
|
||||
struct rb_node *n;
|
||||
@@ -6523,31 +6583,19 @@ static void print_binder_proc(struct seq_file *m,
|
||||
header_pos = m->count;
|
||||
|
||||
binder_inner_proc_lock(proc);
|
||||
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
|
||||
for (n = rb_first(&proc->threads); n; n = rb_next(n))
|
||||
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
|
||||
rb_node), print_all);
|
||||
rb_node), print_all, hash_ptrs);
|
||||
|
||||
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
|
||||
for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
|
||||
struct binder_node *node = rb_entry(n, struct binder_node,
|
||||
rb_node);
|
||||
if (!print_all && !node->has_async_transaction)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* take a temporary reference on the node so it
|
||||
* survives and isn't removed from the tree
|
||||
* while we print it.
|
||||
*/
|
||||
binder_inc_node_tmpref_ilocked(node);
|
||||
/* Need to drop inner lock to take node lock */
|
||||
binder_inner_proc_unlock(proc);
|
||||
if (last_node)
|
||||
binder_put_node(last_node);
|
||||
binder_node_inner_lock(node);
|
||||
print_binder_node_nilocked(m, node);
|
||||
binder_node_inner_unlock(node);
|
||||
last_node = node;
|
||||
binder_inner_proc_lock(proc);
|
||||
last_node = print_next_binder_node_ilocked(m, proc, node,
|
||||
last_node,
|
||||
hash_ptrs);
|
||||
}
|
||||
binder_inner_proc_unlock(proc);
|
||||
if (last_node)
|
||||
@@ -6555,19 +6603,18 @@ static void print_binder_proc(struct seq_file *m,
|
||||
|
||||
if (print_all) {
|
||||
binder_proc_lock(proc);
|
||||
for (n = rb_first(&proc->refs_by_desc);
|
||||
n != NULL;
|
||||
n = rb_next(n))
|
||||
for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n))
|
||||
print_binder_ref_olocked(m, rb_entry(n,
|
||||
struct binder_ref,
|
||||
rb_node_desc));
|
||||
struct binder_ref,
|
||||
rb_node_desc));
|
||||
binder_proc_unlock(proc);
|
||||
}
|
||||
binder_alloc_print_allocated(m, &proc->alloc);
|
||||
binder_inner_proc_lock(proc);
|
||||
list_for_each_entry(w, &proc->todo, entry)
|
||||
print_binder_work_ilocked(m, proc, " ",
|
||||
" pending transaction", w);
|
||||
" pending transaction", w,
|
||||
hash_ptrs);
|
||||
list_for_each_entry(w, &proc->delivered_death, entry) {
|
||||
seq_puts(m, " has delivered dead binder\n");
|
||||
break;
|
||||
@@ -6693,7 +6740,7 @@ static void print_binder_proc_stats(struct seq_file *m,
|
||||
count = 0;
|
||||
ready_threads = 0;
|
||||
binder_inner_proc_lock(proc);
|
||||
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
|
||||
for (n = rb_first(&proc->threads); n; n = rb_next(n))
|
||||
count++;
|
||||
|
||||
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
|
||||
@@ -6707,7 +6754,7 @@ static void print_binder_proc_stats(struct seq_file *m,
|
||||
ready_threads,
|
||||
free_async_space);
|
||||
count = 0;
|
||||
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
|
||||
for (n = rb_first(&proc->nodes); n; n = rb_next(n))
|
||||
count++;
|
||||
binder_inner_proc_unlock(proc);
|
||||
seq_printf(m, " nodes: %d\n", count);
|
||||
@@ -6715,7 +6762,7 @@ static void print_binder_proc_stats(struct seq_file *m,
|
||||
strong = 0;
|
||||
weak = 0;
|
||||
binder_proc_lock(proc);
|
||||
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
|
||||
for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
|
||||
struct binder_ref *ref = rb_entry(n, struct binder_ref,
|
||||
rb_node_desc);
|
||||
count++;
|
||||
@@ -6742,7 +6789,7 @@ static void print_binder_proc_stats(struct seq_file *m,
|
||||
print_binder_stats(m, " ", &proc->stats);
|
||||
}
|
||||
|
||||
static int state_show(struct seq_file *m, void *unused)
|
||||
static void print_binder_state(struct seq_file *m, bool hash_ptrs)
|
||||
{
|
||||
struct binder_proc *proc;
|
||||
struct binder_node *node;
|
||||
@@ -6753,31 +6800,40 @@ static int state_show(struct seq_file *m, void *unused)
|
||||
spin_lock(&binder_dead_nodes_lock);
|
||||
if (!hlist_empty(&binder_dead_nodes))
|
||||
seq_puts(m, "dead nodes:\n");
|
||||
hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
|
||||
/*
|
||||
* take a temporary reference on the node so it
|
||||
* survives and isn't removed from the list
|
||||
* while we print it.
|
||||
*/
|
||||
node->tmp_refs++;
|
||||
spin_unlock(&binder_dead_nodes_lock);
|
||||
if (last_node)
|
||||
binder_put_node(last_node);
|
||||
binder_node_lock(node);
|
||||
print_binder_node_nilocked(m, node);
|
||||
binder_node_unlock(node);
|
||||
last_node = node;
|
||||
spin_lock(&binder_dead_nodes_lock);
|
||||
}
|
||||
hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
|
||||
last_node = print_next_binder_node_ilocked(m, NULL, node,
|
||||
last_node,
|
||||
hash_ptrs);
|
||||
spin_unlock(&binder_dead_nodes_lock);
|
||||
if (last_node)
|
||||
binder_put_node(last_node);
|
||||
|
||||
mutex_lock(&binder_procs_lock);
|
||||
hlist_for_each_entry(proc, &binder_procs, proc_node)
|
||||
print_binder_proc(m, proc, 1);
|
||||
print_binder_proc(m, proc, true, hash_ptrs);
|
||||
mutex_unlock(&binder_procs_lock);
|
||||
}
|
||||
|
||||
static void print_binder_transactions(struct seq_file *m, bool hash_ptrs)
|
||||
{
|
||||
struct binder_proc *proc;
|
||||
|
||||
seq_puts(m, "binder transactions:\n");
|
||||
mutex_lock(&binder_procs_lock);
|
||||
hlist_for_each_entry(proc, &binder_procs, proc_node)
|
||||
print_binder_proc(m, proc, false, hash_ptrs);
|
||||
mutex_unlock(&binder_procs_lock);
|
||||
}
|
||||
|
||||
static int state_show(struct seq_file *m, void *unused)
|
||||
{
|
||||
print_binder_state(m, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int state_hashed_show(struct seq_file *m, void *unused)
|
||||
{
|
||||
print_binder_state(m, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -6799,14 +6855,13 @@ static int stats_show(struct seq_file *m, void *unused)
|
||||
|
||||
static int transactions_show(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct binder_proc *proc;
|
||||
|
||||
seq_puts(m, "binder transactions:\n");
|
||||
mutex_lock(&binder_procs_lock);
|
||||
hlist_for_each_entry(proc, &binder_procs, proc_node)
|
||||
print_binder_proc(m, proc, 0);
|
||||
mutex_unlock(&binder_procs_lock);
|
||||
print_binder_transactions(m, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int transactions_hashed_show(struct seq_file *m, void *unused)
|
||||
{
|
||||
print_binder_transactions(m, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -6819,7 +6874,7 @@ static int proc_show(struct seq_file *m, void *unused)
|
||||
hlist_for_each_entry(itr, &binder_procs, proc_node) {
|
||||
if (itr->pid == pid) {
|
||||
seq_puts(m, "binder proc state:\n");
|
||||
print_binder_proc(m, itr, 1);
|
||||
print_binder_proc(m, itr, true, false);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&binder_procs_lock);
|
||||
@@ -6886,8 +6941,10 @@ const struct file_operations binder_fops = {
|
||||
};
|
||||
|
||||
DEFINE_SHOW_ATTRIBUTE(state);
|
||||
DEFINE_SHOW_ATTRIBUTE(state_hashed);
|
||||
DEFINE_SHOW_ATTRIBUTE(stats);
|
||||
DEFINE_SHOW_ATTRIBUTE(transactions);
|
||||
DEFINE_SHOW_ATTRIBUTE(transactions_hashed);
|
||||
DEFINE_SHOW_ATTRIBUTE(transaction_log);
|
||||
|
||||
const struct binder_debugfs_entry binder_debugfs_entries[] = {
|
||||
@@ -6897,6 +6954,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
|
||||
.fops = &state_fops,
|
||||
.data = NULL,
|
||||
},
|
||||
{
|
||||
.name = "state_hashed",
|
||||
.mode = 0444,
|
||||
.fops = &state_hashed_fops,
|
||||
.data = NULL,
|
||||
},
|
||||
{
|
||||
.name = "stats",
|
||||
.mode = 0444,
|
||||
@@ -6909,6 +6972,12 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
|
||||
.fops = &transactions_fops,
|
||||
.data = NULL,
|
||||
},
|
||||
{
|
||||
.name = "transactions_hashed",
|
||||
.mode = 0444,
|
||||
.fops = &transactions_hashed_fops,
|
||||
.data = NULL,
|
||||
},
|
||||
{
|
||||
.name = "transaction_log",
|
||||
.mode = 0444,
|
||||
|
||||
@@ -505,15 +505,15 @@ static void dcn_bw_calc_rq_dlg_ttu(
|
||||
/*todo: soc->sr_enter_plus_exit_time??*/
|
||||
dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
|
||||
|
||||
dml1_rq_dlg_get_rq_params(dml, rq_param, input.pipe.src);
|
||||
dml1_extract_rq_regs(dml, rq_regs, rq_param);
|
||||
dml1_rq_dlg_get_rq_params(dml, rq_param, input->pipe.src);
|
||||
dml1_extract_rq_regs(dml, rq_regs, *rq_param);
|
||||
dml1_rq_dlg_get_dlg_params(
|
||||
dml,
|
||||
dlg_regs,
|
||||
ttu_regs,
|
||||
rq_param->dlg,
|
||||
dlg_sys_param,
|
||||
input,
|
||||
*dlg_sys_param,
|
||||
*input,
|
||||
true,
|
||||
true,
|
||||
v->pte_enable == dcn_bw_yes,
|
||||
|
||||
@@ -171,7 +171,7 @@ static struct usb_ss_ep_comp_descriptor acc_superspeedplus_comp_desc = {
|
||||
|
||||
/* the following 2 values can be tweaked if necessary */
|
||||
.bMaxBurst = 6,
|
||||
.bmAttributes = 16,
|
||||
/* .bmAttributes = 0, */
|
||||
};
|
||||
|
||||
static struct usb_endpoint_descriptor acc_superspeed_in_desc = {
|
||||
@@ -196,7 +196,7 @@ static struct usb_ss_ep_comp_descriptor acc_superspeed_comp_desc = {
|
||||
|
||||
/* the following 2 values can be tweaked if necessary */
|
||||
.bMaxBurst = 6,
|
||||
.bmAttributes = 16,
|
||||
/* .bmAttributes = 0, */
|
||||
};
|
||||
|
||||
static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
|
||||
|
||||
@@ -438,7 +438,8 @@ start_find_entry:
|
||||
|
||||
out:
|
||||
#if IS_ENABLED(CONFIG_UNICODE)
|
||||
if (IS_CASEFOLDED(dir) && !de && use_hash) {
|
||||
if (!sb_no_casefold_compat_fallback(dir->i_sb) &&
|
||||
IS_CASEFOLDED(dir) && !de && use_hash) {
|
||||
use_hash = false;
|
||||
goto start_find_entry;
|
||||
}
|
||||
|
||||
@@ -213,6 +213,13 @@ static ssize_t encoding_show(struct f2fs_attr *a,
|
||||
return sprintf(buf, "(none)");
|
||||
}
|
||||
|
||||
static ssize_t encoding_flags_show(struct f2fs_attr *a,
|
||||
struct f2fs_sb_info *sbi, char *buf)
|
||||
{
|
||||
return sysfs_emit(buf, "%x\n",
|
||||
le16_to_cpu(F2FS_RAW_SUPER(sbi)->s_encoding_flags));
|
||||
}
|
||||
|
||||
static ssize_t mounted_time_sec_show(struct f2fs_attr *a,
|
||||
struct f2fs_sb_info *sbi, char *buf)
|
||||
{
|
||||
@@ -756,6 +763,7 @@ F2FS_GENERAL_RO_ATTR(features);
|
||||
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
|
||||
F2FS_GENERAL_RO_ATTR(unusable);
|
||||
F2FS_GENERAL_RO_ATTR(encoding);
|
||||
F2FS_GENERAL_RO_ATTR(encoding_flags);
|
||||
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
|
||||
F2FS_GENERAL_RO_ATTR(main_blkaddr);
|
||||
F2FS_GENERAL_RO_ATTR(pending_discard);
|
||||
@@ -802,6 +810,9 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_saved_block, compr_saved_block);
|
||||
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_new_inode, compr_new_inode);
|
||||
#endif
|
||||
F2FS_FEATURE_RO_ATTR(pin_file);
|
||||
#ifdef CONFIG_UNICODE
|
||||
F2FS_FEATURE_RO_ATTR(linear_lookup);
|
||||
#endif
|
||||
|
||||
/* For ATGC */
|
||||
F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_candidate_ratio, candidate_ratio);
|
||||
@@ -870,6 +881,7 @@ static struct attribute *f2fs_attrs[] = {
|
||||
ATTR_LIST(reserved_blocks),
|
||||
ATTR_LIST(current_reserved_blocks),
|
||||
ATTR_LIST(encoding),
|
||||
ATTR_LIST(encoding_flags),
|
||||
ATTR_LIST(mounted_time_sec),
|
||||
#ifdef CONFIG_F2FS_STAT_FS
|
||||
ATTR_LIST(cp_foreground_calls),
|
||||
@@ -930,6 +942,9 @@ static struct attribute *f2fs_feat_attrs[] = {
|
||||
ATTR_LIST(compression),
|
||||
#endif
|
||||
ATTR_LIST(pin_file),
|
||||
#ifdef CONFIG_UNICODE
|
||||
ATTR_LIST(linear_lookup),
|
||||
#endif
|
||||
NULL,
|
||||
};
|
||||
ATTRIBUTE_GROUPS(f2fs_feat);
|
||||
|
||||
@@ -1409,11 +1409,19 @@ extern int send_sigurg(struct fown_struct *fown);
|
||||
#define SB_NOUSER BIT(31)
|
||||
|
||||
/* These flags relate to encoding and casefolding */
|
||||
#define SB_ENC_STRICT_MODE_FL (1 << 0)
|
||||
#define SB_ENC_STRICT_MODE_FL (1 << 0)
|
||||
#define SB_ENC_NO_COMPAT_FALLBACK_FL (1 << 1)
|
||||
|
||||
#define sb_has_strict_encoding(sb) \
|
||||
(sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
|
||||
|
||||
#if IS_ENABLED(CONFIG_UNICODE)
|
||||
#define sb_no_casefold_compat_fallback(sb) \
|
||||
(sb->s_encoding_flags & SB_ENC_NO_COMPAT_FALLBACK_FL)
|
||||
#else
|
||||
#define sb_no_casefold_compat_fallback(sb) (1)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Umount options
|
||||
*/
|
||||
|
||||
Reference in New Issue
Block a user