* refs/heads/tmp-a9d0273:
Linux 4.9.84
crypto: s5p-sss - Fix kernel Oops in AES-ECB mode
KVM: nVMX: invvpid handling improvements
KVM: VMX: clean up declaration of VPID/EPT invalidation types
KVM: async_pf: Fix #DF due to inject "Page not Present" and "Page Ready" exceptions simultaneously
x86/microcode/AMD: Change load_microcode_amd()'s param to bool to fix preemptibility bug
usb: phy: msm: add regulator dependency
arm64: fix warning about swapper_pg_dir overflow
idle: i7300: add PCI dependency
spi: bcm-qspi: shut up warning about cfi header inclusion
binfmt_elf: compat: avoid unused function warning
arm64: sunxi: always enable reset controller
drm/i915: hide unused intel_panel_set_backlight function
kasan: rework Kconfig settings
clk: meson: gxbb: fix build error without RESET_CONTROLLER
ISDN: eicon: reduce stack size of sig_ind function
tw5864: use dev_warn instead of WARN to shut up warning
em28xx: only use mt9v011 if camera support is enabled
go7007: add MEDIA_CAMERA_SUPPORT dependency
tc358743: fix register i2c_rd/wr functions
shmem: fix compilation warnings on unused functions
KVM: add X86_LOCAL_APIC dependency
Input: tca8418_keypad - hide gcc-4.9 -Wmaybe-uninitialized warning
drm/nouveau: hide gcc-4.9 -Wmaybe-uninitialized
rbd: silence bogus -Wmaybe-uninitialized warning
drm: exynos: mark pm functions as __maybe_unused
security/keys: BIG_KEY requires CONFIG_CRYPTO
cw1200: fix bogus maybe-uninitialized warning
reiserfs: avoid a -Wmaybe-uninitialized warning
ALSA: hda/ca0132 - fix possible NULL pointer use
arm64: Kconfig: select COMPAT_BINFMT_ELF only when BINFMT_ELF is set
scsi: advansys: fix uninitialized data access
x86/vm86: Fix unused variable warning if THP is disabled
x86/platform: Add PCI dependency for PUNIT_ATOM_DEBUG
dmaengine: zx: fix build warning
x86: add MULTIUSER dependency for KVM
thermal: fix INTEL_SOC_DTS_IOSF_CORE dependencies
x86/build: Silence the build with "make -s"
tools build: Add tools tree support for 'make -s'
x86/fpu/math-emu: Fix possible uninitialized variable use
arm64: define BUG() instruction without CONFIG_BUG
gpio: xgene: mark PM functions as __maybe_unused
x86/ras/inject: Make it depend on X86_LOCAL_APIC=y
scsi: advansys: fix build warning for PCI=n
video: fbdev: via: remove possibly unused variables
perf: xgene: Include module.h
PCI: Change pci_host_common_probe() visibility
usb: musb: fix compilation warning on unused function
platform/x86: intel_mid_thermal: Fix suspend handlers unused warning
gpio: intel-mid: Fix build warning when !CONFIG_PM
PCI: vmd: Fix suspend handlers defined-but-not-used warning
perf/x86: Shut up false-positive -Wmaybe-uninitialized warning
vmxnet3: prevent building with 64K pages
clk: sunxi-ng: fix build error without CONFIG_RESET_CONTROLLER
shmem: avoid maybe-uninitialized warning
drm/i915: fix intel_backlight_device_register declaration
crypto: talitos - fix Kernel Oops on hashing an empty file
powerpc/64s: Improve RFI L1-D cache flush fallback
powerpc/64s: Simple RFI macro conversions
powerpc/64s: Fix conversion of slb_miss_common to use RFI_TO_USER/KERNEL
hippi: Fix a possible sleep-in-atomic bug in rr_close
xen: XEN_ACPI_PROCESSOR is Dom0-only
platform/x86: dell-laptop: Fix keyboard max lighting for Dell Latitude E6410
x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
mm/early_ioremap: Fix boot hang with earlyprintk=efi,keep
usb: dwc3: of-simple: fix missing clk_disable_unprepare
usb: dwc3: gadget: Wait longer for controller to end command processing
dmaengine: jz4740: disable/unprepare clk if probe fails
drm/armada: fix leak of crtc structure
xfrm: Fix stack-out-of-bounds with misconfigured transport mode policies.
spi: sun4i: disable clocks in the remove function
ASoC: rockchip: disable clock on error
clk: fix a panic error caused by accessing NULL pointer
dmaengine: at_hdmac: fix potential NULL pointer dereference in atc_prep_dma_interleaved
dmaengine: ioat: Fix error handling path
gianfar: Disable EEE autoneg by default
X.509: fix printing uninitialized stack memory when OID is empty
net: ethernet: arc: fix error handling in emac_rockchip_probe
brcmfmac: Avoid build error with make W=1
btrfs: Fix possible off-by-one in btrfs_search_path_in_tree
net_sched: red: Avoid illegal values
net_sched: red: Avoid division by zero
gianfar: fix flooded alignment reports caused by padding issue
ARM: dts: Fix elm interrupt compiler warning
s390/dasd: prevent prefix I/O error
powerpc/perf: Fix oops when grouping different pmu events
m68k: add missing SOFTIRQENTRY_TEXT linker section
ipvlan: Add the skb->mark as flow4's member to lookup route
scripts/kernel-doc: Don't fail with status != 0 if error encountered with -none
sctp: only update outstanding_bytes for transmitted queue when doing prsctp_prune
RDMA/cma: Make sure that PSN is not over max allowed
i40iw: Correct ARP index mask
pinctrl: sunxi: Fix A64 UART mux value
pinctrl: sunxi: Fix A80 interrupt pin bank
media: s5k6aa: describe some function parameters
perf bench numa: Fixup discontiguous/sparse numa nodes
perf top: Fix window dimensions change handling
ARM: dts: am437x-cm-t43: Correct the dmas property of spi0
ARM: dts: am4372: Correct the interrupts_properties of McASP
ARM: dts: logicpd-somlv: Fix wl127x pinmux
ARM: dts: logicpd-som-lv: Fix gpmc addresses for NAND and enet
ARM: dts: Fix omap4 hang with GPS connected to USB by using wakeupgen
ARM: AM33xx: PRM: Remove am33xx_pwrdm_read_prev_pwrst function
ARM: OMAP2+: Fix SRAM virt to phys translation for save_secure_ram_context
usb: build drivers/usb/common/ when USB_SUPPORT is set
usbip: keep usbip_device sockfd state in sync with tcp_socket
staging: iio: ad5933: switch buffer mode to software
staging: iio: adc: ad7192: fix external frequency setting
binder: check for binder_thread allocation failure in binder_poll()
staging: android: ashmem: Fix a race condition in pin ioctls
decnet: move nf_{get/set}sockopt outside sock lock
arm64: dts: add #cooling-cells to CPU nodes
ARM: 8743/1: bL_switcher: add MODULE_LICENSE tag
video: fbdev/mmp: add MODULE_LICENSE
ASoC: ux500: add MODULE_LICENSE tag
crypto: hash - prevent using keyed hashes without setting key
crypto: hash - annotate algorithms taking optional key
net: avoid skb_warn_bad_offload on IS_ERR
rds: tcp: atomically purge entries from rds_tcp_conn_list during netns delete
netfilter: xt_RATEEST: acquire xt_rateest_mutex for hash insert
netfilter: xt_cgroup: initialize info->priv in cgroup_mt_check_v1()
netfilter: on sockopt() acquire sock lock only in the required scope
netfilter: ipt_CLUSTERIP: fix out-of-bounds accesses in clusterip_tg_check()
netfilter: x_tables: avoid out-of-bounds reads in xt_request_find_{match|target}
netfilter: x_tables: fix int overflow in xt_alloc_table_info()
kcov: detect double association with a single task
KVM: x86: fix escape of guest dr6 to the host
blk_rq_map_user_iov: fix error override
staging: android: ion: Switch from WARN to pr_warn
staging: android: ion: Add __GFP_NOWARN for system contig heap
crypto: x86/twofish-3way - Fix %rbp usage
selinux: skip bounded transition processing if the policy isn't loaded
selinux: ensure the context is NUL terminated in security_context_to_sid_core()
Provide a function to create a NUL-terminated string from unterminated data
ptr_ring: fail early if queue occupies more than KMALLOC_MAX_SIZE
drm: Require __GFP_NOFAIL for the legacy drm_modeset_lock_all
blktrace: fix unlocked registration of tracepoints
sctp: set frag_point in sctp_setsockopt_maxseg correctly
xfrm: check id proto in validate_tmpl()
xfrm: Fix stack-out-of-bounds read on socket policy lookup.
mm,vmscan: Make unregister_shrinker() no-op if register_shrinker() failed.
xfrm: skip policies marked as dead while rehashing
cfg80211: check dev_set_name() return value
kcm: Only allow TCP sockets to be attached to a KCM mux
kcm: Check if sk_user_data already set in kcm_attach
vhost: use mutex_lock_nested() in vhost_dev_lock_vqs()
ANDROID: sdcardfs: Hold i_mutex for i_size_write
UPSTREAM: ANDROID: binder: synchronize_rcu() when using POLLFREE.
BACKPORT, FROMGIT: crypto: speck - add test vectors for Speck64-XTS
BACKPORT, FROMGIT: crypto: speck - add test vectors for Speck128-XTS
BACKPORT, FROMGIT: crypto: arm/speck - add NEON-accelerated implementation of Speck-XTS
FROMGIT: crypto: speck - export common helpers
BACKPORT, FROMGIT: crypto: speck - add support for the Speck block cipher
f2fs: updates on v4.16-rc1
Linux 4.9.83
media: r820t: fix r820t_write_reg for KASAN
ARM: dts: Delete bogus reference to the charlcd
arm: dts: mt2701: Add reset-cells
ARM: dts: s5pv210: add interrupt-parent for ohci
arm64: dts: msm8916: Add missing #phy-cells
ARM: pxa/tosa-bt: add MODULE_LICENSE tag
ARM: dts: exynos: fix RTC interrupt for exynos5410
vfs: don't do RCU lookup of empty pathnames
x86: fix build warning with 32-bit PAE
x86/cpu: Change type of x86_cache_size variable to unsigned int
x86/spectre: Fix an error message
x86/cpu: Rename cpu_data.x86_mask to cpu_data.x86_stepping
selftests/x86/mpx: Fix incorrect bounds with old _sigfault
x86/speculation: Add <asm/msr-index.h> dependency
nospec: Move array_index_nospec() parameter checking into separate macro
x86/speculation: Fix up array_index_nospec_mask() asm constraint
selftests/x86: Do not rely on "int $0x80" in single_step_syscall.c
selftests/x86: Do not rely on "int $0x80" in test_mremap_vdso.c
selftests/x86/pkeys: Remove unused functions
x86/speculation: Clean up various Spectre related details
X86/nVMX: Properly set spec_ctrl and pred_cmd before merging MSRs
KVM/x86: Reduce retpoline performance impact in slot_handle_level_range(), by always inlining iterator helper methods
x86/speculation: Correct Speculation Control microcode blacklist again
x86/speculation: Update Speculation Control microcode blacklist
compiler-gcc.h: Introduce __optimize function attribute
x86/entry/64/compat: Clear registers for compat syscalls, to reduce speculation attack surface
arm: spear13xx: Fix spics gpio controller's warning
arm: spear13xx: Fix dmas cells
arm: spear600: Add missing interrupt-parent of rtc
ARM: dts: nomadik: add interrupt-parent for clcd
ARM: dts: STi: Add gpio polarity for "hdmi,hpd-gpio" property
ARM: lpc3250: fix uda1380 gpio numbers
arm64: dts: msm8916: Correct ipc references for smsm
s390: fix handling of -1 in set{,fs}[gu]id16 syscalls
ocfs2: try a blocking lock before return AOP_TRUNCATED_PAGE
PM / devfreq: Propagate error from devfreq_add_device()
cpufreq: powernv: Don't assume distinct pstate values for nominal and pmin
RDMA/rxe: Fix a race condition related to the QP error state
kselftest: fix OOM in memory compaction test
IB/mlx4: Fix incorrectly releasing steerable UD QPs when have only ETH ports
IB/qib: Fix comparison error with qperf compare/swap test
powerpc: fix build errors in stable tree
dm: correctly handle chained bios in dec_pending()
usb: Move USB_UHCI_BIG_ENDIAN_* out of USB_SUPPORT
mvpp2: fix multicast address filter
ALSA: seq: Fix racy pool initializations
ALSA: usb-audio: add implicit fb quirk for Behringer UFX1204
ALSA: hda/realtek: PCI quirk for Fujitsu U7x7
ALSA: hda/realtek - Enable Thinkpad Dock device for ALC298 platform
ALSA: usb-audio: Fix UAC2 get_ctl request with a RANGE attribute
ALSA: hda - Fix headset mic detection problem for two Dell machines
mtd: nand: vf610: set correct ooblayout
9p/trans_virtio: discard zero-length reply
Btrfs: fix unexpected -EEXIST when creating new inode
Btrfs: fix btrfs_evict_inode to handle abnormal inodes correctly
Btrfs: fix extent state leak from tree log
Btrfs: fix crash due to not cleaning up tree log block's dirty bits
Btrfs: fix deadlock in run_delalloc_nocow
target/iscsi: avoid NULL dereference in CHAP auth error path
rtlwifi: rtl8821ae: Fix connection lost problem correctly
console/dummy: leave .con_font_get set to NULL
video: fbdev: atmel_lcdfb: fix display-timings lookup
PCI: keystone: Fix interrupt-controller-node lookup
MIPS: Fix typo BIG_ENDIAN to CPU_BIG_ENDIAN
mm: Fix memory size alignment in devm_memremap_pages_release()
mm: hide a #warning for COMPILE_TEST
ext4: correct documentation for grpid mount option
ext4: save error to disk in __ext4_grp_locked_error()
ext4: fix a race in the ext4 shutdown path
jbd2: fix sphinx kernel-doc build warnings
mbcache: initialize entry->e_referenced in mb_cache_entry_create()
rtc-opal: Fix handling of firmware error codes, prevent busy loops
drm/radeon: adjust tested variable
drm/radeon: Add dpm quirk for Jet PRO (v2)
scsi: smartpqi: allow static build ("built-in")
BACKPORT: tee: shm: Potential NULL dereference calling tee_shm_register()
BACKPORT: tee: shm: don't put_page on null shm->pages
BACKPORT: tee: shm: make function __tee_shm_alloc static
BACKPORT: tee: optee: check type of registered shared memory
BACKPORT: tee: add start argument to shm_register callback
BACKPORT: tee: optee: fix header dependencies
BACKPORT: tee: shm: inline tee_shm_get_id()
BACKPORT: tee: use reference counting for tee_context
BACKPORT: tee: optee: enable dynamic SHM support
BACKPORT: tee: optee: add optee-specific shared pool implementation
BACKPORT: tee: optee: store OP-TEE capabilities in private data
BACKPORT: tee: optee: add registered buffers handling into RPC calls
BACKPORT: tee: optee: add registered shared parameters handling
BACKPORT: tee: optee: add shared buffer registration functions
BACKPORT: tee: optee: add page list manipulation functions
BACKPORT: tee: optee: Update protocol definitions
BACKPORT: tee: shm: add page accessor functions
BACKPORT: tee: shm: add accessors for buffer size and page offset
BACKPORT: tee: add register user memory
BACKPORT: tee: flexible shared memory pool creation
BACKPORT: optee: support asynchronous supplicant requests
BACKPORT: tee: add TEE_IOCTL_PARAM_ATTR_META
BACKPORT: tee: add tee_param_is_memref() for driver use
UPSTREAM: tcp: fix access to sk->sk_state in tcp_poll()
BACKPORT: tcp: fix potential double free issue for fastopen_req
BACKPORT: xfrm: Fix return value check of copy_sec_ctx.
time: Fix ktime_get_raw() incorrect base accumulation
FROMLIST: coresight: ETM: Add support for ARM Cortex-A73
FROMLIST: coresight: tmc: implementing TMC-ETR AUX space API
UPSTREAM: coresight: etm_perf: Fix using uninitialised work
UPSTREAM: coresight: fix kernel panic caused by invalid CPU
UPSTREAM: coresight: Fix disabling of CoreSight TPIU
UPSTREAM: coresight: perf: Add a missing call to etm_free_aux
UPSTREAM: coresight: tmc: Remove duplicate memset
UPSTREAM: coresight: tmc: Get rid of mode parameter for helper routines
UPSTREAM: coresight: tmc: Cleanup operation mode handling
UPSTREAM: coresight: reset "enable_sink" flag when need be
sched/fair: prevent possible infinite loop in sched_group_energy
ANDROID: qtaguid: Fix the UAF problem with tag_ref_tree
UPSTREAM: ANDROID: binder: remove waitqueue when thread exits.
ANDROID: sdcardfs: Protect set_top
ANDROID: fsnotify: Notify lower fs of open
Revert "ANDROID: sdcardfs: notify lower file of opens"
ANDROID: sdcardfs: Use lower getattr times/size
ANDROID: sched: EAS: check energy_aware() before calling select_energy_cpu_brute() in up-migrate path
UPSTREAM: eventpoll.h: add missing epoll event masks
BACKPORT: thermal/drivers/hisi: Add support for hi3660 SoC
BACKPORT: thermal/drivers/hisi: Prepare to add support for other hisi platforms
BACKPORT: thermal/drivers/hisi: Add platform prefix to function name
BACKPORT: thermal/drivers/hisi: Put platform code together
BACKPORT: thermal/drivers/hisi: Use round up step value
BACKPORT: thermal/drivers/hisi: Move the clk setup in the corresponding functions
BACKPORT: thermal/drivers/hisi: Remove mutex_lock in the code
BACKPORT: thermal/drivers/hisi: Remove thermal data back pointer
BACKPORT: thermal/drivers/hisi: Convert long to int
BACKPORT: thermal/drivers/hisi: Rename and remove unused field
BACKPORT: thermal/drivers/hisi: Remove costly sensor inspection
BACKPORT: thermal/drivers/hisi: Fix configuration register setting
BACKPORT: thermal/drivers/hisi: Encapsulate register writes into helpers
BACKPORT: thermal/drivers/hisi: Remove pointless lock
BACKPORT: thermal/drivers/hisi: Remove the multiple sensors support
BACKPORT: thermal: hisilicon: constify thermal_zone_of_device_ops structures
ANDROID: xattr: Pass EOPNOTSUPP to permission2
ANDROID: sdcardfs: Move default_normal to superblock
UPSTREAM: tcp: fix a request socket leak
UPSTREAM: tcp: fix possible deadlock in TCP stack vs BPF filter
UPSTREAM: tcp: Add a tcp_filter hook before handle ack packet
FROMLIST: arm64: kpti: Fix the interaction between ASID switching and software PAN
FROMLIST: arm64: Move post_ttbr_update_workaround to C code
fscrypt: updates on 4.15-rc4
ANDROID: uid_sys_stats: fix the comment
BACKPORT: optee: fix invalid of_node_put() in optee_driver_init()
BACKPORT: tee: optee: sync with new naming of interrupts
BACKPORT: tee: indicate privileged dev in gen_caps
BACKPORT: tee: optee: interruptible RPC sleep
BACKPORT: tee: optee: add const to tee_driver_ops and tee_desc structures
BACKPORT: tee: tee_shm: Constify dma_buf_ops structures.
BACKPORT: tee: add forward declaration for struct device
BACKPORT: tee: optee: fix uninitialized symbol 'parg'
BACKPORT: tee.txt: standardize document format
BACKPORT: tee: add ARM_SMCCC dependency
clocksource: arch_timer: make virtual counter access configurable
arm64: issue isb when trapping CNTVCT_EL0 access
BACKPORT: arm64: Add CNTFRQ_EL0 trap handler
BACKPORT: arm64: Add CNTVCT_EL0 trap handler
ANDROID: sdcardfs: Fix missing break on default_normal
ANDROID: arm64: kaslr: fixup Falkor workaround for 4.9
ANDROID: usb: f_fs: Prevent gadget unbind if it is already unbound
arm64: Kconfig: Reword UNMAP_KERNEL_AT_EL0 kconfig entry
arm64: use RET instruction for exiting the trampoline
UPSTREAM: arm64: kaslr: Put kernel vectors address in separate data page
UPSTREAM: arm64: mm: Introduce TTBR_ASID_MASK for getting at the ASID in the TTBR
UPSTREAM: arm64: Kconfig: Add CONFIG_UNMAP_KERNEL_AT_EL0
UPSTREAM: arm64: entry: Add fake CPU feature for unmapping the kernel at EL0
UPSTREAM: arm64: tls: Avoid unconditional zeroing of tpidrro_el0 for native tasks
UPSTREAM: arm64: erratum: Work around Falkor erratum #E1003 in trampoline code
UPSTREAM: arm64: entry: Hook up entry trampoline to exception vectors
UPSTREAM: arm64: entry: Explicitly pass exception level to kernel_ventry macro
UPSTREAM: arm64: mm: Map entry trampoline into trampoline and kernel page tables
UPSTREAM: arm64: entry: Add exception trampoline page for exceptions from EL0
UPSTREAM: arm64: mm: Invalidate both kernel and user ASIDs when performing TLBI
UPSTREAM: arm64: mm: Add arm64_kernel_unmapped_at_el0 helper
UPSTREAM: arm64: mm: Allocate ASIDs in pairs
UPSTREAM: arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN
UPSTREAM: arm64: mm: Rename post_ttbr0_update_workaround
UPSTREAM: arm64: mm: Move ASID from TTBR0 to TTBR1
UPSTREAM: arm64: mm: Temporarily disable ARM64_SW_TTBR0_PAN
UPSTREAM: arm64: mm: Use non-global mappings for kernel space
UPSTREAM: arm64: factor out entry stack manipulation
ANDROID: sdcardfs: Add default_normal option
ANDROID: sdcardfs: notify lower file of opens
blkdev: Refactoring block io latency histogram codes
UPSTREAM: netfilter: conntrack: use power efficient workqueue
ANDROID: binder: Remove obsolete proc waitqueue.
UPSTREAM: arm64: setup: introduce kaslr_offset()
UPSTREAM: kcov: fix comparison callback signature
UPSTREAM: kcov: support comparison operands collection
UPSTREAM: kcov: remove pointless current != NULL check
UPSTREAM: kcov: support compat processes
UPSTREAM: kcov: simplify interrupt check
UPSTREAM: kcov: make kcov work properly with KASLR enabled
UPSTREAM: kcov: add more missing includes
BACKPORT: irq: Make the irqentry text section unconditional
UPSTREAM: kasan: make get_wild_bug_type() static
UPSTREAM: kasan: separate report parts by empty lines
UPSTREAM: kasan: improve double-free report format
UPSTREAM: kasan: print page description after stacks
UPSTREAM: kasan: improve slab object description
UPSTREAM: kasan: change report header
UPSTREAM: kasan: simplify address description logic
UPSTREAM: kasan: change allocation and freeing stack traces headers
UPSTREAM: kasan: unify report headers
UPSTREAM: kasan: introduce helper functions for determining bug type
BACKPORT: kasan: report only the first error by default
UPSTREAM: kasan: fix races in quarantine_remove_cache()
UPSTREAM: kasan: resched in quarantine_remove_cache()
UPSTREAM: kasan, sched/headers: Uninline kasan_enable/disable_current()
UPSTREAM: kasan: drain quarantine of memcg slab objects
UPSTREAM: kasan: eliminate long stalls during quarantine reduction
UPSTREAM: kasan: support panic_on_warn
ANDROID: dma-buf/sw_sync: Rename active_list to link
ANDROID: initramfs: call free_initrd() when skipping init
BACKPORT: Documentation: tee subsystem and op-tee driver
BACKPORT: tee: add OP-TEE driver
BACKPORT: tee: generic TEE subsystem
BACKPORT: dt/bindings: add bindings for optee
BACKPORT: schedutil: Reset cached freq if it is not in sync with next_freq
sched: EAS/WALT: Don't take into account of running task's util
sched: EAS/WALT: take into account of waking task's load
sched: EAS: upmigrate misfit current task
sched: avoid pushing tasks to an offline CPU
sched: Extend active balance to accept 'push_task' argument
sched: walt: Correct WALT window size initialization
sched: WALT: account cumulative window demand
sched: EAS/WALT: finish accounting prior to task_tick
sched/fair: prevent meaningless active migration
sched: walt: Leverage existing helper APIs to apply invariance
UPSTREAM: net: xfrm: allow clearing socket xfrm policies.
UPSTREAM: time: Clean up CLOCK_MONOTONIC_RAW time handling
UPSTREAM: arm64: vdso: fix clock_getres for 4GiB-aligned res
f2fs: updates on 4.15-rc1
UPSTREAM: android: binder: fix type mismatch warning
BACKPORT: arm64: Use __pa_symbol for empty_zero_page
UPSTREAM: arm64: Use __pa_symbol for kernel symbols
UPSTREAM: mm: Introduce lm_alias
FROMLIST: binder: fix proc->files use-after-free
BACKPORT: xfrm: Clear sk_dst_cache when applying per-socket policy.
sched: WALT: fix potential overflow
sched: Update task->on_rq when tasks are moving between runqueues
sched: WALT: fix window mis-alignment
sched: EAS: kill incorrect nohz idle cpu kick
sched: EAS: fix incorrect energy delta calculation due to rounding error
sched: EAS/WALT: use cr_avg instead of prev_runnable_sum
sched: WALT: fix broken cumulative runnable average accounting
sched: deadline: WALT: account cumulative runnable avg
ANDROID: binder: clarify deferred thread work.
BACKPORT: net/tcp-fastopen: Add new API support
UPSTREAM: net: Remove __sk_dst_reset() in tcp_v6_connect()
UPSTREAM: net/tcp-fastopen: refactor cookie check logic
sched: compute task utilisation with WALT consistently
FROMLIST: arm64: Avoid aligning normal memory pointers in __memcpy_{to,from}io
UPSTREAM: security: bpf: replace include of linux/bpf.h with forward declarations
UPSTREAM: selinux: bpf: Add addtional check for bpf object file receive
UPSTREAM: selinux: bpf: Add selinux check for eBPF syscall operations
BACKPORT: security: bpf: Add LSM hooks for bpf object related syscall
BACKPORT: bpf: Add file mode configuration into bpf maps
cpufreq: Drop schedfreq governor
ANDROID: Revert "arm64: move ELF_ET_DYN_BASE to 4GB / 4MB"
ANDROID: Revert "arm: move ELF_ET_DYN_BASE to 4MB"
sched: EAS: Fix the condition to distinguish energy before/after
sched: EAS: update trg_cpu to backup_cpu if no energy saving for target_cpu
sched/fair: consider task utilization in group_max_util()
sched/fair: consider task utilization in group_norm_util()
sched/fair: enforce EAS mode
sched/fair: ignore backup CPU when not valid
sched/fair: trace energy_diff for non boosted tasks
UPSTREAM: sched/fair: Sync task util before slow-path wakeup
UPSTREAM: sched/core: Add missing update_rq_clock() call in set_user_nice()
UPSTREAM: sched/core: Add missing update_rq_clock() call for task_hot()
UPSTREAM: sched/core: Add missing update_rq_clock() in detach_task_cfs_rq()
UPSTREAM: sched/core: Add missing update_rq_clock() in post_init_entity_util_avg()
UPSTREAM: sched/fair: Fix task group initialization
cpufreq/sched: Consider max cpu capacity when choosing frequencies
cpufreq/sched: Use cpu max freq rather than policy max
sched/fair: remove erroneous RCU_LOCKDEP_WARN from start_cpu()
FROMLIST: ALSA: usx2y: Suppress kernel warning at page allocation failures
FROMLIST: kbuild: clang: fix build failures with sparse check
Revert "Revert "BACKPORT: efi/libstub/arm64: Set -fpie when building the EFI stub""
BACKPORT: efi/libstub: Unify command line param parsing
ANDROID: sched/walt: Fix divide by zero error in cpufreq notifier
ANDROID: binder: show high watermark of alloc->pages.
ANDROID: binder: Add thread->process_todo flag.
ANDROID: sched/fair: Select correct capacity state for energy_diff
ANDROID: cpufreq-dt: Set sane defaults for schedutil rate limits
BACKPORT: cpufreq: schedutil: Use policy-dependent transition delays
Revert "BACKPORT: efi/libstub/arm64: Set -fpie when building the EFI stub"
FROMLIST: android: binder: Fix null ptr dereference in debug msg
FROMLIST: android: binder: Change binder_shrinker to static
UPSTREAM: arm64: compat: Remove leftover variable declaration
ANDROID: HACK: arm64: use -mno-implicit-float instead of -mgeneral-regs-only
ANDROID: Kbuild, LLVMLinux: allow overriding clang target triple
CHROMIUM: arm64: Disable asm-operand-width warning for clang
CHROMIUM: kbuild: clang: Disable the 'duplicate-decl-specifier' warning
BACKPORT: x86/asm: Fix inline asm call constraints for Clang
BACKPORT: efi/libstub/arm64: Set -fpie when building the EFI stub
UPSTREAM: efi/libstub/arm64: Force 'hidden' visibility for section markers
UPSTREAM: efi/libstub/arm64: Use hidden attribute for struct screen_info reference
UPSTREAM: x86/build: Use cc-option to validate stack alignment parameter
UPSTREAM: x86/build: Fix stack alignment for CLang
UPSTREAM: compiler, clang: always inline when CONFIG_OPTIMIZE_INLINING is disabled
UPSTREAM: x86/boot: #undef memcpy() et al in string.c
UPSTREAM: llist: clang: introduce member_address_is_nonnull()
UPSTREAM: crypto: arm64/sha - avoid non-standard inline asm tricks
UPSTREAM: kbuild: clang: Disable 'address-of-packed-member' warning
UPSTREAM: x86/build: Specify stack alignment for clang
UPSTREAM: x86/build: Use __cc-option for boot code compiler options
UPSTREAM: kbuild: Add __cc-option macro
UPSTREAM: x86/mm/kaslr: Use the _ASM_MUL macro for multiplication to work around Clang incompatibility
UPSTREAM: crypto, x86: aesni - fix token pasting for clang
UPSTREAM: x86/kbuild: Use cc-option to enable -falign-{jumps/loops}
UPSTREAM: compiler, clang: properly override 'inline' for clang
UPSTREAM: compiler, clang: suppress warning for unused static inline functions
UPSTREAM: modules: mark __inittest/__exittest as __maybe_unused
UPSTREAM: kbuild: Add support to generate LLVM assembly files
UPSTREAM: kbuild: use -Oz instead of -Os when using clang
UPSTREAM: kbuild, LLVMLinux: Add -Werror to cc-option to support clang
UPSTREAM: kbuild: drop -Wno-unknown-warning-option from clang options
UPSTREAM: kbuild: fix asm-offset generation to work with clang
UPSTREAM: kbuild: consolidate redundant sed script ASM offset generation
UPSTREAM: kbuild: Consolidate header generation from ASM offset information
UPSTREAM: kbuild: clang: add -no-integrated-as to KBUILD_[AC]FLAGS
UPSTREAM: kbuild: Add better clang cross build support
FROMLIST: f2fs: expose some sectors to user in inline data or dentry case
UPSTREAM: sched/fair: Fix usage of find_idlest_group() when the local group is idlest
UPSTREAM: sched/fair: Fix usage of find_idlest_group() when no groups are allowed
UPSTREAM: sched/fair: Fix find_idlest_group() when local group is not allowed
UPSTREAM: sched/fair: Remove unnecessary comparison with -1
UPSTREAM: sched/fair: Move select_task_rq_fair() slow-path into its own function
UPSTREAM: sched/fair: Force balancing on NOHZ balance if local group has capacity
UPSTREAM: f2fs: fix potential panic during fstrim
f2fs: catch up to v4.14-rc1
UPSTREAM: sched: use load_avg for selecting idlest group
UPSTREAM: sched: fix find_idlest_group for fork
ANDROID: binder: fix node sched policy calculation
ANDROID: binder: init desired_prio.sched_policy before use it
BACKPORT: net: xfrm: support setting an output mark.
FROMLIST: tracing: Add support for preempt and irq enable/disable events
FROMLIST: tracing: Prepare to add preempt and irq trace events
Conflicts:
arch/arm64/Kconfig
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/io.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/vdso.c
arch/arm64/mm/context.c
arch/arm64/mm/mmu.c
drivers/Kconfig
drivers/Makefile
drivers/cpufreq/Kconfig
drivers/hwtracing/coresight/coresight-etm4x.c
drivers/hwtracing/coresight/coresight-priv.h
drivers/hwtracing/coresight/coresight-tmc-etr.c
drivers/hwtracing/coresight/coresight.c
drivers/scsi/ufs/ufshcd.h
drivers/staging/android/ion/ion-ioctl.c
drivers/staging/android/ion/ion_system_heap.c
drivers/usb/dwc3/gadget.c
include/linux/sched.h
include/trace/events/sched.h
kernel/kcov.c
kernel/sched/core.c
kernel/sched/cpufreq_sched.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/walt.c
kernel/sched/walt.h
mm/kasan/report.c
security/security.c
security/selinux/hooks.c
Change-Id: I0ec8cbca6cb6384e22fbbe8def8a9d228229dc48
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>

include/linux/blkdev.h (C, 1814 lines, 51 KiB; truncated below):

#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,	/* scsi command */
	REQ_TYPE_DRV_PRIV,	/* driver defined types from here */
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	u64 cmd_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq	*icq;
			void		*priv[2];
		} elv;

		struct {
			unsigned int	seq;
			struct list_head list;
			rq_end_io_fn	*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;	/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;

	ktime_t lat_hist_io_start;
	int lat_hist_enabled;
};

#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)

#define req_set_op(req, op) do {				\
	WARN_ON(op >= (1 << REQ_OP_BITS));			\
	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
} while (0)

#define req_set_op_attrs(req, op, flags) do {	\
	req_set_op(req, op);			\
	(req)->cmd_flags |= flags;		\
} while (0)
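
/*
 * Editor's sketch, not part of the original header: a hypothetical helper
 * showing how a driver might use req_set_op_attrs() above to stamp an
 * operation plus rq flags onto a request in one step. req_set_op() first
 * clears the old op bits in the top REQ_OP_BITS of cmd_flags before OR-ing
 * in the new one, so the flags below are additive only.
 */
static inline void example_mark_sync_write(struct request *rq)
{
	req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC);
}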

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl. Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	*backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject		mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity	integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		23	/* Write back caching */
#define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */
#define QUEUE_FLAG_FAST		27	/* fast block device (e.g. ram based) */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_STACKABLE) |	\
				 (1 << QUEUE_FLAG_SAME_COMP) |	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_STACKABLE) |	\
				 (1 << QUEUE_FLAG_SAME_COMP) |	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
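
/*
 * Editor's sketch, not part of the original header: the locked
 * queue_flag_set()/queue_flag_clear() variants above must run with
 * ->queue_lock held, which queue_lockdep_assert_held() verifies. A
 * hypothetical caller would look like this:
 */
static inline void example_mark_queue_nonrot(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_NONROT, q);	/* e.g. an SSD-backed queue */
	spin_unlock_irqrestore(q->queue_lock, flags);
}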

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_fast(q)	test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			    REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * A driver can handle struct request if it either has an old-style
 * request_fn defined or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(int op, unsigned int rw_flags)
{
	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(req_op(rq), rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_MMU
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)		\
		(_iter.bio->bi_next == NULL &&	\
		 bio_iter_last(bvec, _iter.iter))
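
/*
 * Editor's sketch, not part of the original header: a hypothetical helper
 * that walks every bio_vec segment of a request with rq_for_each_segment()
 * above, e.g. to total up the payload length of a multi-bio request.
 */
static inline unsigned int example_rq_payload_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;
}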

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_recalc_rq_segments(struct request *rq);
extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}
/*
|
|
* blk_rq_pos() : the current sector
|
|
* blk_rq_bytes() : bytes left in the entire request
|
|
* blk_rq_cur_bytes() : bytes left in the current segment
|
|
* blk_rq_err_bytes() : bytes left till the next error boundary
|
|
* blk_rq_sectors() : sectors left in the entire request
|
|
* blk_rq_cur_sectors() : sectors left in the current segment
|
|
*/
|
|
static inline sector_t blk_rq_pos(const struct request *rq)
|
|
{
|
|
return rq->__sector;
|
|
}
|
|
|
|
static inline unsigned int blk_rq_bytes(const struct request *rq)
|
|
{
|
|
return rq->__data_len;
|
|
}
|
|
|
|
static inline int blk_rq_cur_bytes(const struct request *rq)
|
|
{
|
|
return rq->bio ? bio_cur_bytes(rq->bio) : 0;
|
|
}
|
|
|
|
extern unsigned int blk_rq_err_bytes(const struct request *rq);
|
|
|
|
static inline unsigned int blk_rq_sectors(const struct request *rq)
|
|
{
|
|
return blk_rq_bytes(rq) >> 9;
|
|
}
|
|
|
|
static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
|
|
{
|
|
return blk_rq_cur_bytes(rq) >> 9;
|
|
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

/*
 * Return the maximum size of a request at a given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}
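
/*
 * Worked example (illustrative): with q->limits.chunk_sectors == 128 and
 * offset == 300, the offset within the chunk is 300 & 127 == 44, so at
 * most 128 - 44 == 84 sectors fit before the next chunk boundary.  Note
 * that chunk_sectors must be a power of two for the mask arithmetic
 * above to hold.
 */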

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes a given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends. __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif
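
/*
 * Minimal usage sketch (illustrative; the foo_* names are hypothetical):
 * a driver's runtime-PM suspend callback brackets its own suspend work
 * with the helpers above so the block layer can veto suspend while
 * requests are still in flight:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		int ret = blk_pre_runtime_suspend(q);
 *
 *		if (ret)
 *			return ret;
 *		ret = foo_suspend_hw(dev);
 *		blk_post_runtime_suspend(q, ret);
 *		return ret;
 *	}
 */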

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);
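
/*
 * Typical usage (illustrative): batch related submissions under one plug
 * so sequential bios can be merged before they reach the request_queue:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio);		(repeat for each related bio)
 *	blk_finish_plug(&plug);
 */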

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}

/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}


#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
#define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, true);
}
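
/*
 * Worked example (illustrative): for a filesystem with 4 KiB blocks,
 * sb->s_blocksize_bits == 12, so the shifts above are by 12 - 9 == 3:
 * filesystem block N maps to 512-byte sector N << 3, and nr_blocks
 * becomes nr_blocks << 3 sectors.
 */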

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}
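
/*
 * Worked example (illustrative): with discard_granularity == 1 MiB
 * (2048 sectors), discard_alignment == 0 and sector == 3072, the offset
 * into the current granule is 3072 % 2048 == 1024 sectors, so the
 * function returns ((2048 + 0 - 1024) % 2048) << 9 == 512 KiB to the
 * next properly aligned discard boundary.
 */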

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}
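
/*
 * Example (illustrative): with the default queue_dma_alignment() of 511
 * and a zero pad mask, only buffers whose address and length are both
 * multiples of 512 pass, e.g. addr == 0x1000 with len == 4096 is
 * aligned, while addr == 0x1004 is not.
 */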

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
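
/*
 * Worked example (illustrative): blksize_bits(4096) shifts 4096 -> 2048
 * -> 1024 -> 512 -> 256 while counting bits up from 8, and returns 12
 * (1 << 12 == 4096).  As the comment above notes, sizes <= 256 are not
 * expected here.
 */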

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	put_page(p.v);
}

static inline bool __bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
				struct bio *next)
{
	if (bio_has_data(prev) && queue_virt_boundary(q)) {
		struct bio_vec pb, nb;

		bio_get_last_bvec(prev, &pb);
		bio_get_first_bvec(next, &nb);

		return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
	}

	return false;
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, bio, req->bio);
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up; until that is in place, we need to disable preemption
 * around sched_clock() in this function and in set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
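
/*
 * Example (illustrative): the loop driver owns major 7 (LOOP_MAJOR) and
 * can declare
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR)
 *
 * which expands to MODULE_ALIAS("block-major-7-*"), so opening any node
 * with that major can trigger on-demand module loading.
 */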

#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0,
	BLK_INTEGRITY_GENERATE		= 1 << 1,
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
};

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity_profile {
	integrity_processing_fn		*generate_fn;
	integrity_processing_fn		*verify_fn;
	const char			*name;
};

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					  struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/**
 * struct blk_dax_ctl - control and output parameters for ->direct_access
 * @sector: (input) offset relative to a block_device
 * @addr: (output) kernel virtual address for @sector populated by driver
 * @pfn: (output) page frame number for @addr populated by driver
 * @size: (input) number of bytes requested
 */
struct blk_dax_ctl {
	sector_t sector;
	void *addr;
	long size;
	pfn_t pfn;
};
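
/*
 * Minimal usage sketch (illustrative) for driving ->direct_access via
 * bdev_direct_access(), declared below: fill in the input fields, then
 * read the outputs on success:
 *
 *	struct blk_dax_ctl dax = { .sector = sector, .size = PAGE_SIZE };
 *	long avail = bdev_direct_access(bdev, &dax);
 *
 *	if (avail > 0)
 *		memcpy(buf, dax.addr, avail);	(dax.addr/dax.pfn now valid)
 */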

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
			long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	const struct pr_ops *pr_ops;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
extern int bdev_dax_supported(struct super_block *, int);
extern bool bdev_dax_capable(struct block_device *);

/*
 * X-axis for IO latency histogram support.
 */
static const u_int64_t latency_x_axis_us[] = {
	100,
	200,
	300,
	400,
	500,
	600,
	700,
	800,
	900,
	1000,
	1200,
	1400,
	1600,
	1800,
	2000,
	2500,
	3000,
	4000,
	5000,
	6000,
	7000,
	9000,
	10000
};

#define BLK_IO_LAT_HIST_DISABLE		0
#define BLK_IO_LAT_HIST_ENABLE		1
#define BLK_IO_LAT_HIST_ZERO		2

struct io_latency_state {
	u_int64_t	latency_y_axis[ARRAY_SIZE(latency_x_axis_us) + 1];
	u_int64_t	latency_elems;
	u_int64_t	latency_sum;
};

static inline void
blk_update_latency_hist(struct io_latency_state *s, u_int64_t delta_us)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++)
		if (delta_us < (u_int64_t)latency_x_axis_us[i])
			break;
	s->latency_y_axis[i]++;
	s->latency_elems++;
	s->latency_sum += delta_us;
}
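
/*
 * Worked example (illustrative): for delta_us == 1500 the loop stops at
 * the first boundary above it (1600, index 12), so latency_y_axis[12] is
 * incremented; a delta_us >= 10000 falls through the loop and lands in
 * the overflow bucket latency_y_axis[ARRAY_SIZE(latency_x_axis_us)].
 */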

ssize_t blk_latency_hist_show(char* name, struct io_latency_state *s,
		char *buf, int buf_size);

#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}


static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif