summarylogtreecommitdiffstats
diff options
context:
space:
mode:
authorOriginCode2020-12-22 07:50:08 +0800
committerOriginCode2020-12-22 07:50:08 +0800
commit12071c90176d538c7d3786e7a37d37e37db1c16d (patch)
tree437076f2697419ce22b60614a9e0bd5a2ad4a026
parent0533208f123519c359e0e3357c1669ed7743169f (diff)
downloadaur-12071c90176d538c7d3786e7a37d37e37db1c16d.tar.gz
linux-froidzen: 5.10.2.zen1-1
-rw-r--r--PKGBUILD13
-rw-r--r--config1642
-rw-r--r--uksm-5.10.patch6935
3 files changed, 7305 insertions, 1285 deletions
diff --git a/PKGBUILD b/PKGBUILD
index 65674c9992d..8823e22e073 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -1,7 +1,7 @@
# Maintainer: OriginCode <self@origincode.me>
pkgbase=linux-froidzen
-pkgver=5.9.13.zen1
+pkgver=5.10.2.zen1
pkgrel=1
pkgdesc='Linux ZEN with Patches and Modifications'
_srctag=v${pkgver%.*}-${pkgver##*.}
@@ -9,7 +9,7 @@ url="https://github.com/zen-kernel/zen-kernel/commits/$_srctag"
arch=(x86_64)
license=(GPL2)
makedepends=(
- bc kmod libelf pahole
+ bc kmod libelf pahole cpio perl tar xz
xmlto python-sphinx python-sphinx_rtd_theme graphviz imagemagick
git
)
@@ -19,7 +19,7 @@ source=(
"$_srcname::git+https://github.com/zen-kernel/zen-kernel?signed#tag=$_srctag"
config # the main kernel config file
sphinx-workaround.patch
- "https://raw.githubusercontent.com/dolohow/uksm/master/v5.x/uksm-5.9.patch" # UKSM Patch
+ "https://raw.githubusercontent.com/dolohow/uksm/master/v5.x/uksm-5.10.patch" # UKSM Patch
)
validpgpkeys=(
'ABAF11C65A2970B130ABE3C479BE3E4300411886' # Linus Torvalds
@@ -27,9 +27,9 @@ validpgpkeys=(
'A2FF3A36AAA56654109064AB19802F8B0D70FC30' # Jan Alexander Steffens (heftig)
)
sha256sums=('SKIP'
- '23f9693a1b35e4d674eced335a2a2cd8a231a157f362000340ccfdb86b4a98e2'
+ 'e5d2541532de126a4f243b4cc6f098403a425eb0d76ee59d3ffaf122ac4d8910'
'8cb21e0b3411327b627a9dd15b8eb773295a0d2782b1a41b2a8839d1b2f5778c'
- '59d2b5e63c30332bd6e7030e4050ce9722f420b0e5b9bc383e7e93aca4268929')
+ '24729e63e08de13039ce7e6637146ec5a5747379ebbd92cdeef744edfad17183')
export KBUILD_BUILD_HOST=archlinux
export KBUILD_BUILD_USER=$pkgbase
@@ -43,9 +43,6 @@ prepare() {
echo "-$pkgrel" > localversion.10-pkgrel
echo "${pkgbase#linux}" > localversion.20-pkgname
- # Workaround for uksm-5.9 patch
- sed -i "s/#include <linux\/vmalloc.h>/#include <linux\/vmalloc.h>\n #include <linux\/io_uring.h>/g" ${srcdir}/uksm-5.9.patch
-
local src
for src in "${source[@]}"; do
src="${src%%::*}"
diff --git a/config b/config
index 7942a7a7d86..b5d82226281 100644
--- a/config
+++ b/config
@@ -1,12 +1,13 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 5.9.11-zen2 Kernel Configuration
+# Linux/x86 5.10.2-zen1 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 10.2.0"
CONFIG_CC_IS_GCC=y
CONFIG_GCC_VERSION=100200
CONFIG_LD_VERSION=235010000
CONFIG_CLANG_VERSION=0
+CONFIG_LLD_VERSION=0
CONFIG_CC_CAN_LINK=y
CONFIG_CC_CAN_LINK_STATIC=y
CONFIG_CC_HAS_ASM_GOTO=y
@@ -61,6 +62,7 @@ CONFIG_GENERIC_IRQ_SHOW=y
CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_GENERIC_IRQ_MIGRATION=y
+CONFIG_GENERIC_IRQ_INJECTION=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_GENERIC_IRQ_CHIP=y
CONFIG_IRQ_DOMAIN=y
@@ -136,6 +138,7 @@ CONFIG_TREE_SRCU=y
CONFIG_TASKS_RCU_GENERIC=y
CONFIG_TASKS_RCU=y
CONFIG_TASKS_RUDE_RCU=y
+CONFIG_TASKS_TRACE_RCU=y
CONFIG_RCU_STALL_COMMON=y
CONFIG_RCU_NEED_SEGCBLIST=y
CONFIG_RCU_FANOUT=64
@@ -150,7 +153,7 @@ CONFIG_RCU_BOOST_DELAY=500
CONFIG_BUILD_BIN2C=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-# CONFIG_IKHEADERS is not set
+CONFIG_IKHEADERS=m
CONFIG_LOG_BUF_SHIFT=17
CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
@@ -218,16 +221,17 @@ CONFIG_BOOT_CONFIG=y
# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_LD_ORPHAN_WARN=y
CONFIG_SYSCTL=y
CONFIG_HAVE_UID16=y
CONFIG_SYSCTL_EXCEPTION_TRACE=y
CONFIG_HAVE_PCSPKR_PLATFORM=y
CONFIG_BPF=y
-CONFIG_EXPERT=y
-# CONFIG_UID16 is not set
+# CONFIG_EXPERT is not set
+CONFIG_UID16=y
CONFIG_MULTIUSER=y
CONFIG_SGETMASK_SYSCALL=y
-# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_SYSFS_SYSCALL=y
CONFIG_FHANDLE=y
CONFIG_POSIX_TIMERS=y
CONFIG_PRINTK=y
@@ -257,13 +261,14 @@ CONFIG_BPF_SYSCALL=y
CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_JIT_DEFAULT_ON=y
+CONFIG_USERMODE_DRIVER=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=m
CONFIG_USERFAULTFD=y
CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
CONFIG_RSEQ=y
-# CONFIG_DEBUG_RSEQ is not set
# CONFIG_EMBEDDED is not set
CONFIG_HAVE_PERF_EVENTS=y
-# CONFIG_PC104 is not set
#
# Kernel Performance Events And Counters
@@ -274,11 +279,9 @@ CONFIG_PERF_EVENTS=y
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
-# CONFIG_SLUB_MEMCG_SYSFS_ON is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
-# CONFIG_SLOB is not set
CONFIG_SLAB_MERGE_DEFAULT=y
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
@@ -409,7 +412,6 @@ CONFIG_X86_MINIMUM_CPU_FAMILY=64
CONFIG_X86_DEBUGCTLMSR=y
CONFIG_IA32_FEAT_CTL=y
CONFIG_X86_VMX_FEATURE_NAMES=y
-CONFIG_PROCESSOR_SELECT=y
CONFIG_CPU_SUP_INTEL=y
CONFIG_CPU_SUP_AMD=y
CONFIG_CPU_SUP_HYGON=y
@@ -418,7 +420,7 @@ CONFIG_CPU_SUP_ZHAOXIN=y
CONFIG_HPET_TIMER=y
CONFIG_HPET_EMULATE_RTC=y
CONFIG_DMI=y
-CONFIG_GART_IOMMU=y
+# CONFIG_GART_IOMMU is not set
# CONFIG_MAXSMP is not set
CONFIG_NR_CPUS_RANGE_BEGIN=2
CONFIG_NR_CPUS_RANGE_END=512
@@ -457,12 +459,12 @@ CONFIG_I8K=m
CONFIG_MICROCODE=y
CONFIG_MICROCODE_INTEL=y
CONFIG_MICROCODE_AMD=y
-CONFIG_MICROCODE_OLD_INTERFACE=y
-CONFIG_X86_MSR=m
-CONFIG_X86_CPUID=m
+# CONFIG_MICROCODE_OLD_INTERFACE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
CONFIG_X86_5LEVEL=y
CONFIG_X86_DIRECT_GBPAGES=y
-# CONFIG_X86_CPA_STATISTICS is not set
+CONFIG_X86_CPA_STATISTICS=y
CONFIG_AMD_MEM_ENCRYPT=y
# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set
CONFIG_NUMA=y
@@ -473,7 +475,7 @@ CONFIG_NODES_SHIFT=5
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-CONFIG_ARCH_MEMORY_PROBE=y
+# CONFIG_ARCH_MEMORY_PROBE is not set
CONFIG_ARCH_PROC_KCORE_TEXT=y
CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
CONFIG_X86_PMEM_LEGACY_DEVICE=y
@@ -497,7 +499,6 @@ CONFIG_X86_INTEL_TSX_MODE_AUTO=y
CONFIG_EFI=y
CONFIG_EFI_STUB=y
CONFIG_EFI_MIXED=y
-CONFIG_SECCOMP=y
# CONFIG_HZ_100 is not set
# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
@@ -517,7 +518,7 @@ CONFIG_X86_NEED_RELOCS=y
CONFIG_PHYSICAL_ALIGN=0x200000
CONFIG_DYNAMIC_MEMORY_LAYOUT=y
CONFIG_RANDOMIZE_MEMORY=y
-CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0x1
+CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
CONFIG_HOTPLUG_CPU=y
# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
@@ -545,30 +546,25 @@ CONFIG_ARCH_ENABLE_THP_MIGRATION=y
CONFIG_ARCH_HIBERNATION_HEADER=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
-# CONFIG_SUSPEND_SKIP_SYNC is not set
CONFIG_HIBERNATE_CALLBACKS=y
CONFIG_HIBERNATION=y
CONFIG_HIBERNATION_SNAPSHOT_DEV=y
CONFIG_PM_STD_PARTITION=""
CONFIG_PM_SLEEP=y
CONFIG_PM_SLEEP_SMP=y
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PM_WAKELOCKS_LIMIT=100
-CONFIG_PM_WAKELOCKS_GC=y
+# CONFIG_PM_AUTOSLEEP is not set
+# CONFIG_PM_WAKELOCKS is not set
CONFIG_PM=y
CONFIG_PM_DEBUG=y
-CONFIG_PM_ADVANCED_DEBUG=y
-# CONFIG_PM_TEST_SUSPEND is not set
+# CONFIG_PM_ADVANCED_DEBUG is not set
+CONFIG_PM_TEST_SUSPEND=y
CONFIG_PM_SLEEP_DEBUG=y
-# CONFIG_DPM_WATCHDOG is not set
CONFIG_PM_TRACE=y
CONFIG_PM_TRACE_RTC=y
CONFIG_PM_CLK=y
CONFIG_PM_GENERIC_DOMAINS=y
CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
-CONFIG_PM_GENERIC_DOMAINS_OF=y
CONFIG_ENERGY_MODEL=y
CONFIG_ARCH_SUPPORTS_ACPI=y
CONFIG_ACPI=y
@@ -580,11 +576,11 @@ CONFIG_ACPI_SPCR_TABLE=y
CONFIG_ACPI_LPIT=y
CONFIG_ACPI_SLEEP=y
CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
-CONFIG_ACPI_EC_DEBUGFS=y
-CONFIG_ACPI_AC=m
-CONFIG_ACPI_BATTERY=m
+CONFIG_ACPI_EC_DEBUGFS=m
+CONFIG_ACPI_AC=y
+CONFIG_ACPI_BATTERY=y
CONFIG_ACPI_BUTTON=y
-CONFIG_ACPI_VIDEO=y
+CONFIG_ACPI_VIDEO=m
CONFIG_ACPI_FAN=y
CONFIG_ACPI_TAD=m
CONFIG_ACPI_DOCK=y
@@ -595,7 +591,7 @@ CONFIG_ACPI_CPPC_LIB=y
CONFIG_ACPI_PROCESSOR=y
CONFIG_ACPI_IPMI=m
CONFIG_ACPI_HOTPLUG_CPU=y
-CONFIG_ACPI_PROCESSOR_AGGREGATOR=y
+CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
CONFIG_ACPI_THERMAL=y
CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
CONFIG_ACPI_TABLE_UPGRADE=y
@@ -608,7 +604,6 @@ CONFIG_ACPI_SBS=m
CONFIG_ACPI_HED=y
CONFIG_ACPI_CUSTOM_METHOD=m
CONFIG_ACPI_BGRT=y
-# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
CONFIG_ACPI_NFIT=m
# CONFIG_NFIT_SECURITY_DEBUG is not set
CONFIG_ACPI_NUMA=y
@@ -621,10 +616,13 @@ CONFIG_ACPI_APEI_PCIEAER=y
CONFIG_ACPI_APEI_MEMORY_FAILURE=y
CONFIG_ACPI_APEI_EINJ=m
CONFIG_ACPI_APEI_ERST_DEBUG=m
+CONFIG_ACPI_DPTF=y
CONFIG_DPTF_POWER=m
+CONFIG_DPTF_PCH_FIVR=m
CONFIG_ACPI_WATCHDOG=y
CONFIG_ACPI_EXTLOG=m
CONFIG_ACPI_ADXL=y
+CONFIG_ACPI_CONFIGFS=m
CONFIG_PMIC_OPREGION=y
CONFIG_BYTCRC_PMIC_OPREGION=y
CONFIG_CHTCRC_PMIC_OPREGION=y
@@ -632,10 +630,9 @@ CONFIG_XPOWER_PMIC_OPREGION=y
CONFIG_BXT_WC_PMIC_OPREGION=y
CONFIG_CHT_WC_PMIC_OPREGION=y
CONFIG_CHT_DC_TI_PMIC_OPREGION=y
-CONFIG_ACPI_CONFIGFS=m
CONFIG_TPS68470_PMIC_OPREGION=y
CONFIG_X86_PM_TIMER=y
-CONFIG_SFI=y
+# CONFIG_SFI is not set
#
# CPU Frequency scaling
@@ -649,17 +646,15 @@ CONFIG_CPU_FREQ_STAT=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=m
-CONFIG_CPU_FREQ_GOV_USERSPACE=m
-CONFIG_CPU_FREQ_GOV_ONDEMAND=m
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
#
# CPU frequency scaling drivers
#
-CONFIG_CPUFREQ_DT=m
-CONFIG_CPUFREQ_DT_PLATDEV=y
CONFIG_X86_INTEL_PSTATE=y
CONFIG_X86_PCC_CPUFREQ=m
CONFIG_X86_ACPI_CPUFREQ=m
@@ -696,8 +691,6 @@ CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
CONFIG_PCI_XEN=y
CONFIG_MMCONF_FAM10H=y
-# CONFIG_PCI_CNB20LE_QUIRK is not set
-# CONFIG_ISA_BUS is not set
CONFIG_ISA_DMA_API=y
CONFIG_AMD_NB=y
# CONFIG_X86_SYSFB is not set
@@ -721,7 +714,7 @@ CONFIG_EDD=m
# CONFIG_EDD_OFF is not set
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_DMIID=y
-CONFIG_DMI_SYSFS=m
+CONFIG_DMI_SYSFS=y
CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
CONFIG_ISCSI_IBFT_FIND=y
CONFIG_ISCSI_IBFT=m
@@ -741,11 +734,14 @@ CONFIG_GOOGLE_VPD=m
#
# CONFIG_EFI_VARS is not set
CONFIG_EFI_ESRT=y
+CONFIG_EFI_VARS_PSTORE=y
+# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
CONFIG_EFI_RUNTIME_MAP=y
# CONFIG_EFI_FAKE_MEMMAP is not set
CONFIG_EFI_SOFT_RESERVE=y
CONFIG_EFI_RUNTIME_WRAPPERS=y
CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
+CONFIG_EFI_BOOTLOADER_CONTROL=m
CONFIG_EFI_CAPSULE_LOADER=m
# CONFIG_EFI_TEST is not set
CONFIG_APPLE_PROPERTIES=y
@@ -759,6 +755,7 @@ CONFIG_UEFI_CPER=y
CONFIG_UEFI_CPER_X86=y
CONFIG_EFI_DEV_PATH_PARSER=y
CONFIG_EFI_EARLYCON=y
+CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
#
# Tegra firmware driver
@@ -783,7 +780,6 @@ CONFIG_HAVE_KVM_NO_POLL=y
CONFIG_KVM_XFER_TO_GUEST_WORK=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
-CONFIG_KVM_WERROR=y
CONFIG_KVM_INTEL=m
CONFIG_KVM_AMD=m
CONFIG_KVM_AMD_SEV=y
@@ -807,6 +803,7 @@ CONFIG_OPROFILE_NMI_TIMER=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
# CONFIG_STATIC_KEYS_SELFTEST is not set
+# CONFIG_STATIC_CALL_SELFTEST is not set
CONFIG_OPTPROBES=y
CONFIG_KPROBES_ON_FTRACE=y
CONFIG_UPROBES=y
@@ -850,7 +847,9 @@ CONFIG_HAVE_CMPXCHG_LOCAL=y
CONFIG_HAVE_CMPXCHG_DOUBLE=y
CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
+CONFIG_HAVE_ARCH_SECCOMP=y
CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP=y
CONFIG_SECCOMP_FILTER=y
CONFIG_HAVE_ARCH_STACKLEAK=y
CONFIG_HAVE_STACKPROTECTOR=y
@@ -891,6 +890,9 @@ CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
CONFIG_ARCH_USE_MEMREMAP_PROT=y
CONFIG_LOCK_EVENT_COUNTS=y
CONFIG_ARCH_HAS_MEM_ENCRYPT=y
+CONFIG_HAVE_STATIC_CALL=y
+CONFIG_HAVE_STATIC_CALL_INLINE=y
+CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
#
# GCOV-based kernel profiling
@@ -901,7 +903,6 @@ CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
CONFIG_HAVE_GCC_PLUGINS=y
CONFIG_GCC_PLUGINS=y
-# CONFIG_GCC_PLUGIN_CYC_COMPLEXITY is not set
# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set
# CONFIG_GCC_PLUGIN_RANDSTRUCT is not set
# end of General architecture-dependent options
@@ -1065,7 +1066,10 @@ CONFIG_ARCH_WANTS_THP_SWAP=y
CONFIG_THP_SWAP=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
-# CONFIG_CMA is not set
+CONFIG_CMA=y
+# CONFIG_CMA_DEBUG is not set
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
@@ -1084,7 +1088,6 @@ CONFIG_ZPOOL=y
CONFIG_ZBUD=y
CONFIG_Z3FOLD=y
CONFIG_ZSMALLOC=y
-# CONFIG_ZSMALLOC_PGTABLE_MAPPING is not set
# CONFIG_ZSMALLOC_STAT is not set
CONFIG_GENERIC_EARLY_IOREMAP=y
# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
@@ -1094,6 +1097,7 @@ CONFIG_ZONE_DEVICE=y
CONFIG_DEV_PAGEMAP_OPS=y
CONFIG_HMM_MIRROR=y
CONFIG_DEVICE_PRIVATE=y
+CONFIG_VMAP_PFN=y
CONFIG_FRAME_VECTOR=y
CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
CONFIG_ARCH_HAS_PKEYS=y
@@ -1115,17 +1119,18 @@ CONFIG_SKB_EXTENSIONS=y
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_DIAG=y
+CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_SCM=y
-CONFIG_UNIX_DIAG=y
+CONFIG_UNIX_DIAG=m
CONFIG_TLS=m
CONFIG_TLS_DEVICE=y
# CONFIG_TLS_TOE is not set
CONFIG_XFRM=y
CONFIG_XFRM_OFFLOAD=y
-CONFIG_XFRM_ALGO=m
-CONFIG_XFRM_USER=m
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_USER_COMPAT is not set
CONFIG_XFRM_INTERFACE=m
CONFIG_XFRM_SUB_POLICY=y
CONFIG_XFRM_MIGRATE=y
@@ -1139,11 +1144,11 @@ CONFIG_XFRM_ESPINTCP=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_XDP_SOCKETS=y
-CONFIG_XDP_SOCKETS_DIAG=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
-# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_FIB_TRIE_STATS=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
@@ -1153,7 +1158,7 @@ CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IP_TUNNEL=m
CONFIG_NET_IPGRE=m
-# CONFIG_NET_IPGRE_BROADCAST is not set
+CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE_COMMON=y
CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
@@ -1587,24 +1592,7 @@ CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
# CONFIG_BPFILTER is not set
-CONFIG_IP_DCCP=m
-CONFIG_INET_DCCP_DIAG=m
-
-#
-# DCCP CCIDs Configuration
-#
-# CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=y
-# CONFIG_IP_DCCP_CCID3_DEBUG is not set
-CONFIG_IP_DCCP_TFRC_LIB=y
-# end of DCCP CCIDs Configuration
-
-#
-# DCCP Kernel Hacking
-#
-# CONFIG_IP_DCCP_DEBUG is not set
-# end of DCCP Kernel Hacking
-
+# CONFIG_IP_DCCP is not set
CONFIG_IP_SCTP=m
# CONFIG_SCTP_DBG_OBJCNT is not set
# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
@@ -1630,7 +1618,7 @@ CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
# CONFIG_ATM_BR2684_IPFILTER is not set
CONFIG_L2TP=m
-# CONFIG_L2TP_DEBUGFS is not set
+CONFIG_L2TP_DEBUGFS=m
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
@@ -1673,7 +1661,7 @@ CONFIG_IPDDP_ENCAP=y
# CONFIG_LAPB is not set
CONFIG_PHONET=m
CONFIG_6LOWPAN=m
-# CONFIG_6LOWPAN_DEBUGFS is not set
+CONFIG_6LOWPAN_DEBUGFS=y
CONFIG_6LOWPAN_NHC=m
CONFIG_6LOWPAN_NHC_DEST=m
CONFIG_6LOWPAN_NHC_FRAGMENT=m
@@ -1878,6 +1866,7 @@ CONFIG_CAN_RAW=m
CONFIG_CAN_BCM=m
CONFIG_CAN_GW=m
CONFIG_CAN_J1939=m
+CONFIG_CAN_ISOTP=m
#
# CAN Device Drivers
@@ -1887,8 +1876,6 @@ CONFIG_CAN_VXCAN=m
CONFIG_CAN_SLCAN=m
CONFIG_CAN_DEV=m
CONFIG_CAN_CALC_BITTIMING=y
-CONFIG_CAN_FLEXCAN=m
-CONFIG_CAN_GRCAN=m
CONFIG_CAN_JANZ_ICAN3=m
CONFIG_CAN_KVASER_PCIEFD=m
CONFIG_CAN_C_CAN=m
@@ -1921,6 +1908,8 @@ CONFIG_CAN_SOFTING_CS=m
#
CONFIG_CAN_HI311X=m
CONFIG_CAN_MCP251X=m
+CONFIG_CAN_MCP251XFD=m
+# CONFIG_CAN_MCP251XFD_SANITY is not set
# end of CAN SPI interfaces
#
@@ -2015,7 +2004,6 @@ CONFIG_WEXT_PRIV=y
CONFIG_CFG80211=m
# CONFIG_NL80211_TESTMODE is not set
# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
-# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
CONFIG_CFG80211_DEFAULT_PS=y
@@ -2050,10 +2038,7 @@ CONFIG_NET_9P_VIRTIO=m
CONFIG_NET_9P_XEN=m
CONFIG_NET_9P_RDMA=m
# CONFIG_NET_9P_DEBUG is not set
-CONFIG_CAIF=m
-# CONFIG_CAIF_DEBUG is not set
-CONFIG_CAIF_NETDEV=m
-CONFIG_CAIF_USB=m
+# CONFIG_CAIF is not set
CONFIG_CEPH_LIB=m
CONFIG_CEPH_LIB_PRETTYDEBUG=y
CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
@@ -2126,7 +2111,7 @@ CONFIG_PCI_DOMAINS=y
CONFIG_PCIEPORTBUS=y
CONFIG_HOTPLUG_PCI_PCIE=y
CONFIG_PCIEAER=y
-# CONFIG_PCIEAER_INJECT is not set
+CONFIG_PCIEAER_INJECT=m
CONFIG_PCIE_ECRC=y
CONFIG_PCIEASPM=y
CONFIG_PCIEASPM_DEFAULT=y
@@ -2142,12 +2127,11 @@ CONFIG_PCI_MSI=y
CONFIG_PCI_MSI_IRQ_DOMAIN=y
CONFIG_PCI_QUIRKS=y
# CONFIG_PCI_DEBUG is not set
-CONFIG_PCI_REALLOC_ENABLE_AUTO=y
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
CONFIG_PCI_STUB=y
CONFIG_PCI_PF_STUB=m
CONFIG_XEN_PCIDEV_FRONTEND=m
CONFIG_PCI_ATS=y
-CONFIG_PCI_ECAM=y
CONFIG_PCI_LOCKLESS_CONFIG=y
CONFIG_PCI_IOV=y
CONFIG_PCI_PRI=y
@@ -2166,10 +2150,6 @@ CONFIG_HOTPLUG_PCI_SHPC=y
#
# PCI controller drivers
#
-CONFIG_PCI_FTPCI100=y
-CONFIG_PCI_HOST_COMMON=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCIE_XILINX=y
CONFIG_VMD=m
CONFIG_PCI_HYPERV_INTERFACE=m
@@ -2178,11 +2158,8 @@ CONFIG_PCI_HYPERV_INTERFACE=m
#
CONFIG_PCIE_DW=y
CONFIG_PCIE_DW_HOST=y
-CONFIG_PCIE_DW_EP=y
CONFIG_PCIE_DW_PLAT=y
CONFIG_PCIE_DW_PLAT_HOST=y
-CONFIG_PCIE_DW_PLAT_EP=y
-CONFIG_PCIE_INTEL_GW=y
CONFIG_PCI_MESON=y
# end of DesignWare PCI Core Support
@@ -2194,24 +2171,13 @@ CONFIG_PCI_MESON=y
#
# Cadence PCIe controllers support
#
-CONFIG_PCIE_CADENCE=y
-CONFIG_PCIE_CADENCE_HOST=y
-CONFIG_PCIE_CADENCE_EP=y
-CONFIG_PCIE_CADENCE_PLAT=y
-CONFIG_PCIE_CADENCE_PLAT_HOST=y
-CONFIG_PCIE_CADENCE_PLAT_EP=y
-CONFIG_PCI_J721E=y
-CONFIG_PCI_J721E_HOST=y
-CONFIG_PCI_J721E_EP=y
# end of Cadence PCIe controllers support
# end of PCI controller drivers
#
# PCI Endpoint
#
-CONFIG_PCI_ENDPOINT=y
-CONFIG_PCI_ENDPOINT_CONFIGFS=y
-# CONFIG_PCI_EPF_TEST is not set
+# CONFIG_PCI_ENDPOINT is not set
# end of PCI Endpoint
#
@@ -2237,25 +2203,7 @@ CONFIG_YENTA_TOSHIBA=y
CONFIG_PD6729=m
CONFIG_I82092=m
CONFIG_PCCARD_NONSTATIC=y
-CONFIG_RAPIDIO=m
-CONFIG_RAPIDIO_TSI721=m
-CONFIG_RAPIDIO_DISC_TIMEOUT=30
-CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
-CONFIG_RAPIDIO_DMA_ENGINE=y
-# CONFIG_RAPIDIO_DEBUG is not set
-CONFIG_RAPIDIO_ENUM_BASIC=m
-CONFIG_RAPIDIO_CHMAN=m
-CONFIG_RAPIDIO_MPORT_CDEV=m
-
-#
-# RapidIO Switch drivers
-#
-CONFIG_RAPIDIO_TSI57X=m
-CONFIG_RAPIDIO_CPS_XX=m
-CONFIG_RAPIDIO_TSI568=m
-CONFIG_RAPIDIO_CPS_GEN2=m
-CONFIG_RAPIDIO_RXS_GEN3=m
-# end of RapidIO Switch drivers
+# CONFIG_RAPIDIO is not set
#
# Generic Driver Options
@@ -2292,13 +2240,12 @@ CONFIG_REGMAP=y
CONFIG_REGMAP_I2C=y
CONFIG_REGMAP_SLIMBUS=m
CONFIG_REGMAP_SPI=y
-CONFIG_REGMAP_SPMI=m
CONFIG_REGMAP_W1=m
CONFIG_REGMAP_MMIO=y
CONFIG_REGMAP_IRQ=y
CONFIG_REGMAP_SOUNDWIRE=m
CONFIG_REGMAP_SCCB=m
-CONFIG_REGMAP_I3C=m
+CONFIG_REGMAP_SPI_AVMM=m
CONFIG_DMA_SHARED_BUFFER=y
# CONFIG_DMA_FENCE_TRACE is not set
# end of Generic Driver Options
@@ -2306,9 +2253,8 @@ CONFIG_DMA_SHARED_BUFFER=y
#
# Bus devices
#
-CONFIG_MOXTET=m
-CONFIG_SIMPLE_PM_BUS=y
CONFIG_MHI_BUS=m
+# CONFIG_MHI_BUS_DEBUG is not set
# end of Bus devices
CONFIG_CONNECTOR=y
@@ -2319,18 +2265,14 @@ CONFIG_GNSS_MTK_SERIAL=m
CONFIG_GNSS_SIRF_SERIAL=m
CONFIG_GNSS_UBX_SERIAL=m
CONFIG_MTD=m
-CONFIG_MTD_TESTS=m
+# CONFIG_MTD_TESTS is not set
#
# Partition parsers
#
-CONFIG_MTD_AR7_PARTS=m
-CONFIG_MTD_CMDLINE_PARTS=m
-CONFIG_MTD_OF_PARTS=m
-CONFIG_MTD_REDBOOT_PARTS=m
-CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_AR7_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
# end of Partition parsers
#
@@ -2338,164 +2280,109 @@ CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
#
CONFIG_MTD_BLKDEVS=m
CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-CONFIG_RFD_FTL=m
-CONFIG_SSFDC=m
-CONFIG_SM_FTL=m
-CONFIG_MTD_OOPS=m
+# CONFIG_MTD_BLOCK_RO is not set
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
CONFIG_MTD_PSTORE=m
-CONFIG_MTD_SWAP=m
-CONFIG_MTD_PARTITIONED_MASTER=y
+# CONFIG_MTD_SWAP is not set
+# CONFIG_MTD_PARTITIONED_MASTER is not set
#
# RAM/ROM/Flash chip drivers
#
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
-# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
CONFIG_MTD_MAP_BANK_WIDTH_4=y
CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
-CONFIG_MTD_RAM=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
# end of RAM/ROM/Flash chip drivers
#
# Mapping drivers for chip access
#
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=m
-# CONFIG_MTD_PHYSMAP_COMPAT is not set
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PHYSMAP_VERSATILE=y
-CONFIG_MTD_PHYSMAP_GEMINI=y
-CONFIG_MTD_PHYSMAP_GPIO_ADDR=y
-CONFIG_MTD_SBC_GXX=m
-CONFIG_MTD_AMD76XROM=m
-CONFIG_MTD_ICHXROM=m
-CONFIG_MTD_ESB2ROM=m
-CONFIG_MTD_CK804XROM=m
-CONFIG_MTD_SCB2_FLASH=m
-CONFIG_MTD_NETtel=m
-CONFIG_MTD_L440GX=m
-CONFIG_MTD_PCI=m
-CONFIG_MTD_PCMCIA=m
-# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
-CONFIG_MTD_INTEL_VR_NOR=m
-CONFIG_MTD_PLATRAM=m
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
# end of Mapping drivers for chip access
#
# Self-contained MTD device drivers
#
-CONFIG_MTD_PMC551=m
-# CONFIG_MTD_PMC551_BUGFIX is not set
-# CONFIG_MTD_PMC551_DEBUG is not set
-CONFIG_MTD_DATAFLASH=m
-# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
-CONFIG_MTD_DATAFLASH_OTP=y
-CONFIG_MTD_MCHP23K256=m
-CONFIG_MTD_SST25L=m
-CONFIG_MTD_SLRAM=m
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTDRAM_TOTAL_SIZE=4096
-CONFIG_MTDRAM_ERASE_SIZE=128
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_MCHP23K256 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
CONFIG_MTD_BLOCK2MTD=m
#
# Disk-On-Chip Device Drivers
#
-CONFIG_MTD_DOCG3=m
-CONFIG_BCH_CONST_M=14
-CONFIG_BCH_CONST_T=4
+# CONFIG_MTD_DOCG3 is not set
# end of Self-contained MTD device drivers
#
# NAND
#
CONFIG_MTD_NAND_CORE=m
-CONFIG_MTD_ONENAND=m
-# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
-CONFIG_MTD_ONENAND_GENERIC=m
-CONFIG_MTD_ONENAND_OTP=y
-CONFIG_MTD_ONENAND_2X_PROGRAM=y
+# CONFIG_MTD_ONENAND is not set
CONFIG_MTD_NAND_ECC_SW_HAMMING=m
CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
CONFIG_MTD_RAW_NAND=m
-CONFIG_MTD_NAND_ECC_SW_BCH=y
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
#
# Raw/parallel NAND flash controllers
#
-CONFIG_MTD_NAND_DENALI=m
-CONFIG_MTD_NAND_DENALI_PCI=m
-CONFIG_MTD_NAND_DENALI_DT=m
-CONFIG_MTD_NAND_CAFE=m
-CONFIG_MTD_NAND_MXIC=m
-CONFIG_MTD_NAND_GPIO=m
-CONFIG_MTD_NAND_PLATFORM=m
-CONFIG_MTD_NAND_CADENCE=m
-CONFIG_MTD_NAND_ARASAN=m
+# CONFIG_MTD_NAND_DENALI_PCI is not set
+# CONFIG_MTD_NAND_CAFE is not set
+# CONFIG_MTD_NAND_MXIC is not set
+# CONFIG_MTD_NAND_GPIO is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_NAND_ARASAN is not set
#
# Misc
#
-CONFIG_MTD_SM_COMMON=m
CONFIG_MTD_NAND_NANDSIM=m
-CONFIG_MTD_NAND_RICOH=m
-CONFIG_MTD_NAND_DISKONCHIP=m
-# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
-CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
-CONFIG_MTD_SPI_NAND=m
+# CONFIG_MTD_NAND_RICOH is not set
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_SPI_NAND is not set
+
+#
+# ECC engine support
+#
+CONFIG_MTD_NAND_ECC=y
+# end of ECC engine support
# end of NAND
#
# LPDDR & LPDDR2 PCM memory drivers
#
-CONFIG_MTD_LPDDR=m
-CONFIG_MTD_QINFO_PROBE=m
+# CONFIG_MTD_LPDDR is not set
# end of LPDDR & LPDDR2 PCM memory drivers
-CONFIG_MTD_SPI_NOR=m
-CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
-CONFIG_SPI_INTEL_SPI=m
-CONFIG_SPI_INTEL_SPI_PCI=m
-CONFIG_SPI_INTEL_SPI_PLATFORM=m
+# CONFIG_MTD_SPI_NOR is not set
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_WL_THRESHOLD=4096
CONFIG_MTD_UBI_BEB_LIMIT=20
-CONFIG_MTD_UBI_FASTMAP=y
-CONFIG_MTD_UBI_GLUEBI=m
-CONFIG_MTD_UBI_BLOCK=y
-CONFIG_MTD_HYPERBUS=m
-CONFIG_DTC=y
-CONFIG_OF=y
-# CONFIG_OF_UNITTEST is not set
-CONFIG_OF_FLATTREE=y
-CONFIG_OF_EARLY_FLATTREE=y
-CONFIG_OF_KOBJ=y
-CONFIG_OF_DYNAMIC=y
-CONFIG_OF_ADDRESS=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_NET=y
-CONFIG_OF_MDIO=m
-CONFIG_OF_RESERVED_MEM=y
-CONFIG_OF_RESOLVE=y
-CONFIG_OF_OVERLAY=y
+# CONFIG_MTD_UBI_FASTMAP is not set
+# CONFIG_MTD_UBI_GLUEBI is not set
+# CONFIG_MTD_UBI_BLOCK is not set
+# CONFIG_MTD_HYPERBUS is not set
+# CONFIG_OF is not set
CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
@@ -2514,7 +2401,7 @@ CONFIG_PNP_DEBUG_MESSAGES=y
#
CONFIG_PNPACPI=y
CONFIG_BLK_DEV=y
-# CONFIG_BLK_DEV_NULL_BLK is not set
+CONFIG_BLK_DEV_NULL_BLK=m
CONFIG_BLK_DEV_FD=m
CONFIG_CDROM=m
# CONFIG_PARIDE is not set
@@ -2524,7 +2411,7 @@ CONFIG_ZRAM_WRITEBACK=y
# CONFIG_ZRAM_MEMORY_TRACKING is not set
CONFIG_BLK_DEV_UMEM=m
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
# CONFIG_DRBD_FAULT_INJECTION is not set
@@ -2630,20 +2517,6 @@ CONFIG_INTEL_MEI_ME=m
CONFIG_INTEL_MEI_TXE=m
CONFIG_INTEL_MEI_HDCP=m
CONFIG_VMWARE_VMCI=m
-
-#
-# Intel MIC & related support
-#
-CONFIG_INTEL_MIC_BUS=m
-CONFIG_SCIF_BUS=m
-CONFIG_VOP_BUS=m
-CONFIG_INTEL_MIC_HOST=m
-CONFIG_INTEL_MIC_CARD=m
-CONFIG_SCIF=m
-CONFIG_MIC_COSM=m
-CONFIG_VOP=m
-# end of Intel MIC & related support
-
CONFIG_GENWQE=m
CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
CONFIG_ECHO=m
@@ -2828,8 +2701,6 @@ CONFIG_SATA_PMP=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_MOBILE_LPM_POLICY=3
CONFIG_SATA_AHCI_PLATFORM=m
-CONFIG_AHCI_CEVA=m
-CONFIG_AHCI_QORIQ=m
CONFIG_SATA_INIC162X=m
CONFIG_SATA_ACARD_AHCI=m
CONFIG_SATA_SIL24=m
@@ -2906,7 +2777,6 @@ CONFIG_PATA_MPIIX=m
CONFIG_PATA_NS87410=m
CONFIG_PATA_OPTI=m
CONFIG_PATA_PCMCIA=m
-# CONFIG_PATA_PLATFORM is not set
CONFIG_PATA_RZ1000=m
#
@@ -2933,7 +2803,8 @@ CONFIG_BLK_DEV_DM_BUILTIN=y
CONFIG_BLK_DEV_DM=m
CONFIG_DM_DEBUG=y
CONFIG_DM_BUFIO=m
-# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y
+# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set
CONFIG_DM_BIO_PRISON=m
CONFIG_DM_PERSISTENT_DATA=m
CONFIG_DM_UNSTRIPED=m
@@ -3027,9 +2898,6 @@ CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_NETPOLL=y
CONFIG_NET_POLL_CONTROLLER=y
CONFIG_NTB_NETDEV=m
-CONFIG_RIONET=m
-CONFIG_RIONET_TX_SIZE=128
-CONFIG_RIONET_RX_SIZE=128
CONFIG_TUN=m
CONFIG_TAP=m
# CONFIG_TUN_VNET_CROSS_LE is not set
@@ -3070,18 +2938,12 @@ CONFIG_ATM_FORE200E_DEBUG=0
CONFIG_ATM_HE=m
CONFIG_ATM_HE_USE_SUNI=y
CONFIG_ATM_SOLOS=m
-CONFIG_CAIF_DRIVERS=y
-CONFIG_CAIF_TTY=m
-CONFIG_CAIF_SPI_SLAVE=m
-CONFIG_CAIF_SPI_SYNC=y
-CONFIG_CAIF_HSI=m
-CONFIG_CAIF_VIRTIO=m
#
# Distributed Switch Architecture drivers
#
CONFIG_B53=m
-# CONFIG_B53_SPI_DRIVER is not set
+CONFIG_B53_SPI_DRIVER=m
CONFIG_B53_MDIO_DRIVER=m
CONFIG_B53_MMAP_DRIVER=m
CONFIG_B53_SRAB_DRIVER=m
@@ -3100,6 +2962,7 @@ CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI=m
CONFIG_NET_DSA_MV88E6XXX=m
CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y
CONFIG_NET_DSA_MV88E6XXX_PTP=y
+CONFIG_NET_DSA_MSCC_SEVILLE=m
CONFIG_NET_DSA_AR9331=m
CONFIG_NET_DSA_SJA1105=m
CONFIG_NET_DSA_SJA1105_PTP=y
@@ -3193,10 +3056,12 @@ CONFIG_CHELSIO_T4_DCB=y
CONFIG_CHELSIO_T4_FCOE=y
CONFIG_CHELSIO_T4VF=m
CONFIG_CHELSIO_LIB=m
+CONFIG_CHELSIO_INLINE_CRYPTO=y
+CONFIG_CHELSIO_IPSEC_INLINE=m
+CONFIG_CHELSIO_TLS_DEVICE=m
CONFIG_NET_VENDOR_CISCO=y
CONFIG_ENIC=m
CONFIG_NET_VENDOR_CORTINA=y
-CONFIG_GEMINI_ETHERNET=m
CONFIG_CX_ECAT=m
CONFIG_DNET=m
CONFIG_NET_VENDOR_DEC=y
@@ -3225,7 +3090,6 @@ CONFIG_BE2NET_BE3=y
CONFIG_BE2NET_LANCER=y
CONFIG_BE2NET_SKYHAWK=y
CONFIG_NET_VENDOR_EZCHIP=y
-CONFIG_EZCHIP_NPS_MANAGEMENT_ENET=m
CONFIG_NET_VENDOR_FUJITSU=y
CONFIG_PCMCIA_FMVJ18X=m
CONFIG_NET_VENDOR_GOOGLE=y
@@ -3265,6 +3129,8 @@ CONFIG_SKGE=m
CONFIG_SKGE_GENESIS=y
CONFIG_SKY2=m
# CONFIG_SKY2_DEBUG is not set
+CONFIG_PRESTERA=m
+CONFIG_PRESTERA_PCI=m
CONFIG_NET_VENDOR_MELLANOX=y
CONFIG_MLX4_EN=m
CONFIG_MLX4_EN_DCB=y
@@ -3313,7 +3179,6 @@ CONFIG_ENCX24J600=m
CONFIG_LAN743X=m
CONFIG_NET_VENDOR_MICROSEMI=y
CONFIG_MSCC_OCELOT_SWITCH_LIB=m
-CONFIG_MSCC_OCELOT_SWITCH=m
CONFIG_NET_VENDOR_MYRI=y
CONFIG_MYRI10GE=m
CONFIG_MYRI10GE_DCA=y
@@ -3361,9 +3226,6 @@ CONFIG_QED_ISCSI=y
CONFIG_QED_FCOE=y
CONFIG_QED_OOO=y
CONFIG_NET_VENDOR_QUALCOMM=y
-CONFIG_QCA7000=m
-CONFIG_QCA7000_SPI=m
-CONFIG_QCA7000_UART=m
CONFIG_QCOM_EMAC=m
CONFIG_RMNET=m
CONFIG_NET_VENDOR_RDC=y
@@ -3406,7 +3268,6 @@ CONFIG_NET_VENDOR_STMICRO=y
CONFIG_STMMAC_ETH=m
# CONFIG_STMMAC_SELFTESTS is not set
CONFIG_STMMAC_PLATFORM=m
-CONFIG_DWMAC_DWC_QOS_ETH=m
CONFIG_DWMAC_GENERIC=m
CONFIG_DWMAC_INTEL=m
CONFIG_STMMAC_PCI=m
@@ -3445,58 +3306,33 @@ CONFIG_DEFXX_MMIO=y
CONFIG_SKFP=m
# CONFIG_HIPPI is not set
CONFIG_NET_SB1000=m
-CONFIG_MDIO_DEVICE=m
-CONFIG_MDIO_BUS=m
-CONFIG_MDIO_DEVRES=m
-CONFIG_MDIO_BCM_UNIMAC=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_MDIO_BUS_MUX=m
-CONFIG_MDIO_BUS_MUX_GPIO=m
-CONFIG_MDIO_BUS_MUX_MMIOREG=m
-CONFIG_MDIO_BUS_MUX_MULTIPLEXER=m
-CONFIG_MDIO_CAVIUM=m
-CONFIG_MDIO_GPIO=m
-CONFIG_MDIO_HISI_FEMAC=m
-CONFIG_MDIO_I2C=m
-CONFIG_MDIO_IPQ4019=m
-CONFIG_MDIO_IPQ8064=m
-CONFIG_MDIO_MSCC_MIIM=m
-CONFIG_MDIO_MVUSB=m
-CONFIG_MDIO_OCTEON=m
-CONFIG_MDIO_THUNDER=m
-CONFIG_MDIO_XPCS=m
CONFIG_PHYLINK=m
CONFIG_PHYLIB=m
CONFIG_SWPHY=y
CONFIG_LED_TRIGGER_PHY=y
+CONFIG_FIXED_PHY=m
+CONFIG_SFP=m
#
# MII PHY device drivers
#
-CONFIG_SFP=m
-CONFIG_ADIN_PHY=m
CONFIG_AMD_PHY=m
+CONFIG_ADIN_PHY=m
CONFIG_AQUANTIA_PHY=m
CONFIG_AX88796B_PHY=m
-CONFIG_BCM7XXX_PHY=m
-CONFIG_BCM87XX_PHY=m
-CONFIG_BCM_NET_PHYLIB=m
CONFIG_BROADCOM_PHY=m
CONFIG_BCM54140_PHY=m
+CONFIG_BCM7XXX_PHY=m
CONFIG_BCM84881_PHY=m
+CONFIG_BCM87XX_PHY=m
+CONFIG_BCM_NET_PHYLIB=m
CONFIG_CICADA_PHY=m
CONFIG_CORTINA_PHY=m
CONFIG_DAVICOM_PHY=m
-CONFIG_DP83822_PHY=m
-CONFIG_DP83TC811_PHY=m
-CONFIG_DP83848_PHY=m
-CONFIG_DP83867_PHY=m
-CONFIG_DP83869_PHY=m
-CONFIG_FIXED_PHY=m
CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
CONFIG_INTEL_XWAY_PHY=m
CONFIG_LSI_ET1011C_PHY=m
-CONFIG_LXT_PHY=m
CONFIG_MARVELL_PHY=m
CONFIG_MARVELL_10G_PHY=m
CONFIG_MICREL_PHY=m
@@ -3513,9 +3349,37 @@ CONFIG_ROCKCHIP_PHY=m
CONFIG_SMSC_PHY=m
CONFIG_STE10XP=m
CONFIG_TERANETICS_PHY=m
+CONFIG_DP83822_PHY=m
+CONFIG_DP83TC811_PHY=m
+CONFIG_DP83848_PHY=m
+CONFIG_DP83867_PHY=m
+CONFIG_DP83869_PHY=m
CONFIG_VITESSE_PHY=m
CONFIG_XILINX_GMII2RGMII=m
CONFIG_MICREL_KS8995MA=m
+CONFIG_MDIO_DEVICE=m
+CONFIG_MDIO_BUS=m
+CONFIG_MDIO_DEVRES=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_BCM_UNIMAC=m
+CONFIG_MDIO_CAVIUM=m
+CONFIG_MDIO_GPIO=m
+CONFIG_MDIO_I2C=m
+CONFIG_MDIO_MVUSB=m
+CONFIG_MDIO_MSCC_MIIM=m
+CONFIG_MDIO_THUNDER=m
+
+#
+# MDIO Multiplexers
+#
+
+#
+# PCS device drivers
+#
+CONFIG_PCS_XPCS=m
+CONFIG_PCS_LYNX=m
+# end of PCS device drivers
+
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -3580,7 +3444,6 @@ CONFIG_USB_VL600=m
CONFIG_USB_NET_CH9200=m
CONFIG_USB_NET_AQC111=m
CONFIG_WLAN=y
-# CONFIG_WIRELESS_WDS is not set
CONFIG_WLAN_VENDOR_ADMTEK=y
CONFIG_ADM8211=m
CONFIG_ATH_COMMON=m
@@ -3627,7 +3490,6 @@ CONFIG_WIL6210_DEBUGFS=y
CONFIG_ATH10K=m
CONFIG_ATH10K_CE=y
CONFIG_ATH10K_PCI=m
-CONFIG_ATH10K_AHB=y
CONFIG_ATH10K_SDIO=m
CONFIG_ATH10K_USB=m
CONFIG_ATH10K_DEBUG=y
@@ -3636,6 +3498,13 @@ CONFIG_ATH10K_SPECTRAL=y
CONFIG_ATH10K_TRACING=y
CONFIG_WCN36XX=m
CONFIG_WCN36XX_DEBUGFS=y
+CONFIG_ATH11K=m
+CONFIG_ATH11K_AHB=m
+CONFIG_ATH11K_PCI=m
+CONFIG_ATH11K_DEBUG=y
+CONFIG_ATH11K_DEBUGFS=y
+# CONFIG_ATH11K_TRACING is not set
+CONFIG_ATH11K_SPECTRAL=y
CONFIG_WLAN_VENDOR_ATMEL=y
CONFIG_ATMEL=m
CONFIG_PCI_ATMEL=m
@@ -3871,7 +3740,6 @@ CONFIG_WL1251_SDIO=m
CONFIG_WL12XX=m
CONFIG_WL18XX=m
CONFIG_WLCORE=m
-CONFIG_WLCORE_SPI=m
CONFIG_WLCORE_SDIO=m
CONFIG_WILINK_PLATFORM_DATA=y
CONFIG_WLAN_VENDOR_ZYDAS=y
@@ -3946,7 +3814,7 @@ CONFIG_NVM_PBLK=m
# Input device support
#
CONFIG_INPUT=y
-CONFIG_INPUT_LEDS=m
+CONFIG_INPUT_LEDS=y
CONFIG_INPUT_FF_MEMLESS=m
CONFIG_INPUT_POLLDEV=m
CONFIG_INPUT_SPARSEKMAP=m
@@ -3960,7 +3828,7 @@ CONFIG_INPUT_MOUSEDEV_PSAUX=y
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
CONFIG_INPUT_JOYDEV=m
-CONFIG_INPUT_EVDEV=m
+CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set
#
@@ -3972,7 +3840,7 @@ CONFIG_KEYBOARD_ADP5520=m
CONFIG_KEYBOARD_ADP5588=m
CONFIG_KEYBOARD_ADP5589=m
CONFIG_KEYBOARD_APPLESPI=m
-CONFIG_KEYBOARD_ATKBD=m
+CONFIG_KEYBOARD_ATKBD=y
CONFIG_KEYBOARD_QT1050=m
CONFIG_KEYBOARD_QT1070=m
CONFIG_KEYBOARD_QT2160=m
@@ -3993,19 +3861,14 @@ CONFIG_KEYBOARD_OPENCORES=m
CONFIG_KEYBOARD_SAMSUNG=m
CONFIG_KEYBOARD_STOWAWAY=m
CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_STMPE=m
CONFIG_KEYBOARD_IQS62X=m
-CONFIG_KEYBOARD_OMAP4=m
-CONFIG_KEYBOARD_TC3589X=m
CONFIG_KEYBOARD_TM2_TOUCHKEY=m
CONFIG_KEYBOARD_TWL4030=m
CONFIG_KEYBOARD_XTKBD=m
CONFIG_KEYBOARD_CROS_EC=m
-CONFIG_KEYBOARD_CAP11XX=m
-CONFIG_KEYBOARD_BCM=m
CONFIG_KEYBOARD_MTK_PMIC=m
CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_PS2_ALPS=y
CONFIG_MOUSE_PS2_BYD=y
CONFIG_MOUSE_PS2_LOGIPS2PP=y
@@ -4035,6 +3898,7 @@ CONFIG_MOUSE_SYNAPTICS_USB=m
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_ANALOG=m
CONFIG_JOYSTICK_A3D=m
+CONFIG_JOYSTICK_ADC=m
CONFIG_JOYSTICK_ADI=m
CONFIG_JOYSTICK_COBRA=m
CONFIG_JOYSTICK_GF2K=m
@@ -4084,13 +3948,11 @@ CONFIG_TOUCHSCREEN_AD7879=m
CONFIG_TOUCHSCREEN_AD7879_I2C=m
CONFIG_TOUCHSCREEN_AD7879_SPI=m
CONFIG_TOUCHSCREEN_ADC=m
-CONFIG_TOUCHSCREEN_AR1021_I2C=m
CONFIG_TOUCHSCREEN_ATMEL_MXT=m
CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y
CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
CONFIG_TOUCHSCREEN_BU21013=m
CONFIG_TOUCHSCREEN_BU21029=m
-CONFIG_TOUCHSCREEN_CHIPONE_ICN8318=m
CONFIG_TOUCHSCREEN_CHIPONE_ICN8505=m
CONFIG_TOUCHSCREEN_CY8CTMA140=m
CONFIG_TOUCHSCREEN_CY8CTMG110=m
@@ -4105,7 +3967,6 @@ CONFIG_TOUCHSCREEN_DA9052=m
CONFIG_TOUCHSCREEN_DYNAPRO=m
CONFIG_TOUCHSCREEN_HAMPSHIRE=m
CONFIG_TOUCHSCREEN_EETI=m
-CONFIG_TOUCHSCREEN_EGALAX=m
CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m
CONFIG_TOUCHSCREEN_EXC3000=m
CONFIG_TOUCHSCREEN_FUJITSU=m
@@ -4124,7 +3985,6 @@ CONFIG_TOUCHSCREEN_MCS5000=m
CONFIG_TOUCHSCREEN_MMS114=m
CONFIG_TOUCHSCREEN_MELFAS_MIP4=m
CONFIG_TOUCHSCREEN_MTOUCH=m
-CONFIG_TOUCHSCREEN_IMX6UL_TSC=m
CONFIG_TOUCHSCREEN_INEXIO=m
CONFIG_TOUCHSCREEN_MK712=m
CONFIG_TOUCHSCREEN_PENMOUNT=m
@@ -4173,16 +4033,15 @@ CONFIG_TOUCHSCREEN_SILEAD=m
CONFIG_TOUCHSCREEN_SIS_I2C=m
CONFIG_TOUCHSCREEN_ST1232=m
CONFIG_TOUCHSCREEN_STMFTS=m
-CONFIG_TOUCHSCREEN_STMPE=m
CONFIG_TOUCHSCREEN_SUR40=m
CONFIG_TOUCHSCREEN_SURFACE3_SPI=m
CONFIG_TOUCHSCREEN_SX8654=m
CONFIG_TOUCHSCREEN_TPS6507X=m
CONFIG_TOUCHSCREEN_ZET6223=m
CONFIG_TOUCHSCREEN_ZFORCE=m
-CONFIG_TOUCHSCREEN_COLIBRI_VF50=m
CONFIG_TOUCHSCREEN_ROHM_BU21023=m
CONFIG_TOUCHSCREEN_IQS5XX=m
+CONFIG_TOUCHSCREEN_ZINITIX=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_88PM860X_ONKEY=m
CONFIG_INPUT_88PM80X_ONKEY=m
@@ -4190,11 +4049,9 @@ CONFIG_INPUT_AD714X=m
CONFIG_INPUT_AD714X_I2C=m
CONFIG_INPUT_AD714X_SPI=m
CONFIG_INPUT_ARIZONA_HAPTICS=m
-CONFIG_INPUT_ATMEL_CAPTOUCH=m
CONFIG_INPUT_BMA150=m
CONFIG_INPUT_E3X0_BUTTON=m
CONFIG_INPUT_PCSPKR=m
-CONFIG_INPUT_MAX77650_ONKEY=m
CONFIG_INPUT_MAX77693_HAPTIC=m
CONFIG_INPUT_MAX8925_ONKEY=m
CONFIG_INPUT_MAX8997_HAPTIC=m
@@ -4204,7 +4061,6 @@ CONFIG_INPUT_APANEL=m
CONFIG_INPUT_GPIO_BEEPER=m
CONFIG_INPUT_GPIO_DECODER=m
CONFIG_INPUT_GPIO_VIBRA=m
-CONFIG_INPUT_CPCAP_PWRBUTTON=m
CONFIG_INPUT_ATLAS_BTNS=m
CONFIG_INPUT_ATI_REMOTE2=m
CONFIG_INPUT_KEYSPAN_REMOTE=m
@@ -4214,7 +4070,6 @@ CONFIG_INPUT_YEALINK=m
CONFIG_INPUT_CM109=m
CONFIG_INPUT_REGULATOR_HAPTIC=m
CONFIG_INPUT_RETU_PWRBUTTON=m
-CONFIG_INPUT_TPS65218_PWRBUTTON=m
CONFIG_INPUT_AXP20X_PEK=m
CONFIG_INPUT_TWL4030_PWRBUTTON=m
CONFIG_INPUT_TWL4030_VIBRA=m
@@ -4225,7 +4080,6 @@ CONFIG_INPUT_PCF50633_PMU=m
CONFIG_INPUT_PCF8574=m
CONFIG_INPUT_PWM_BEEPER=m
CONFIG_INPUT_PWM_VIBRA=m
-CONFIG_INPUT_RK805_PWRKEY=m
CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
CONFIG_INPUT_DA9052_ONKEY=m
CONFIG_INPUT_DA9055_ONKEY=m
@@ -4246,7 +4100,6 @@ CONFIG_INPUT_DRV260X_HAPTICS=m
CONFIG_INPUT_DRV2665_HAPTICS=m
CONFIG_INPUT_DRV2667_HAPTICS=m
CONFIG_INPUT_RAVE_SP_PWRBUTTON=m
-CONFIG_INPUT_STPMIC1_ONKEY=m
CONFIG_RMI4_CORE=m
CONFIG_RMI4_I2C=m
CONFIG_RMI4_SPI=m
@@ -4258,25 +4111,25 @@ CONFIG_RMI4_F11=y
CONFIG_RMI4_F12=y
CONFIG_RMI4_F30=y
CONFIG_RMI4_F34=y
+CONFIG_RMI4_F3A=y
# CONFIG_RMI4_F54 is not set
CONFIG_RMI4_F55=y
#
# Hardware I/O ports
#
-CONFIG_SERIO=m
+CONFIG_SERIO=y
CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
-CONFIG_SERIO_I8042=m
+CONFIG_SERIO_I8042=y
CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_CT82C710=m
CONFIG_SERIO_PARKBD=m
CONFIG_SERIO_PCIPS2=m
-CONFIG_SERIO_LIBPS2=m
+CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=m
CONFIG_SERIO_ALTERA_PS2=m
CONFIG_SERIO_PS2MULT=m
CONFIG_SERIO_ARC_PS2=m
-# CONFIG_SERIO_APBPS2 is not set
CONFIG_HYPERV_KEYBOARD=m
CONFIG_SERIO_GPIO_PS2=m
CONFIG_USERIO=m
@@ -4318,10 +4171,9 @@ CONFIG_SERIAL_8250_EXAR=m
CONFIG_SERIAL_8250_CS=m
CONFIG_SERIAL_8250_MEN_MCB=m
CONFIG_SERIAL_8250_NR_UARTS=32
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=32
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_ASPEED_VUART=m
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
CONFIG_SERIAL_8250_RSA=y
@@ -4330,7 +4182,6 @@ CONFIG_SERIAL_8250_DW=m
CONFIG_SERIAL_8250_RT288X=y
CONFIG_SERIAL_8250_LPSS=y
CONFIG_SERIAL_8250_MID=y
-CONFIG_SERIAL_OF_PLATFORM=m
#
# Non-8250 serial port support
@@ -4342,7 +4193,6 @@ CONFIG_SERIAL_UARTLITE_NR_UARTS=1
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_JSM=m
-CONFIG_SERIAL_SIFIVE=m
CONFIG_SERIAL_LANTIQ=m
CONFIG_SERIAL_SCCNXP=m
CONFIG_SERIAL_SC16IS7XX_CORE=m
@@ -4354,14 +4204,12 @@ CONFIG_SERIAL_ALTERA_UART=m
CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
CONFIG_SERIAL_IFX6X60=m
-CONFIG_SERIAL_XILINX_PS_UART=m
CONFIG_SERIAL_ARC=m
CONFIG_SERIAL_ARC_NR_PORTS=1
CONFIG_SERIAL_RP2=m
CONFIG_SERIAL_RP2_NR_UARTS=32
CONFIG_SERIAL_FSL_LPUART=m
CONFIG_SERIAL_FSL_LINFLEXUART=m
-CONFIG_SERIAL_CONEXANT_DIGICOLOR=m
CONFIG_SERIAL_MEN_Z135=m
CONFIG_SERIAL_SPRD=m
# end of Serial drivers
@@ -4389,9 +4237,8 @@ CONFIG_HVC_XEN=y
CONFIG_HVC_XEN_FRONTEND=y
CONFIG_SERIAL_DEV_BUS=y
CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
-# CONFIG_TTY_PRINTK is not set
CONFIG_PRINTER=m
-# CONFIG_LP_CONSOLE is not set
+CONFIG_LP_CONSOLE=y
CONFIG_PPDEV=m
CONFIG_VIRTIO_CONSOLE=m
CONFIG_IPMI_HANDLER=m
@@ -4411,7 +4258,7 @@ CONFIG_HW_RANDOM_AMD=m
CONFIG_HW_RANDOM_BA431=m
CONFIG_HW_RANDOM_VIA=m
CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_HW_RANDOM_CCTRNG=m
+CONFIG_HW_RANDOM_XIPHERA=m
CONFIG_APPLICOM=m
#
@@ -4427,13 +4274,12 @@ CONFIG_IPWIRELESS=m
CONFIG_MWAVE=m
CONFIG_DEVMEM=y
# CONFIG_DEVKMEM is not set
-CONFIG_NVRAM=m
+CONFIG_NVRAM=y
CONFIG_RAW_DRIVER=m
-CONFIG_MAX_RAW_DEVS=256
+CONFIG_MAX_RAW_DEVS=8192
CONFIG_DEVPORT=y
CONFIG_HPET=y
-CONFIG_HPET_MMAP=y
-CONFIG_HPET_MMAP_DEFAULT=y
+# CONFIG_HPET_MMAP is not set
CONFIG_HANGCHECK_TIMER=m
CONFIG_TCG_TPM=m
CONFIG_HW_RANDOM_TPM=y
@@ -4456,7 +4302,6 @@ CONFIG_TCG_TIS_ST33ZP24_SPI=m
CONFIG_TELCLOCK=m
CONFIG_XILLYBUS=m
CONFIG_XILLYBUS_PCIE=m
-CONFIG_XILLYBUS_OF=m
# end of Character devices
# CONFIG_RANDOM_TRUST_CPU is not set
@@ -4475,15 +4320,11 @@ CONFIG_I2C_MUX=m
#
# Multiplexer I2C Chip support
#
-CONFIG_I2C_ARB_GPIO_CHALLENGE=m
CONFIG_I2C_MUX_GPIO=m
-CONFIG_I2C_MUX_GPMUX=m
CONFIG_I2C_MUX_LTC4306=m
CONFIG_I2C_MUX_PCA9541=m
CONFIG_I2C_MUX_PCA954x=m
-CONFIG_I2C_MUX_PINCTRL=m
CONFIG_I2C_MUX_REG=m
-CONFIG_I2C_DEMUX_PINCTRL=m
CONFIG_I2C_MUX_MLXCPLD=m
# end of Multiplexer I2C Chip support
@@ -4534,14 +4375,13 @@ CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_SLAVE=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
-CONFIG_I2C_DESIGNWARE_PCI=m
+CONFIG_I2C_DESIGNWARE_PCI=y
CONFIG_I2C_EMEV2=m
CONFIG_I2C_GPIO=m
# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
CONFIG_I2C_KEMPLD=m
CONFIG_I2C_OCORES=m
CONFIG_I2C_PCA_PLATFORM=m
-CONFIG_I2C_RK3X=m
CONFIG_I2C_SIMTEC=m
CONFIG_I2C_XILINX=m
@@ -4561,20 +4401,18 @@ CONFIG_I2C_VIPERBOARD=m
#
CONFIG_I2C_MLXCPLD=m
CONFIG_I2C_CROS_EC_TUNNEL=m
-CONFIG_I2C_FSI=m
# end of I2C Hardware Bus support
CONFIG_I2C_STUB=m
CONFIG_I2C_SLAVE=y
CONFIG_I2C_SLAVE_EEPROM=m
+CONFIG_I2C_SLAVE_TESTUNIT=m
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# end of I2C support
-CONFIG_I3C=m
-CONFIG_CDNS_I3C_MASTER=m
-CONFIG_DW_I3C_MASTER=m
+# CONFIG_I3C is not set
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -4593,12 +4431,9 @@ CONFIG_SPI_DW_DMA=y
CONFIG_SPI_DW_PCI=m
CONFIG_SPI_DW_MMIO=m
CONFIG_SPI_DLN2=m
-CONFIG_SPI_FSI=m
CONFIG_SPI_NXP_FLEXSPI=m
CONFIG_SPI_GPIO=m
CONFIG_SPI_LM70_LLP=m
-CONFIG_SPI_FSL_LIB=m
-CONFIG_SPI_FSL_SPI=m
CONFIG_SPI_LANTIQ_SSC=m
CONFIG_SPI_OC_TINY=m
CONFIG_SPI_PXA2XX=m
@@ -4627,18 +4462,8 @@ CONFIG_SPI_SLAVE=y
CONFIG_SPI_SLAVE_TIME=m
CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m
CONFIG_SPI_DYNAMIC=y
-CONFIG_SPMI=m
-CONFIG_HSI=m
-CONFIG_HSI_BOARDINFO=y
-
-#
-# HSI controllers
-#
-
-#
-# HSI clients
-#
-CONFIG_HSI_CHAR=m
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
CONFIG_PPS=y
# CONFIG_PPS_DEBUG is not set
@@ -4667,26 +4492,16 @@ CONFIG_PTP_1588_CLOCK_VMW=m
# end of PTP clock support
CONFIG_PINCTRL=y
-CONFIG_GENERIC_PINCTRL_GROUPS=y
CONFIG_PINMUX=y
-CONFIG_GENERIC_PINMUX_FUNCTIONS=y
CONFIG_PINCONF=y
CONFIG_GENERIC_PINCONF=y
# CONFIG_DEBUG_PINCTRL is not set
-CONFIG_PINCTRL_AS3722=m
-CONFIG_PINCTRL_AXP209=m
CONFIG_PINCTRL_AMD=m
CONFIG_PINCTRL_DA9062=m
CONFIG_PINCTRL_MCP23S08_I2C=m
CONFIG_PINCTRL_MCP23S08_SPI=m
CONFIG_PINCTRL_MCP23S08=m
-CONFIG_PINCTRL_SINGLE=m
CONFIG_PINCTRL_SX150X=y
-CONFIG_PINCTRL_STMFX=m
-CONFIG_PINCTRL_MAX77620=m
-CONFIG_PINCTRL_PALMAS=m
-CONFIG_PINCTRL_RK805=m
-CONFIG_PINCTRL_OCELOT=y
CONFIG_PINCTRL_BAYTRAIL=y
CONFIG_PINCTRL_CHERRYVIEW=y
CONFIG_PINCTRL_LYNXPOINT=y
@@ -4702,47 +4517,42 @@ CONFIG_PINCTRL_JASPERLAKE=y
CONFIG_PINCTRL_LEWISBURG=y
CONFIG_PINCTRL_SUNRISEPOINT=y
CONFIG_PINCTRL_TIGERLAKE=y
-CONFIG_PINCTRL_LOCHNAGAR=m
+
+#
+# Renesas pinctrl drivers
+#
+# end of Renesas pinctrl drivers
+
CONFIG_PINCTRL_MADERA=m
CONFIG_PINCTRL_CS47L15=y
CONFIG_PINCTRL_CS47L35=y
CONFIG_PINCTRL_CS47L85=y
CONFIG_PINCTRL_CS47L90=y
CONFIG_PINCTRL_CS47L92=y
-CONFIG_PINCTRL_EQUILIBRIUM=m
CONFIG_GPIOLIB=y
CONFIG_GPIOLIB_FASTPATH_LIMIT=512
-CONFIG_OF_GPIO=y
CONFIG_GPIO_ACPI=y
CONFIG_GPIOLIB_IRQCHIP=y
# CONFIG_DEBUG_GPIO is not set
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_GENERIC=y
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_CDEV_V1=y
+CONFIG_GPIO_GENERIC=m
+CONFIG_GPIO_REGMAP=m
CONFIG_GPIO_MAX730X=m
#
# Memory mapped GPIO drivers
#
-CONFIG_GPIO_74XX_MMIO=m
-CONFIG_GPIO_ALTERA=m
CONFIG_GPIO_AMDPT=m
-CONFIG_GPIO_CADENCE=m
CONFIG_GPIO_DWAPB=m
CONFIG_GPIO_EXAR=m
-CONFIG_GPIO_FTGPIO010=y
CONFIG_GPIO_GENERIC_PLATFORM=m
-CONFIG_GPIO_GRGPIO=m
-CONFIG_GPIO_HLWD=m
CONFIG_GPIO_ICH=m
-CONFIG_GPIO_LOGICVC=m
CONFIG_GPIO_MB86S7X=m
CONFIG_GPIO_MENZ127=m
-CONFIG_GPIO_SAMA5D2_PIOBU=m
-CONFIG_GPIO_SIFIVE=y
CONFIG_GPIO_SIOX=m
-CONFIG_GPIO_SYSCON=m
CONFIG_GPIO_VX855=m
-CONFIG_GPIO_WCD934X=m
CONFIG_GPIO_XILINX=m
CONFIG_GPIO_AMD_FCH=m
# end of Memory mapped GPIO drivers
@@ -4762,8 +4572,6 @@ CONFIG_GPIO_WS16C48=m
# I2C GPIO expanders
#
CONFIG_GPIO_ADP5588=m
-CONFIG_GPIO_ADNP=m
-CONFIG_GPIO_GW_PLD=m
CONFIG_GPIO_MAX7300=m
CONFIG_GPIO_MAX732X=m
CONFIG_GPIO_PCA953X=m
@@ -4778,8 +4586,6 @@ CONFIG_GPIO_TPIC2810=m
#
CONFIG_GPIO_ADP5520=m
CONFIG_GPIO_ARIZONA=m
-CONFIG_GPIO_BD70528=m
-CONFIG_GPIO_BD71828=m
CONFIG_GPIO_BD9571MWV=m
CONFIG_GPIO_CRYSTAL_COVE=m
CONFIG_GPIO_DA9052=m
@@ -4789,17 +4595,12 @@ CONFIG_GPIO_JANZ_TTL=m
CONFIG_GPIO_KEMPLD=m
CONFIG_GPIO_LP3943=m
CONFIG_GPIO_LP873X=m
-CONFIG_GPIO_LP87565=m
CONFIG_GPIO_MADERA=m
-CONFIG_GPIO_MAX77620=m
-CONFIG_GPIO_MAX77650=m
CONFIG_GPIO_MSIC=y
CONFIG_GPIO_PALMAS=y
CONFIG_GPIO_RC5T583=y
-CONFIG_GPIO_STMPE=y
-CONFIG_GPIO_TC3589X=y
+CONFIG_GPIO_SL28CPLD=m
CONFIG_GPIO_TPS65086=m
-CONFIG_GPIO_TPS65218=m
CONFIG_GPIO_TPS6586X=y
CONFIG_GPIO_TPS65910=y
CONFIG_GPIO_TPS65912=m
@@ -4822,19 +4623,16 @@ CONFIG_GPIO_ML_IOH=m
CONFIG_GPIO_PCI_IDIO_16=m
CONFIG_GPIO_PCIE_IDIO_24=m
CONFIG_GPIO_RDC321X=m
-CONFIG_GPIO_SODAVILLE=y
# end of PCI GPIO expanders
#
# SPI GPIO expanders
#
-CONFIG_GPIO_74X164=m
CONFIG_GPIO_MAX3191X=m
CONFIG_GPIO_MAX7301=m
CONFIG_GPIO_MC33880=m
CONFIG_GPIO_PISOSR=m
CONFIG_GPIO_XRA1403=m
-CONFIG_GPIO_MOXTET=m
# end of SPI GPIO expanders
#
@@ -4883,20 +4681,9 @@ CONFIG_W1_SLAVE_DS28E04=m
CONFIG_W1_SLAVE_DS28E17=m
# end of 1-wire Slaves
-CONFIG_POWER_AVS=y
-CONFIG_QCOM_CPR=m
CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_AS3722=y
-CONFIG_POWER_RESET_GPIO=y
-CONFIG_POWER_RESET_GPIO_RESTART=y
-CONFIG_POWER_RESET_LTC2952=y
CONFIG_POWER_RESET_MT6323=y
CONFIG_POWER_RESET_RESTART=y
-CONFIG_POWER_RESET_SYSCON=y
-CONFIG_POWER_RESET_SYSCON_POWEROFF=y
-CONFIG_REBOOT_MODE=m
-CONFIG_SYSCON_REBOOT_MODE=m
-CONFIG_NVMEM_REBOOT_MODE=m
CONFIG_POWER_SUPPLY=y
# CONFIG_POWER_SUPPLY_DEBUG is not set
CONFIG_POWER_SUPPLY_HWMON=y
@@ -4909,14 +4696,11 @@ CONFIG_WM8350_POWER=m
CONFIG_TEST_POWER=m
CONFIG_BATTERY_88PM860X=m
CONFIG_CHARGER_ADP5061=m
-CONFIG_BATTERY_ACT8945A=m
-CONFIG_BATTERY_CPCAP=m
CONFIG_BATTERY_CW2015=m
CONFIG_BATTERY_DS2760=m
CONFIG_BATTERY_DS2780=m
CONFIG_BATTERY_DS2781=m
CONFIG_BATTERY_DS2782=m
-CONFIG_BATTERY_LEGO_EV3=m
CONFIG_BATTERY_SBS=m
CONFIG_CHARGER_SBS=m
CONFIG_MANAGER_SBS=m
@@ -4949,8 +4733,6 @@ CONFIG_CHARGER_GPIO=m
CONFIG_CHARGER_MANAGER=y
CONFIG_CHARGER_LT3651=m
CONFIG_CHARGER_MAX14577=m
-CONFIG_CHARGER_DETECTOR_MAX14656=m
-CONFIG_CHARGER_MAX77650=m
CONFIG_CHARGER_MAX77693=m
CONFIG_CHARGER_MAX8997=m
CONFIG_CHARGER_MAX8998=m
@@ -4961,15 +4743,13 @@ CONFIG_CHARGER_BQ24257=m
CONFIG_CHARGER_BQ24735=m
CONFIG_CHARGER_BQ2515X=m
CONFIG_CHARGER_BQ25890=m
+CONFIG_CHARGER_BQ25980=m
CONFIG_CHARGER_SMB347=m
CONFIG_CHARGER_TPS65090=m
-CONFIG_CHARGER_TPS65217=m
CONFIG_BATTERY_GAUGE_LTC2941=m
CONFIG_BATTERY_RT5033=m
CONFIG_CHARGER_RT9455=m
CONFIG_CHARGER_CROS_USBPD=m
-CONFIG_CHARGER_UCS1002=m
-CONFIG_CHARGER_BD70528=m
CONFIG_CHARGER_BD99954=m
CONFIG_CHARGER_WILCO=m
CONFIG_HWMON=y
@@ -5020,7 +4800,6 @@ CONFIG_SENSORS_I5K_AMB=m
CONFIG_SENSORS_F71805F=m
CONFIG_SENSORS_F71882FG=m
CONFIG_SENSORS_F75375S=m
-CONFIG_SENSORS_GSC=m
CONFIG_SENSORS_MC13783_ADC=m
CONFIG_SENSORS_FSCHMD=m
CONFIG_SENSORS_FTSTEUTATES=m
@@ -5028,7 +4807,6 @@ CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_GL520SM=m
CONFIG_SENSORS_G760A=m
CONFIG_SENSORS_G762=m
-CONFIG_SENSORS_GPIO_FAN=m
CONFIG_SENSORS_HIH6130=m
CONFIG_SENSORS_IBMAEM=m
CONFIG_SENSORS_IBMPEX=m
@@ -5039,7 +4817,6 @@ CONFIG_SENSORS_IT87=m
CONFIG_SENSORS_JC42=m
CONFIG_SENSORS_POWR1220=m
CONFIG_SENSORS_LINEAGE=m
-CONFIG_SENSORS_LOCHNAGAR=m
CONFIG_SENSORS_LTC2945=m
CONFIG_SENSORS_LTC2947=m
CONFIG_SENSORS_LTC2947_I2C=m
@@ -5068,6 +4845,7 @@ CONFIG_SENSORS_MCP3021=m
CONFIG_SENSORS_MLXREG_FAN=m
CONFIG_SENSORS_TC654=m
CONFIG_SENSORS_MENF21BMC_HWMON=m
+CONFIG_SENSORS_MR75203=m
CONFIG_SENSORS_ADCXX=m
CONFIG_SENSORS_LM63=m
CONFIG_SENSORS_LM70=m
@@ -5096,6 +4874,7 @@ CONFIG_SENSORS_NPCM7XX=m
CONFIG_SENSORS_PCF8591=m
CONFIG_PMBUS=m
CONFIG_SENSORS_PMBUS=m
+CONFIG_SENSORS_ADM1266=m
CONFIG_SENSORS_ADM1275=m
CONFIG_SENSORS_BEL_PFE=m
CONFIG_SENSORS_IBM_CFFPS=m
@@ -5115,6 +4894,7 @@ CONFIG_SENSORS_MAX20751=m
CONFIG_SENSORS_MAX31785=m
CONFIG_SENSORS_MAX34440=m
CONFIG_SENSORS_MAX8688=m
+CONFIG_SENSORS_MP2975=m
CONFIG_SENSORS_PXE1610=m
CONFIG_SENSORS_TPS40422=m
CONFIG_SENSORS_TPS53679=m
@@ -5122,7 +4902,7 @@ CONFIG_SENSORS_UCD9000=m
CONFIG_SENSORS_UCD9200=m
CONFIG_SENSORS_XDPE122=m
CONFIG_SENSORS_ZL6100=m
-CONFIG_SENSORS_PWM_FAN=m
+CONFIG_SENSORS_SL28CPLD=m
CONFIG_SENSORS_SHT15=m
CONFIG_SENSORS_SHT21=m
CONFIG_SENSORS_SHT3x=m
@@ -5173,6 +4953,7 @@ CONFIG_SENSORS_W83627EHF=m
CONFIG_SENSORS_WM831X=m
CONFIG_SENSORS_WM8350=m
CONFIG_SENSORS_XGENE=m
+CONFIG_SENSORS_INTEL_M10_BMC_HWMON=m
#
# ACPI drivers
@@ -5184,7 +4965,6 @@ CONFIG_THERMAL_NETLINK=y
# CONFIG_THERMAL_STATISTICS is not set
CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
CONFIG_THERMAL_HWMON=y
-CONFIG_THERMAL_OF=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
@@ -5195,14 +4975,8 @@ CONFIG_THERMAL_GOV_STEP_WISE=y
CONFIG_THERMAL_GOV_BANG_BANG=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
-CONFIG_CPU_THERMAL=y
-CONFIG_CPU_FREQ_THERMAL=y
-CONFIG_CPU_IDLE_THERMAL=y
CONFIG_DEVFREQ_THERMAL=y
# CONFIG_THERMAL_EMULATION is not set
-CONFIG_THERMAL_MMIO=m
-CONFIG_MAX77620_THERMAL=m
-CONFIG_DA9062_THERMAL=m
#
# Intel thermal drivers
@@ -5225,7 +4999,6 @@ CONFIG_INTEL_BXT_PMIC_THERMAL=m
CONFIG_INTEL_PCH_THERMAL=m
# end of Intel thermal drivers
-# CONFIG_TI_SOC_THERMAL is not set
CONFIG_GENERIC_ADC_THERMAL=m
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
@@ -5249,12 +5022,10 @@ CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y
#
CONFIG_SOFT_WATCHDOG=m
# CONFIG_SOFT_WATCHDOG_PRETIMEOUT is not set
-CONFIG_BD70528_WATCHDOG=m
CONFIG_DA9052_WATCHDOG=m
CONFIG_DA9055_WATCHDOG=m
CONFIG_DA9063_WATCHDOG=m
CONFIG_DA9062_WATCHDOG=m
-CONFIG_GPIO_WATCHDOG=m
CONFIG_MENF21BMC_WATCHDOG=m
CONFIG_MENZ069_WATCHDOG=m
CONFIG_WDAT_WDT=m
@@ -5264,14 +5035,12 @@ CONFIG_XILINX_WATCHDOG=m
CONFIG_ZIIRAVE_WATCHDOG=m
CONFIG_RAVE_SP_WATCHDOG=m
CONFIG_MLX_WDT=m
+CONFIG_SL28CPLD_WATCHDOG=m
CONFIG_CADENCE_WATCHDOG=m
CONFIG_DW_WATCHDOG=m
-CONFIG_RN5T618_WATCHDOG=m
CONFIG_TWL4030_WATCHDOG=m
CONFIG_MAX63XX_WATCHDOG=m
-CONFIG_MAX77620_WATCHDOG=m
CONFIG_RETU_WATCHDOG=m
-CONFIG_STPMIC1_WATCHDOG=m
CONFIG_ACQUIRE_WDT=m
CONFIG_ADVANTECH_WDT=m
CONFIG_ALIM1535_WDT=m
@@ -5352,13 +5121,9 @@ CONFIG_BCMA_DRIVER_GPIO=y
# Multifunction device drivers
#
CONFIG_MFD_CORE=y
-CONFIG_MFD_ACT8945A=m
CONFIG_MFD_AS3711=y
-CONFIG_MFD_AS3722=m
CONFIG_PMIC_ADP5520=y
CONFIG_MFD_AAT2870_CORE=y
-CONFIG_MFD_ATMEL_FLEXCOM=m
-CONFIG_MFD_ATMEL_HLCDC=m
CONFIG_MFD_BCM590XX=m
CONFIG_MFD_BD9571MWV=m
CONFIG_MFD_AXP20X=m
@@ -5381,12 +5146,10 @@ CONFIG_MFD_DA9062=m
CONFIG_MFD_DA9063=m
CONFIG_MFD_DA9150=m
CONFIG_MFD_DLN2=m
-CONFIG_MFD_GATEWORKS_GSC=m
CONFIG_MFD_MC13XXX=m
CONFIG_MFD_MC13XXX_SPI=m
CONFIG_MFD_MC13XXX_I2C=m
CONFIG_MFD_MP2629=m
-CONFIG_MFD_HI6421_PMIC=m
CONFIG_HTC_PASIC3=m
CONFIG_HTC_I2CPLD=y
CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m
@@ -5409,9 +5172,6 @@ CONFIG_MFD_88PM800=m
CONFIG_MFD_88PM805=m
CONFIG_MFD_88PM860X=y
CONFIG_MFD_MAX14577=m
-CONFIG_MFD_MAX77620=y
-CONFIG_MFD_MAX77650=m
-CONFIG_MFD_MAX77686=m
CONFIG_MFD_MAX77693=m
CONFIG_MFD_MAX77843=y
CONFIG_MFD_MAX8907=m
@@ -5422,7 +5182,6 @@ CONFIG_MFD_MT6360=m
CONFIG_MFD_MT6397=m
CONFIG_MFD_MENF21BMC=m
CONFIG_EZX_PCAP=y
-CONFIG_MFD_CPCAP=m
CONFIG_MFD_VIPERBOARD=m
CONFIG_MFD_RETU=m
CONFIG_MFD_PCF50633=m
@@ -5432,25 +5191,16 @@ CONFIG_UCB1400_CORE=m
CONFIG_MFD_RDC321X=m
CONFIG_MFD_RT5033=m
CONFIG_MFD_RC5T583=y
-CONFIG_MFD_RK808=m
-CONFIG_MFD_RN5T618=m
CONFIG_MFD_SEC_CORE=y
CONFIG_MFD_SI476X_CORE=m
+CONFIG_MFD_SIMPLE_MFD_I2C=m
+CONFIG_MFD_SL28CPLD=m
CONFIG_MFD_SM501=m
CONFIG_MFD_SM501_GPIO=y
CONFIG_MFD_SKY81452=m
CONFIG_ABX500_CORE=y
CONFIG_AB3100_CORE=y
CONFIG_AB3100_OTP=y
-CONFIG_MFD_STMPE=y
-
-#
-# STMicroelectronics STMPE Interface Drivers
-#
-CONFIG_STMPE_I2C=y
-CONFIG_STMPE_SPI=y
-# end of STMicroelectronics STMPE Interface Drivers
-
CONFIG_MFD_SYSCON=y
CONFIG_MFD_TI_AM335X_TSCADC=m
CONFIG_MFD_LP3943=m
@@ -5462,11 +5212,8 @@ CONFIG_TPS65010=m
CONFIG_TPS6507X=m
CONFIG_MFD_TPS65086=m
CONFIG_MFD_TPS65090=y
-CONFIG_MFD_TPS65217=m
CONFIG_MFD_TPS68470=y
CONFIG_MFD_TI_LP873X=m
-CONFIG_MFD_TI_LP87565=m
-CONFIG_MFD_TPS65218=m
CONFIG_MFD_TPS6586X=y
CONFIG_MFD_TPS65910=y
CONFIG_MFD_TPS65912=m
@@ -5478,10 +5225,8 @@ CONFIG_MFD_TWL4030_AUDIO=y
CONFIG_TWL6040_CORE=y
CONFIG_MFD_WL1273_CORE=m
CONFIG_MFD_LM3533=m
-CONFIG_MFD_TC3589X=y
CONFIG_MFD_TQMX86=m
CONFIG_MFD_VX855=m
-CONFIG_MFD_LOCHNAGAR=y
CONFIG_MFD_ARIZONA=y
CONFIG_MFD_ARIZONA_I2C=m
CONFIG_MFD_ARIZONA_SPI=m
@@ -5497,13 +5242,9 @@ CONFIG_MFD_WM831X_SPI=y
CONFIG_MFD_WM8350=y
CONFIG_MFD_WM8350_I2C=y
CONFIG_MFD_WM8994=m
-CONFIG_MFD_ROHM_BD718XX=m
-CONFIG_MFD_ROHM_BD70528=m
-CONFIG_MFD_ROHM_BD71828=m
-CONFIG_MFD_STPMIC1=m
-CONFIG_MFD_STMFX=m
CONFIG_MFD_WCD934X=m
CONFIG_RAVE_SP_CORE=m
+CONFIG_MFD_INTEL_M10_BMC=m
# end of Multifunction device drivers
CONFIG_REGULATOR=y
@@ -5515,74 +5256,52 @@ CONFIG_REGULATOR_88PG86X=m
CONFIG_REGULATOR_88PM800=m
CONFIG_REGULATOR_88PM8607=m
CONFIG_REGULATOR_ACT8865=m
-CONFIG_REGULATOR_ACT8945A=m
CONFIG_REGULATOR_AD5398=m
CONFIG_REGULATOR_AAT2870=m
CONFIG_REGULATOR_AB3100=m
CONFIG_REGULATOR_ARIZONA_LDO1=m
CONFIG_REGULATOR_ARIZONA_MICSUPP=m
CONFIG_REGULATOR_AS3711=m
-CONFIG_REGULATOR_AS3722=m
CONFIG_REGULATOR_AXP20X=m
CONFIG_REGULATOR_BCM590XX=m
-CONFIG_REGULATOR_BD70528=m
-CONFIG_REGULATOR_BD71828=m
-CONFIG_REGULATOR_BD718XX=m
CONFIG_REGULATOR_BD9571MWV=m
-CONFIG_REGULATOR_CPCAP=m
-CONFIG_REGULATOR_CROS_EC=m
CONFIG_REGULATOR_DA903X=m
CONFIG_REGULATOR_DA9052=m
CONFIG_REGULATOR_DA9055=m
CONFIG_REGULATOR_DA9062=m
-CONFIG_REGULATOR_DA9063=m
CONFIG_REGULATOR_DA9210=m
CONFIG_REGULATOR_DA9211=m
CONFIG_REGULATOR_FAN53555=m
-CONFIG_REGULATOR_FAN53880=m
CONFIG_REGULATOR_GPIO=m
-CONFIG_REGULATOR_HI6421=m
-CONFIG_REGULATOR_HI6421V530=m
CONFIG_REGULATOR_ISL9305=m
CONFIG_REGULATOR_ISL6271A=m
CONFIG_REGULATOR_LM363X=m
-CONFIG_REGULATOR_LOCHNAGAR=m
CONFIG_REGULATOR_LP3971=m
CONFIG_REGULATOR_LP3972=m
CONFIG_REGULATOR_LP872X=m
-CONFIG_REGULATOR_LP873X=m
CONFIG_REGULATOR_LP8755=m
-CONFIG_REGULATOR_LP87565=m
CONFIG_REGULATOR_LP8788=m
CONFIG_REGULATOR_LTC3589=m
CONFIG_REGULATOR_LTC3676=m
CONFIG_REGULATOR_MAX14577=m
CONFIG_REGULATOR_MAX1586=m
-CONFIG_REGULATOR_MAX77620=m
-CONFIG_REGULATOR_MAX77650=m
CONFIG_REGULATOR_MAX8649=m
CONFIG_REGULATOR_MAX8660=m
CONFIG_REGULATOR_MAX8907=m
CONFIG_REGULATOR_MAX8925=m
CONFIG_REGULATOR_MAX8952=m
-CONFIG_REGULATOR_MAX8973=m
CONFIG_REGULATOR_MAX8997=m
CONFIG_REGULATOR_MAX8998=m
-CONFIG_REGULATOR_MAX77686=m
CONFIG_REGULATOR_MAX77693=m
-CONFIG_REGULATOR_MAX77802=m
CONFIG_REGULATOR_MAX77826=m
CONFIG_REGULATOR_MC13XXX_CORE=m
CONFIG_REGULATOR_MC13783=m
CONFIG_REGULATOR_MC13892=m
-CONFIG_REGULATOR_MCP16502=m
-CONFIG_REGULATOR_MP5416=m
CONFIG_REGULATOR_MP8859=m
-CONFIG_REGULATOR_MP886X=m
-CONFIG_REGULATOR_MPQ7920=m
CONFIG_REGULATOR_MT6311=m
CONFIG_REGULATOR_MT6323=m
CONFIG_REGULATOR_MT6358=m
+CONFIG_REGULATOR_MT6360=m
CONFIG_REGULATOR_MT6397=m
CONFIG_REGULATOR_PALMAS=m
CONFIG_REGULATOR_PCA9450=m
@@ -5593,22 +5312,16 @@ CONFIG_REGULATOR_PV88060=m
CONFIG_REGULATOR_PV88080=m
CONFIG_REGULATOR_PV88090=m
CONFIG_REGULATOR_PWM=m
-CONFIG_REGULATOR_QCOM_SPMI=m
-CONFIG_REGULATOR_QCOM_USB_VBUS=m
+CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY=m
CONFIG_REGULATOR_RC5T583=m
-CONFIG_REGULATOR_RK808=m
-CONFIG_REGULATOR_RN5T618=m
-CONFIG_REGULATOR_ROHM=m
+CONFIG_REGULATOR_RT4801=m
CONFIG_REGULATOR_RT5033=m
+CONFIG_REGULATOR_RTMV20=m
CONFIG_REGULATOR_S2MPA01=m
CONFIG_REGULATOR_S2MPS11=m
CONFIG_REGULATOR_S5M8767=m
CONFIG_REGULATOR_SKY81452=m
CONFIG_REGULATOR_SLG51000=m
-CONFIG_REGULATOR_STPMIC1=m
-CONFIG_REGULATOR_SY8106A=m
-CONFIG_REGULATOR_SY8824X=m
-CONFIG_REGULATOR_SY8827N=m
CONFIG_REGULATOR_TPS51632=m
CONFIG_REGULATOR_TPS6105X=m
CONFIG_REGULATOR_TPS62360=m
@@ -5617,23 +5330,20 @@ CONFIG_REGULATOR_TPS6507X=m
CONFIG_REGULATOR_TPS65086=m
CONFIG_REGULATOR_TPS65090=m
CONFIG_REGULATOR_TPS65132=m
-CONFIG_REGULATOR_TPS65217=m
-CONFIG_REGULATOR_TPS65218=m
CONFIG_REGULATOR_TPS6524X=m
CONFIG_REGULATOR_TPS6586X=m
CONFIG_REGULATOR_TPS65910=m
CONFIG_REGULATOR_TPS65912=m
CONFIG_REGULATOR_TPS80031=m
CONFIG_REGULATOR_TWL4030=m
-CONFIG_REGULATOR_VCTRL=m
CONFIG_REGULATOR_WM831X=m
CONFIG_REGULATOR_WM8350=m
CONFIG_REGULATOR_WM8400=m
CONFIG_REGULATOR_WM8994=m
-CONFIG_REGULATOR_QCOM_LABIBB=m
-CONFIG_RC_CORE=m
+CONFIG_RC_CORE=y
CONFIG_RC_MAP=m
CONFIG_LIRC=y
+CONFIG_BPF_LIRC_MODE2=y
CONFIG_RC_DECODERS=y
CONFIG_IR_NEC_DECODER=m
CONFIG_IR_RC5_DECODER=m
@@ -5649,7 +5359,6 @@ CONFIG_IR_RCMM_DECODER=m
CONFIG_RC_DEVICES=y
CONFIG_RC_ATI_REMOTE=m
CONFIG_IR_ENE=m
-CONFIG_IR_HIX5HD2=m
CONFIG_IR_IMON=m
CONFIG_IR_IMON_RAW=m
CONFIG_IR_MCEUSB=m
@@ -5657,16 +5366,12 @@ CONFIG_IR_ITE_CIR=m
CONFIG_IR_FINTEK=m
CONFIG_IR_NUVOTON=m
CONFIG_IR_REDRAT3=m
-CONFIG_IR_SPI=m
CONFIG_IR_STREAMZAP=m
CONFIG_IR_WINBOND_CIR=m
CONFIG_IR_IGORPLUGUSB=m
CONFIG_IR_IGUANA=m
CONFIG_IR_TTUSBIR=m
CONFIG_RC_LOOPBACK=m
-CONFIG_IR_GPIO_CIR=m
-CONFIG_IR_GPIO_TX=m
-CONFIG_IR_PWM_TX=m
CONFIG_IR_SERIAL=m
CONFIG_IR_SERIAL_TRANSMITTER=y
CONFIG_IR_SIR=m
@@ -5686,7 +5391,7 @@ CONFIG_CEC_SECO_RC=y
CONFIG_USB_PULSE8_CEC=m
CONFIG_USB_RAINSHADOW_CEC=m
CONFIG_MEDIA_SUPPORT=m
-# CONFIG_MEDIA_SUPPORT_FILTER is not set
+CONFIG_MEDIA_SUPPORT_FILTER=y
CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
#
@@ -5696,18 +5401,14 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_RADIO_SUPPORT=y
-CONFIG_MEDIA_SDR_SUPPORT=y
+# CONFIG_MEDIA_SDR_SUPPORT is not set
CONFIG_MEDIA_PLATFORM_SUPPORT=y
-CONFIG_MEDIA_TEST_SUPPORT=y
+# CONFIG_MEDIA_TEST_SUPPORT is not set
# end of Media device types
-#
-# Media core support
-#
CONFIG_VIDEO_DEV=m
CONFIG_MEDIA_CONTROLLER=y
CONFIG_DVB_CORE=m
-# end of Media core support
#
# Video4Linux options
@@ -5730,11 +5431,6 @@ CONFIG_VIDEOBUF_VMALLOC=m
# Media controller options
#
CONFIG_MEDIA_CONTROLLER_DVB=y
-CONFIG_MEDIA_CONTROLLER_REQUEST_API=y
-
-#
-# Please notice that the enabled Media controller Request API is EXPERIMENTAL
-#
# end of Media controller options
#
@@ -5743,7 +5439,7 @@ CONFIG_MEDIA_CONTROLLER_REQUEST_API=y
CONFIG_DVB_MMAP=y
CONFIG_DVB_NET=y
CONFIG_DVB_MAX_ADAPTERS=16
-# CONFIG_DVB_DYNAMIC_MINORS is not set
+CONFIG_DVB_DYNAMIC_MINORS=y
# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
# CONFIG_DVB_ULE_DEBUG is not set
# end of Digital TV options
@@ -5751,6 +5447,10 @@ CONFIG_DVB_MAX_ADAPTERS=16
#
# Media drivers
#
+
+#
+# Drivers filtered as selected at 'Filter media drivers'
+#
CONFIG_TTPCI_EEPROM=m
CONFIG_MEDIA_USB_SUPPORT=y
@@ -5907,13 +5607,6 @@ CONFIG_VIDEO_EM28XX_V4L2=m
CONFIG_VIDEO_EM28XX_ALSA=m
CONFIG_VIDEO_EM28XX_DVB=m
CONFIG_VIDEO_EM28XX_RC=m
-
-#
-# Software defined radio USB devices
-#
-CONFIG_USB_AIRSPY=m
-CONFIG_USB_HACKRF=m
-CONFIG_USB_MSI2500=m
CONFIG_MEDIA_PCI_SUPPORT=y
#
@@ -6033,34 +5726,20 @@ CONFIG_VIDEO_SAA7146_VV=m
CONFIG_SMS_SIANO_MDTV=m
CONFIG_SMS_SIANO_RC=y
# CONFIG_SMS_SIANO_DEBUGFS is not set
-CONFIG_VIDEO_V4L2_TPG=m
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_VIDEO_CAFE_CCIC=m
CONFIG_VIDEO_CADENCE=y
CONFIG_VIDEO_CADENCE_CSI2RX=m
CONFIG_VIDEO_CADENCE_CSI2TX=m
CONFIG_VIDEO_ASPEED=m
-CONFIG_VIDEO_MUX=m
-CONFIG_VIDEO_XILINX=m
-CONFIG_VIDEO_XILINX_CSI2RXSS=m
-CONFIG_VIDEO_XILINX_TPG=m
-CONFIG_VIDEO_XILINX_VTC=m
CONFIG_V4L_MEM2MEM_DRIVERS=y
CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
CONFIG_DVB_PLATFORM_DRIVERS=y
-CONFIG_SDR_PLATFORM_DRIVERS=y
#
# MMC/SDIO DVB adapters
#
CONFIG_SMS_SDIO_DRV=m
-CONFIG_V4L_TEST_DRIVERS=y
-CONFIG_VIDEO_VIMC=m
-CONFIG_VIDEO_VIVID=m
-CONFIG_VIDEO_VIVID_CEC=y
-CONFIG_VIDEO_VIVID_MAX_DEVS=64
-CONFIG_VIDEO_VIM2M=m
-CONFIG_VIDEO_VICODEC=m
#
# FireWire (IEEE 1394) Adapters
@@ -6069,6 +5748,8 @@ CONFIG_DVB_FIREDTV=m
CONFIG_DVB_FIREDTV_INPUT=y
# end of Media drivers
+CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y
+
#
# Media ancillary drivers
#
@@ -6080,109 +5761,39 @@ CONFIG_MEDIA_ATTACH=y
CONFIG_VIDEO_IR_I2C=m
#
-# Audio decoders, processors and mixers
+# audio, video and radio I2C drivers auto-selected by 'Autoselect ancillary drivers'
#
CONFIG_VIDEO_TVAUDIO=m
CONFIG_VIDEO_TDA7432=m
CONFIG_VIDEO_TDA9840=m
-CONFIG_VIDEO_TDA1997X=m
CONFIG_VIDEO_TEA6415C=m
CONFIG_VIDEO_TEA6420=m
CONFIG_VIDEO_MSP3400=m
CONFIG_VIDEO_CS3308=m
CONFIG_VIDEO_CS5345=m
CONFIG_VIDEO_CS53L32A=m
-CONFIG_VIDEO_TLV320AIC23B=m
CONFIG_VIDEO_UDA1342=m
CONFIG_VIDEO_WM8775=m
CONFIG_VIDEO_WM8739=m
CONFIG_VIDEO_VP27SMPX=m
CONFIG_VIDEO_SONY_BTF_MPX=m
-# end of Audio decoders, processors and mixers
-
-#
-# RDS decoders
-#
CONFIG_VIDEO_SAA6588=m
-# end of RDS decoders
-
-#
-# Video decoders
-#
-CONFIG_VIDEO_ADV7180=m
-CONFIG_VIDEO_ADV7183=m
-CONFIG_VIDEO_ADV748X=m
-CONFIG_VIDEO_ADV7604=m
-CONFIG_VIDEO_ADV7604_CEC=y
-CONFIG_VIDEO_ADV7842=m
-CONFIG_VIDEO_ADV7842_CEC=y
-CONFIG_VIDEO_BT819=m
-CONFIG_VIDEO_BT856=m
-CONFIG_VIDEO_BT866=m
-CONFIG_VIDEO_KS0127=m
-CONFIG_VIDEO_ML86V7667=m
-CONFIG_VIDEO_SAA7110=m
CONFIG_VIDEO_SAA711X=m
-CONFIG_VIDEO_TC358743=m
-CONFIG_VIDEO_TC358743_CEC=y
-CONFIG_VIDEO_TVP514X=m
CONFIG_VIDEO_TVP5150=m
-CONFIG_VIDEO_TVP7002=m
CONFIG_VIDEO_TW2804=m
CONFIG_VIDEO_TW9903=m
CONFIG_VIDEO_TW9906=m
-CONFIG_VIDEO_TW9910=m
-CONFIG_VIDEO_VPX3220=m
-CONFIG_VIDEO_MAX9286=m
#
# Video and audio decoders
#
CONFIG_VIDEO_SAA717X=m
CONFIG_VIDEO_CX25840=m
-# end of Video decoders
-
-#
-# Video encoders
-#
CONFIG_VIDEO_SAA7127=m
-CONFIG_VIDEO_SAA7185=m
-CONFIG_VIDEO_ADV7170=m
-CONFIG_VIDEO_ADV7175=m
-CONFIG_VIDEO_ADV7343=m
-CONFIG_VIDEO_ADV7393=m
-CONFIG_VIDEO_AD9389B=m
-CONFIG_VIDEO_AK881X=m
-CONFIG_VIDEO_THS8200=m
-# end of Video encoders
-
-#
-# Video improvement chips
-#
CONFIG_VIDEO_UPD64031A=m
CONFIG_VIDEO_UPD64083=m
-# end of Video improvement chips
-
-#
-# Audio/Video compression chips
-#
CONFIG_VIDEO_SAA6752HS=m
-# end of Audio/Video compression chips
-
-#
-# SDR tuner chips
-#
-CONFIG_SDR_MAX2175=m
-# end of SDR tuner chips
-
-#
-# Miscellaneous helper chips
-#
-CONFIG_VIDEO_THS7303=m
CONFIG_VIDEO_M52790=m
-CONFIG_VIDEO_I2C=m
-CONFIG_VIDEO_ST_MIPID02=m
-# end of Miscellaneous helper chips
#
# Camera sensor devices
@@ -6202,8 +5813,6 @@ CONFIG_VIDEO_OV2659=m
CONFIG_VIDEO_OV2680=m
CONFIG_VIDEO_OV2685=m
CONFIG_VIDEO_OV2740=m
-CONFIG_VIDEO_OV5640=m
-CONFIG_VIDEO_OV5645=m
CONFIG_VIDEO_OV5647=m
CONFIG_VIDEO_OV6650=m
CONFIG_VIDEO_OV5670=m
@@ -6261,10 +5870,8 @@ CONFIG_VIDEO_LM3646=m
# end of Flash devices
#
-# SPI helper chips
+# SPI I2C drivers auto-selected by 'Autoselect ancillary drivers'
#
-CONFIG_VIDEO_GS1662=m
-# end of SPI helper chips
#
# Media SPI Adapters
@@ -6275,7 +5882,7 @@ CONFIG_CXD2880_SPI_DRV=m
CONFIG_MEDIA_TUNER=m
#
-# Customize TV tuners
+# Tuner drivers auto-selected by 'Autoselect ancillary drivers'
#
CONFIG_MEDIA_TUNER_SIMPLE=m
CONFIG_MEDIA_TUNER_TDA18250=m
@@ -6285,7 +5892,6 @@ CONFIG_MEDIA_TUNER_TDA18271=m
CONFIG_MEDIA_TUNER_TDA9887=m
CONFIG_MEDIA_TUNER_TEA5761=m
CONFIG_MEDIA_TUNER_TEA5767=m
-CONFIG_MEDIA_TUNER_MSI001=m
CONFIG_MEDIA_TUNER_MT20XX=m
CONFIG_MEDIA_TUNER_MT2060=m
CONFIG_MEDIA_TUNER_MT2063=m
@@ -6314,10 +5920,9 @@ CONFIG_MEDIA_TUNER_R820T=m
CONFIG_MEDIA_TUNER_MXL301RF=m
CONFIG_MEDIA_TUNER_QM1D1C0042=m
CONFIG_MEDIA_TUNER_QM1D1B0004=m
-# end of Customize TV tuners
#
-# Customise DVB Frontends
+# DVB Frontend drivers auto-selected by 'Autoselect ancillary drivers'
#
#
@@ -6379,7 +5984,6 @@ CONFIG_DVB_SP8870=m
CONFIG_DVB_SP887X=m
CONFIG_DVB_CX22700=m
CONFIG_DVB_CX22702=m
-CONFIG_DVB_S5H1432=m
CONFIG_DVB_DRXD=m
CONFIG_DVB_L64781=m
CONFIG_DVB_TDA1004X=m
@@ -6390,7 +5994,6 @@ CONFIG_DVB_DIB3000MB=m
CONFIG_DVB_DIB3000MC=m
CONFIG_DVB_DIB7000M=m
CONFIG_DVB_DIB7000P=m
-CONFIG_DVB_DIB9000=m
CONFIG_DVB_TDA10048=m
CONFIG_DVB_AF9013=m
CONFIG_DVB_EC100=m
@@ -6399,12 +6002,10 @@ CONFIG_DVB_CXD2820R=m
CONFIG_DVB_CXD2841ER=m
CONFIG_DVB_RTL2830=m
CONFIG_DVB_RTL2832=m
-CONFIG_DVB_RTL2832_SDR=m
CONFIG_DVB_SI2168=m
CONFIG_DVB_AS102_FE=m
CONFIG_DVB_ZD1301_DEMOD=m
CONFIG_DVB_GP8PSK_FE=m
-CONFIG_DVB_CXD2880=m
#
# DVB-C (cable) frontends
@@ -6442,7 +6043,6 @@ CONFIG_DVB_MB86A20S=m
# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
#
CONFIG_DVB_TC90522=m
-CONFIG_DVB_MN88443X=m
#
# Digital terrestrial only tuners/PLL
@@ -6456,14 +6056,12 @@ CONFIG_DVB_TUNER_DIB0090=m
#
CONFIG_DVB_DRX39XYJ=m
CONFIG_DVB_LNBH25=m
-CONFIG_DVB_LNBH29=m
CONFIG_DVB_LNBP21=m
CONFIG_DVB_LNBP22=m
CONFIG_DVB_ISL6405=m
CONFIG_DVB_ISL6421=m
CONFIG_DVB_ISL6423=m
CONFIG_DVB_A8293=m
-CONFIG_DVB_LGS8GL5=m
CONFIG_DVB_LGS8GXX=m
CONFIG_DVB_ATBM8830=m
CONFIG_DVB_TDA665x=m
@@ -6479,12 +6077,6 @@ CONFIG_DVB_HELENE=m
#
CONFIG_DVB_CXD2099=m
CONFIG_DVB_SP2=m
-# end of Customise DVB Frontends
-
-#
-# Tools to develop new frontends
-#
-CONFIG_DVB_DUMMY_FE=m
# end of Media ancillary drivers
#
@@ -6506,10 +6098,8 @@ CONFIG_DRM_DP_AUX_CHARDEV=y
# CONFIG_DRM_DEBUG_SELFTEST is not set
CONFIG_DRM_KMS_HELPER=m
CONFIG_DRM_KMS_FB_HELPER=y
-# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
CONFIG_DRM_FBDEV_EMULATION=y
CONFIG_DRM_FBDEV_OVERALLOC=100
-# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
CONFIG_DRM_LOAD_EDID_FIRMWARE=y
CONFIG_DRM_DP_CEC=y
CONFIG_DRM_TTM=m
@@ -6533,7 +6123,6 @@ CONFIG_DRM_I2C_NXP_TDA9950=m
#
# ARM devices
#
-CONFIG_DRM_KOMEDA=m
# end of ARM devices
CONFIG_DRM_RADEON=m
@@ -6557,7 +6146,7 @@ CONFIG_DRM_AMD_DC=y
CONFIG_DRM_AMD_DC_DCN=y
CONFIG_DRM_AMD_DC_DCN3_0=y
CONFIG_DRM_AMD_DC_HDCP=y
-# CONFIG_DEBUG_KERNEL_DC is not set
+CONFIG_DRM_AMD_DC_SI=y
# end of Display Engine Configuration
CONFIG_HSA_AMD=y
@@ -6576,25 +6165,6 @@ CONFIG_DRM_I915_COMPRESS_ERROR=y
CONFIG_DRM_I915_USERPTR=y
CONFIG_DRM_I915_GVT=y
CONFIG_DRM_I915_GVT_KVMGT=m
-
-#
-# drm/i915 Debugging
-#
-# CONFIG_DRM_I915_WERROR is not set
-# CONFIG_DRM_I915_DEBUG is not set
-# CONFIG_DRM_I915_DEBUG_MMIO is not set
-# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
-# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set
-# CONFIG_DRM_I915_DEBUG_GUC is not set
-# CONFIG_DRM_I915_SELFTEST is not set
-# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
-# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
-# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set
-# end of drm/i915 Debugging
-
-#
-# drm/i915 Profile Guided Optimisation
-#
CONFIG_DRM_I915_FENCE_TIMEOUT=10000
CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
@@ -6602,8 +6172,6 @@ CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
CONFIG_DRM_I915_STOP_TIMEOUT=100
CONFIG_DRM_I915_TIMESLICE_DURATION=1
-# end of drm/i915 Profile Guided Optimisation
-
CONFIG_DRM_VGEM=m
CONFIG_DRM_VKMS=m
CONFIG_DRM_VMWGFX=m
@@ -6614,8 +6182,6 @@ CONFIG_DRM_GMA3600=y
CONFIG_DRM_UDL=m
CONFIG_DRM_AST=m
CONFIG_DRM_MGAG200=m
-CONFIG_DRM_RCAR_DW_HDMI=m
-CONFIG_DRM_RCAR_LVDS=m
CONFIG_DRM_QXL=m
CONFIG_DRM_BOCHS=m
CONFIG_DRM_VIRTIO_GPU=m
@@ -6624,57 +6190,7 @@ CONFIG_DRM_PANEL=y
#
# Display Panels
#
-CONFIG_DRM_PANEL_ARM_VERSATILE=m
-CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596=m
-CONFIG_DRM_PANEL_BOE_HIMAX8279D=m
-CONFIG_DRM_PANEL_BOE_TV101WUM_NL6=m
-CONFIG_DRM_PANEL_LVDS=m
-CONFIG_DRM_PANEL_SIMPLE=m
-CONFIG_DRM_PANEL_ELIDA_KD35T133=m
-CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02=m
-CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D=m
-CONFIG_DRM_PANEL_ILITEK_IL9322=m
-CONFIG_DRM_PANEL_ILITEK_ILI9881C=m
-CONFIG_DRM_PANEL_INNOLUX_P079ZCA=m
-CONFIG_DRM_PANEL_JDI_LT070ME05000=m
-CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04=m
-CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W=m
-CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829=m
-CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
-CONFIG_DRM_PANEL_LG_LB035Q02=m
-CONFIG_DRM_PANEL_LG_LG4573=m
-CONFIG_DRM_PANEL_NEC_NL8048HL11=m
-CONFIG_DRM_PANEL_NOVATEK_NT35510=m
-CONFIG_DRM_PANEL_NOVATEK_NT39016=m
-CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO=m
-CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m
-CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS=m
-CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00=m
CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m
-CONFIG_DRM_PANEL_RAYDIUM_RM67191=m
-CONFIG_DRM_PANEL_RAYDIUM_RM68200=m
-CONFIG_DRM_PANEL_RONBO_RB070D30=m
-CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=m
-CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2=m
-CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
-CONFIG_DRM_PANEL_SAMSUNG_S6E63M0=m
-CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01=m
-CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
-CONFIG_DRM_PANEL_SEIKO_43WVF1G=m
-CONFIG_DRM_PANEL_SHARP_LQ101R1SX01=m
-CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DRM_PANEL_SHARP_LS043T1LE01=m
-CONFIG_DRM_PANEL_SITRONIX_ST7701=m
-CONFIG_DRM_PANEL_SITRONIX_ST7703=m
-CONFIG_DRM_PANEL_SITRONIX_ST7789V=m
-CONFIG_DRM_PANEL_SONY_ACX424AKP=m
-CONFIG_DRM_PANEL_SONY_ACX565AKM=m
-CONFIG_DRM_PANEL_TPO_TD028TTEC1=m
-CONFIG_DRM_PANEL_TPO_TD043MTEA1=m
-CONFIG_DRM_PANEL_TPO_TPG110=m
-CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
-CONFIG_DRM_PANEL_VISIONOX_RM69299=m
-CONFIG_DRM_PANEL_XINPENG_XPP055C272=m
# end of Display Panels
CONFIG_DRM_BRIDGE=y
@@ -6683,42 +6199,11 @@ CONFIG_DRM_PANEL_BRIDGE=y
#
# Display Interface Bridges
#
-CONFIG_DRM_CDNS_DSI=m
-CONFIG_DRM_CHRONTEL_CH7033=m
-CONFIG_DRM_DISPLAY_CONNECTOR=m
-CONFIG_DRM_LVDS_CODEC=m
-CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW=m
-CONFIG_DRM_NWL_MIPI_DSI=m
-CONFIG_DRM_NXP_PTN3460=m
-CONFIG_DRM_PARADE_PS8622=m
-CONFIG_DRM_PARADE_PS8640=m
-CONFIG_DRM_SIL_SII8620=m
-CONFIG_DRM_SII902X=m
-CONFIG_DRM_SII9234=m
-CONFIG_DRM_SIMPLE_BRIDGE=m
-CONFIG_DRM_THINE_THC63LVD1024=m
-CONFIG_DRM_TOSHIBA_TC358764=m
-CONFIG_DRM_TOSHIBA_TC358767=m
-CONFIG_DRM_TOSHIBA_TC358768=m
-CONFIG_DRM_TI_TFP410=m
-CONFIG_DRM_TI_SN65DSI86=m
-CONFIG_DRM_TI_TPD12S015=m
-CONFIG_DRM_ANALOGIX_ANX6345=m
CONFIG_DRM_ANALOGIX_ANX78XX=m
CONFIG_DRM_ANALOGIX_DP=m
-CONFIG_DRM_I2C_ADV7511=m
-CONFIG_DRM_I2C_ADV7511_AUDIO=y
-CONFIG_DRM_I2C_ADV7511_CEC=y
-CONFIG_DRM_DW_HDMI=m
-CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
-CONFIG_DRM_DW_HDMI_I2S_AUDIO=m
-CONFIG_DRM_DW_HDMI_CEC=m
# end of Display Interface Bridges
# CONFIG_DRM_ETNAVIV is not set
-CONFIG_DRM_ARCPGU=m
-CONFIG_DRM_MXS=y
-CONFIG_DRM_MXSFB=m
CONFIG_DRM_CIRRUS_QEMU=m
CONFIG_DRM_GM12U320=m
CONFIG_TINYDRM_HX8357D=m
@@ -6752,8 +6237,7 @@ CONFIG_FB_SYS_IMAGEBLIT=m
# CONFIG_FB_FOREIGN_ENDIAN is not set
CONFIG_FB_SYS_FOPS=m
CONFIG_FB_DEFERRED_IO=y
-CONFIG_FB_BACKLIGHT=m
-CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_MODE_HELPERS is not set
CONFIG_FB_TILEBLITTING=y
#
@@ -6777,7 +6261,6 @@ CONFIG_FB_EFI=y
# CONFIG_FB_RIVA is not set
# CONFIG_FB_I740 is not set
# CONFIG_FB_LE80578 is not set
-# CONFIG_FB_INTEL is not set
# CONFIG_FB_MATROX is not set
# CONFIG_FB_RADEON is not set
# CONFIG_FB_ATY128 is not set
@@ -6805,7 +6288,6 @@ CONFIG_XEN_FBDEV_FRONTEND=m
# CONFIG_FB_MB862XX is not set
CONFIG_FB_HYPERV=m
CONFIG_FB_SIMPLE=y
-# CONFIG_FB_SSD1307 is not set
# CONFIG_FB_SM712 is not set
# end of Frame buffer Devices
@@ -6826,6 +6308,7 @@ CONFIG_LCD_LMS501KF03=m
CONFIG_LCD_HX8357=m
CONFIG_LCD_OTM3225A=m
CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_KTD253=m
CONFIG_BACKLIGHT_LM3533=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BACKLIGHT_DA903X=m
@@ -6847,14 +6330,12 @@ CONFIG_BACKLIGHT_LP855X=m
CONFIG_BACKLIGHT_LP8788=m
CONFIG_BACKLIGHT_PANDORA=m
CONFIG_BACKLIGHT_SKY81452=m
-CONFIG_BACKLIGHT_TPS65217=m
CONFIG_BACKLIGHT_AS3711=m
CONFIG_BACKLIGHT_GPIO=m
CONFIG_BACKLIGHT_LV5207LP=m
CONFIG_BACKLIGHT_BD6107=m
CONFIG_BACKLIGHT_ARCXCNN=m
CONFIG_BACKLIGHT_RAVE_SP=m
-CONFIG_BACKLIGHT_LED=m
# end of Backlight & LCD device support
CONFIG_VIDEOMODE_HELPERS=y
@@ -7018,7 +6499,7 @@ CONFIG_SND_HDA_INTEL=m
CONFIG_SND_HDA_HWDEP=y
CONFIG_SND_HDA_RECONFIG=y
CONFIG_SND_HDA_INPUT_BEEP=y
-CONFIG_SND_HDA_INPUT_BEEP_MODE=1
+CONFIG_SND_HDA_INPUT_BEEP_MODE=0
CONFIG_SND_HDA_PATCH_LOADER=y
CONFIG_SND_HDA_CODEC_REALTEK=m
CONFIG_SND_HDA_CODEC_ANALOG=m
@@ -7033,7 +6514,7 @@ CONFIG_SND_HDA_CODEC_CA0132_DSP=y
CONFIG_SND_HDA_CODEC_CMEDIA=m
CONFIG_SND_HDA_CODEC_SI3054=m
CONFIG_SND_HDA_GENERIC=m
-CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=1
CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM=y
# end of HD-Audio
@@ -7090,7 +6571,6 @@ CONFIG_SND_SOC_AMD_RV_RT5682_MACH=m
CONFIG_SND_SOC_AMD_RENOIR=m
CONFIG_SND_SOC_AMD_RENOIR_MACH=m
CONFIG_SND_ATMEL_SOC=m
-CONFIG_SND_SOC_MIKROE_PROTO=m
CONFIG_SND_BCM63XX_I2S_WHISTLER=m
CONFIG_SND_DESIGNWARE_I2S=m
CONFIG_SND_DESIGNWARE_PCM=y
@@ -7121,13 +6601,8 @@ CONFIG_SND_SOC_IMG_SPDIF_IN=m
CONFIG_SND_SOC_IMG_SPDIF_OUT=m
CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m
CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y
-CONFIG_SND_SST_IPC=m
-CONFIG_SND_SST_IPC_PCI=m
-CONFIG_SND_SST_IPC_ACPI=m
-CONFIG_SND_SOC_INTEL_SST_ACPI=m
CONFIG_SND_SOC_INTEL_SST=m
-CONFIG_SND_SOC_INTEL_SST_FIRMWARE=m
-CONFIG_SND_SOC_INTEL_HASWELL=m
+CONFIG_SND_SOC_INTEL_CATPT=m
CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m
CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m
CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m
@@ -7146,7 +6621,7 @@ CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m
CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m
CONFIG_SND_SOC_ACPI_INTEL_MATCH=m
CONFIG_SND_SOC_INTEL_MACH=y
-# CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES is not set
+CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES=y
CONFIG_SND_SOC_INTEL_HASWELL_MACH=m
CONFIG_SND_SOC_INTEL_BDW_RT5650_MACH=m
CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m
@@ -7183,13 +6658,12 @@ CONFIG_SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH=m
CONFIG_SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH=m
CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH=m
CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH=m
+CONFIG_SND_SOC_INTEL_SOUNDWIRE_SOF_MACH=m
CONFIG_SND_SOC_MTK_BTCVSD=m
CONFIG_SND_SOC_SOF_TOPLEVEL=y
CONFIG_SND_SOC_SOF_PCI=m
CONFIG_SND_SOC_SOF_ACPI=m
-CONFIG_SND_SOC_SOF_OF=m
# CONFIG_SND_SOC_SOF_DEBUG_PROBES is not set
-# CONFIG_SND_SOC_SOF_DEVELOPER_SUPPORT is not set
CONFIG_SND_SOC_SOF=m
CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE=y
CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y
@@ -7198,6 +6672,7 @@ CONFIG_SND_SOC_SOF_INTEL_PCI=m
CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC=m
CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP=m
CONFIG_SND_SOC_SOF_INTEL_COMMON=m
+# CONFIG_SND_SOC_SOF_BROADWELL_SUPPORT is not set
CONFIG_SND_SOC_SOF_MERRIFIELD_SUPPORT=y
CONFIG_SND_SOC_SOF_MERRIFIELD=m
CONFIG_SND_SOC_SOF_APOLLOLAKE_SUPPORT=y
@@ -7225,6 +6700,9 @@ CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC=y
# CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1 is not set
CONFIG_SND_SOC_SOF_HDA_LINK_BASELINE=m
CONFIG_SND_SOC_SOF_HDA=m
+CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE_LINK=y
+CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=m
+CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE=m
CONFIG_SND_SOC_SOF_XTENSA=m
#
@@ -7264,7 +6742,6 @@ CONFIG_SND_SOC_AK5558=m
CONFIG_SND_SOC_ALC5623=m
CONFIG_SND_SOC_BD28623=m
# CONFIG_SND_SOC_BT_SCO is not set
-CONFIG_SND_SOC_CPCAP=m
CONFIG_SND_SOC_CROS_EC_CODEC=m
CONFIG_SND_SOC_CS35L32=m
CONFIG_SND_SOC_CS35L33=m
@@ -7277,6 +6754,7 @@ CONFIG_SND_SOC_CS42L51_I2C=m
CONFIG_SND_SOC_CS42L52=m
CONFIG_SND_SOC_CS42L56=m
CONFIG_SND_SOC_CS42L73=m
+CONFIG_SND_SOC_CS4234=m
CONFIG_SND_SOC_CS4265=m
CONFIG_SND_SOC_CS4270=m
CONFIG_SND_SOC_CS4271=m
@@ -7303,7 +6781,6 @@ CONFIG_SND_SOC_GTM601=m
CONFIG_SND_SOC_HDAC_HDMI=m
CONFIG_SND_SOC_HDAC_HDA=m
CONFIG_SND_SOC_INNO_RK3036=m
-CONFIG_SND_SOC_LOCHNAGAR_SC=m
CONFIG_SND_SOC_MAX98088=m
CONFIG_SND_SOC_MAX98090=m
CONFIG_SND_SOC_MAX98357A=m
@@ -7315,7 +6792,6 @@ CONFIG_SND_SOC_MAX98373_I2C=m
CONFIG_SND_SOC_MAX98373_SDW=m
CONFIG_SND_SOC_MAX98390=m
CONFIG_SND_SOC_MAX9860=m
-CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
CONFIG_SND_SOC_PCM1681=m
CONFIG_SND_SOC_PCM1789=m
@@ -7342,6 +6818,7 @@ CONFIG_SND_SOC_RT286=m
CONFIG_SND_SOC_RT298=m
CONFIG_SND_SOC_RT1011=m
CONFIG_SND_SOC_RT1015=m
+CONFIG_SND_SOC_RT1308=m
CONFIG_SND_SOC_RT1308_SDW=m
CONFIG_SND_SOC_RT5514=m
CONFIG_SND_SOC_RT5514_SPI=m
@@ -7382,6 +6859,7 @@ CONFIG_SND_SOC_STA350=m
CONFIG_SND_SOC_STI_SAS=m
CONFIG_SND_SOC_TAS2552=m
CONFIG_SND_SOC_TAS2562=m
+CONFIG_SND_SOC_TAS2764=m
CONFIG_SND_SOC_TAS2770=m
CONFIG_SND_SOC_TAS5086=m
CONFIG_SND_SOC_TAS571X=m
@@ -7445,7 +6923,6 @@ CONFIG_SND_SOC_TPA6130A2=m
CONFIG_SND_SIMPLE_CARD_UTILS=m
CONFIG_SND_SIMPLE_CARD=m
-CONFIG_SND_AUDIO_GRAPH_CARD=m
CONFIG_SND_X86=y
CONFIG_HDMI_LPE_AUDIO=m
CONFIG_SND_SYNTH_EMUX=m
@@ -7455,11 +6932,11 @@ CONFIG_AC97_BUS=m
#
# HID support
#
-CONFIG_HID=m
+CONFIG_HID=y
CONFIG_HID_BATTERY_STRENGTH=y
CONFIG_HIDRAW=y
CONFIG_UHID=m
-CONFIG_HID_GENERIC=m
+CONFIG_HID_GENERIC=y
#
# Special HID drivers
@@ -7498,6 +6975,7 @@ CONFIG_HID_GLORIOUS=m
CONFIG_HID_HOLTEK=m
CONFIG_HOLTEK_FF=y
CONFIG_HID_GOOGLE_HAMMER=m
+CONFIG_HID_VIVALDI=m
CONFIG_HID_GT683R=m
CONFIG_HID_KEYTOUCH=m
CONFIG_HID_KYE=m
@@ -7583,13 +7061,6 @@ CONFIG_HID_MCP2221=m
CONFIG_USB_HID=m
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
-
-#
-# USB HID Boot Protocol drivers
-#
-# CONFIG_USB_KBD is not set
-# CONFIG_USB_MOUSE is not set
-# end of USB HID Boot Protocol drivers
# end of USB HID support
#
@@ -7621,10 +7092,10 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
# Miscellaneous USB options
#
CONFIG_USB_DEFAULT_PERSIST=y
-CONFIG_USB_DYNAMIC_MINORS=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_OTG is not set
# CONFIG_USB_OTG_PRODUCTLIST is not set
-# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
CONFIG_USB_LEDS_TRIGGER_USBPORT=m
CONFIG_USB_AUTOSUSPEND_DELAY=2
CONFIG_USB_MON=m
@@ -7633,26 +7104,25 @@ CONFIG_USB_MON=m
# USB Host Controller Drivers
#
CONFIG_USB_C67X00_HCD=m
-CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_XHCI_HCD=y
# CONFIG_USB_XHCI_DBGCAP is not set
CONFIG_USB_XHCI_PCI=m
CONFIG_USB_XHCI_PCI_RENESAS=m
CONFIG_USB_XHCI_PLATFORM=m
-CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
-CONFIG_USB_EHCI_PCI=m
+CONFIG_USB_EHCI_PCI=y
CONFIG_USB_EHCI_FSL=m
CONFIG_USB_EHCI_HCD_PLATFORM=m
CONFIG_USB_OXU210HP_HCD=m
CONFIG_USB_ISP116X_HCD=m
CONFIG_USB_FOTG210_HCD=m
CONFIG_USB_MAX3421_HCD=m
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_OHCI_HCD_PCI=m
-# CONFIG_USB_OHCI_HCD_SSB is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
CONFIG_USB_OHCI_HCD_PLATFORM=m
-CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_UHCI_HCD=y
CONFIG_USB_U132_HCD=m
CONFIG_USB_SL811_HCD=m
# CONFIG_USB_SL811_HCD_ISO is not set
@@ -7705,62 +7175,13 @@ CONFIG_USBIP_VHCI_HCD=m
CONFIG_USBIP_VHCI_HC_PORTS=8
CONFIG_USBIP_VHCI_NR_HCS=1
CONFIG_USBIP_HOST=m
-CONFIG_USBIP_VUDC=m
# CONFIG_USBIP_DEBUG is not set
-CONFIG_USB_CDNS3=m
-CONFIG_USB_CDNS3_GADGET=y
-CONFIG_USB_CDNS3_HOST=y
-CONFIG_USB_CDNS3_PCI_WRAP=m
-CONFIG_USB_MUSB_HDRC=m
-# CONFIG_USB_MUSB_HOST is not set
-# CONFIG_USB_MUSB_GADGET is not set
-CONFIG_USB_MUSB_DUAL_ROLE=y
-
-#
-# Platform Glue Layer
-#
-
-#
-# MUSB DMA mode
-#
-# CONFIG_MUSB_PIO_ONLY is not set
-CONFIG_USB_DWC3=m
-CONFIG_USB_DWC3_ULPI=y
-# CONFIG_USB_DWC3_HOST is not set
-# CONFIG_USB_DWC3_GADGET is not set
-CONFIG_USB_DWC3_DUAL_ROLE=y
-
-#
-# Platform Glue Driver Support
-#
-CONFIG_USB_DWC3_PCI=m
-CONFIG_USB_DWC3_HAPS=m
-CONFIG_USB_DWC3_OF_SIMPLE=m
-CONFIG_USB_DWC2=m
-# CONFIG_USB_DWC2_HOST is not set
-
-#
-# Gadget/Dual-role mode requires USB Gadget support to be enabled
-#
-# CONFIG_USB_DWC2_PERIPHERAL is not set
-CONFIG_USB_DWC2_DUAL_ROLE=y
-CONFIG_USB_DWC2_PCI=m
-# CONFIG_USB_DWC2_DEBUG is not set
-# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
-CONFIG_USB_CHIPIDEA=m
-CONFIG_USB_CHIPIDEA_UDC=y
-CONFIG_USB_CHIPIDEA_HOST=y
-CONFIG_USB_CHIPIDEA_PCI=m
-CONFIG_USB_CHIPIDEA_MSM=m
-CONFIG_USB_CHIPIDEA_IMX=m
-CONFIG_USB_CHIPIDEA_GENERIC=m
-CONFIG_USB_CHIPIDEA_TEGRA=m
-CONFIG_USB_ISP1760=m
-CONFIG_USB_ISP1760_HCD=y
-CONFIG_USB_ISP1761_UDC=y
-# CONFIG_USB_ISP1760_HOST_ROLE is not set
-# CONFIG_USB_ISP1760_GADGET_ROLE is not set
-CONFIG_USB_ISP1760_DUAL_ROLE=y
+# CONFIG_USB_CDNS3 is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_DWC2 is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
#
# USB port drivers
@@ -7840,7 +7261,6 @@ CONFIG_USB_FTDI_ELAN=m
CONFIG_USB_APPLEDISPLAY=m
CONFIG_APPLE_MFI_FASTCHARGE=m
CONFIG_USB_SISUSBVGA=m
-CONFIG_USB_SISUSBVGA_CON=y
CONFIG_USB_LD=m
CONFIG_USB_TRANCEVIBRATOR=m
CONFIG_USB_IOWARRIOR=m
@@ -7871,127 +7291,13 @@ CONFIG_TAHVO_USB=m
CONFIG_USB_ISP1301=m
# end of USB Physical Layer drivers
-CONFIG_USB_GADGET=m
-# CONFIG_USB_GADGET_DEBUG is not set
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-# CONFIG_USB_GADGET_DEBUG_FS is not set
-CONFIG_USB_GADGET_VBUS_DRAW=2
-CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
-CONFIG_U_SERIAL_CONSOLE=y
-
-#
-# USB Peripheral Controller
-#
-CONFIG_USB_FOTG210_UDC=m
-CONFIG_USB_GR_UDC=m
-CONFIG_USB_R8A66597=m
-CONFIG_USB_PXA27X=m
-CONFIG_USB_MV_UDC=m
-CONFIG_USB_MV_U3D=m
-CONFIG_USB_SNP_CORE=m
-CONFIG_USB_SNP_UDC_PLAT=m
-CONFIG_USB_M66592=m
-CONFIG_USB_BDC_UDC=m
-
-#
-# Platform Support
-#
-CONFIG_USB_BDC_PCI=m
-CONFIG_USB_AMD5536UDC=m
-CONFIG_USB_NET2272=m
-CONFIG_USB_NET2272_DMA=y
-CONFIG_USB_NET2280=m
-CONFIG_USB_GOKU=m
-CONFIG_USB_EG20T=m
-CONFIG_USB_GADGET_XILINX=m
-CONFIG_USB_MAX3420_UDC=m
-CONFIG_USB_DUMMY_HCD=m
-# end of USB Peripheral Controller
-
-CONFIG_USB_LIBCOMPOSITE=m
-CONFIG_USB_F_ACM=m
-CONFIG_USB_F_SS_LB=m
-CONFIG_USB_U_SERIAL=m
-CONFIG_USB_U_ETHER=m
-CONFIG_USB_U_AUDIO=m
-CONFIG_USB_F_SERIAL=m
-CONFIG_USB_F_OBEX=m
-CONFIG_USB_F_NCM=m
-CONFIG_USB_F_ECM=m
-CONFIG_USB_F_PHONET=m
-CONFIG_USB_F_EEM=m
-CONFIG_USB_F_SUBSET=m
-CONFIG_USB_F_RNDIS=m
-CONFIG_USB_F_MASS_STORAGE=m
-CONFIG_USB_F_FS=m
-CONFIG_USB_F_UAC1=m
-CONFIG_USB_F_UAC1_LEGACY=m
-CONFIG_USB_F_UAC2=m
-CONFIG_USB_F_UVC=m
-CONFIG_USB_F_MIDI=m
-CONFIG_USB_F_HID=m
-CONFIG_USB_F_PRINTER=m
-CONFIG_USB_F_TCM=m
-CONFIG_USB_CONFIGFS=m
-CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_ACM=y
-CONFIG_USB_CONFIGFS_OBEX=y
-CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_ECM=y
-CONFIG_USB_CONFIGFS_ECM_SUBSET=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_EEM=y
-CONFIG_USB_CONFIGFS_PHONET=y
-CONFIG_USB_CONFIGFS_MASS_STORAGE=y
-CONFIG_USB_CONFIGFS_F_LB_SS=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_UAC1=y
-CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y
-CONFIG_USB_CONFIGFS_F_UAC2=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_HID=y
-CONFIG_USB_CONFIGFS_F_UVC=y
-CONFIG_USB_CONFIGFS_F_PRINTER=y
-CONFIG_USB_CONFIGFS_F_TCM=y
-
-#
-# USB Gadget precomposed configurations
-#
-CONFIG_USB_ZERO=m
-CONFIG_USB_AUDIO=m
-# CONFIG_GADGET_UAC1 is not set
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_RNDIS=y
-CONFIG_USB_ETH_EEM=y
-CONFIG_USB_G_NCM=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FUNCTIONFS=m
-CONFIG_USB_FUNCTIONFS_ETH=y
-CONFIG_USB_FUNCTIONFS_RNDIS=y
-CONFIG_USB_FUNCTIONFS_GENERIC=y
-CONFIG_USB_MASS_STORAGE=m
-CONFIG_USB_GADGET_TARGET=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_MIDI_GADGET=m
-CONFIG_USB_G_PRINTER=m
-CONFIG_USB_CDC_COMPOSITE=m
-CONFIG_USB_G_NOKIA=m
-CONFIG_USB_G_ACM_MS=m
-CONFIG_USB_G_MULTI=m
-CONFIG_USB_G_MULTI_RNDIS=y
-CONFIG_USB_G_MULTI_CDC=y
-CONFIG_USB_G_HID=m
-CONFIG_USB_G_DBGP=m
-# CONFIG_USB_G_DBGP_PRINTK is not set
-CONFIG_USB_G_DBGP_SERIAL=y
-CONFIG_USB_G_WEBCAM=m
-CONFIG_USB_RAW_GADGET=m
-# end of USB Gadget precomposed configurations
-
+# CONFIG_USB_GADGET is not set
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_RT1711H=m
+CONFIG_TYPEC_MT6360=m
+CONFIG_TYPEC_TCPCI_MAXIM=m
CONFIG_TYPEC_FUSB302=m
CONFIG_TYPEC_WCOVE=m
CONFIG_TYPEC_UCSI=m
@@ -7999,6 +7305,7 @@ CONFIG_UCSI_CCG=m
CONFIG_UCSI_ACPI=m
CONFIG_TYPEC_HD3SS3220=m
CONFIG_TYPEC_TPS6598X=m
+CONFIG_TYPEC_STUSB160X=m
#
# USB Type-C Multiplexer/DeMultiplexer Switch support
@@ -8017,9 +7324,6 @@ CONFIG_TYPEC_NVIDIA_ALTMODE=m
CONFIG_USB_ROLE_SWITCH=m
CONFIG_USB_ROLES_INTEL_XHCI=m
CONFIG_MMC=m
-CONFIG_PWRSEQ_EMMC=m
-CONFIG_PWRSEQ_SD8787=m
-CONFIG_PWRSEQ_SIMPLE=m
CONFIG_MMC_BLOCK=m
CONFIG_MMC_BLOCK_MINORS=8
CONFIG_SDIO_UART=m
@@ -8035,13 +7339,7 @@ CONFIG_MMC_SDHCI_PCI=m
CONFIG_MMC_RICOH_MMC=y
CONFIG_MMC_SDHCI_ACPI=m
CONFIG_MMC_SDHCI_PLTFM=m
-CONFIG_MMC_SDHCI_OF_ARASAN=m
-CONFIG_MMC_SDHCI_OF_ASPEED=m
-CONFIG_MMC_SDHCI_OF_AT91=m
-CONFIG_MMC_SDHCI_OF_DWCMSHC=m
-CONFIG_MMC_SDHCI_CADENCE=m
CONFIG_MMC_SDHCI_F_SDH30=m
-CONFIG_MMC_SDHCI_MILBEAUT=m
CONFIG_MMC_WBSD=m
CONFIG_MMC_ALCOR=m
CONFIG_MMC_TIFM_SD=m
@@ -8059,9 +7357,6 @@ CONFIG_MMC_HSQ=m
CONFIG_MMC_TOSHIBA_PCI=m
CONFIG_MMC_MTK=m
CONFIG_MMC_SDHCI_XENON=m
-CONFIG_MMC_SDHCI_OMAP=m
-CONFIG_MMC_SDHCI_AM654=m
-CONFIG_MMC_SDHCI_EXTERNAL_DMA=y
CONFIG_MEMSTICK=m
# CONFIG_MEMSTICK_DEBUG is not set
@@ -8090,21 +7385,12 @@ CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y
# LED drivers
#
CONFIG_LEDS_88PM860X=m
-CONFIG_LEDS_AAT1290=m
-CONFIG_LEDS_AN30259A=m
CONFIG_LEDS_APU=m
CONFIG_LEDS_AS3645A=m
-CONFIG_LEDS_AW2013=m
-CONFIG_LEDS_BCM6328=m
-CONFIG_LEDS_BCM6358=m
-CONFIG_LEDS_CPCAP=m
-CONFIG_LEDS_CR0014114=m
-CONFIG_LEDS_EL15203000=m
CONFIG_LEDS_LM3530=m
CONFIG_LEDS_LM3532=m
CONFIG_LEDS_LM3533=m
CONFIG_LEDS_LM3642=m
-CONFIG_LEDS_LM3692X=m
CONFIG_LEDS_LM3601X=m
CONFIG_LEDS_MT6323=m
CONFIG_LEDS_PCA9532=m
@@ -8112,9 +7398,8 @@ CONFIG_LEDS_PCA9532_GPIO=y
CONFIG_LEDS_GPIO=m
CONFIG_LEDS_LP3944=m
CONFIG_LEDS_LP3952=m
-# CONFIG_LEDS_LP55XX_COMMON is not set
+CONFIG_LEDS_LP50XX=m
CONFIG_LEDS_LP8788=m
-CONFIG_LEDS_LP8860=m
CONFIG_LEDS_CLEVO_MAIL=m
CONFIG_LEDS_PCA955X=m
CONFIG_LEDS_PCA955X_GPIO=y
@@ -8128,32 +7413,23 @@ CONFIG_LEDS_PWM=m
CONFIG_LEDS_REGULATOR=m
CONFIG_LEDS_BD2802=m
CONFIG_LEDS_INTEL_SS4200=m
-CONFIG_LEDS_LT3593=m
CONFIG_LEDS_ADP5520=m
CONFIG_LEDS_MC13783=m
CONFIG_LEDS_TCA6507=m
CONFIG_LEDS_TLC591XX=m
-CONFIG_LEDS_MAX77650=m
-CONFIG_LEDS_MAX77693=m
CONFIG_LEDS_MAX8997=m
CONFIG_LEDS_LM355x=m
CONFIG_LEDS_MENF21BMC=m
-CONFIG_LEDS_KTD2692=m
-CONFIG_LEDS_IS31FL319X=m
-CONFIG_LEDS_IS31FL32XX=m
#
# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
#
CONFIG_LEDS_BLINKM=m
-CONFIG_LEDS_SYSCON=y
CONFIG_LEDS_MLXCPLD=m
CONFIG_LEDS_MLXREG=m
CONFIG_LEDS_USER=m
CONFIG_LEDS_NIC78BX=m
-CONFIG_LEDS_SPI_BYTE=m
CONFIG_LEDS_TI_LMU_COMMON=m
-CONFIG_LEDS_LM3697=m
CONFIG_LEDS_LM36274=m
CONFIG_LEDS_TPS6105X=m
CONFIG_LEDS_SGM3140=m
@@ -8293,25 +7569,20 @@ CONFIG_RTC_DRV_88PM80X=m
CONFIG_RTC_DRV_ABB5ZES3=m
CONFIG_RTC_DRV_ABEOZ9=m
CONFIG_RTC_DRV_ABX80X=m
-CONFIG_RTC_DRV_AS3722=m
CONFIG_RTC_DRV_DS1307=m
CONFIG_RTC_DRV_DS1307_CENTURY=y
CONFIG_RTC_DRV_DS1374=m
CONFIG_RTC_DRV_DS1374_WDT=y
CONFIG_RTC_DRV_DS1672=m
-CONFIG_RTC_DRV_HYM8563=m
CONFIG_RTC_DRV_LP8788=m
CONFIG_RTC_DRV_MAX6900=m
CONFIG_RTC_DRV_MAX8907=m
CONFIG_RTC_DRV_MAX8925=m
CONFIG_RTC_DRV_MAX8998=m
CONFIG_RTC_DRV_MAX8997=m
-CONFIG_RTC_DRV_MAX77686=m
-CONFIG_RTC_DRV_RK808=m
CONFIG_RTC_DRV_RS5C372=m
CONFIG_RTC_DRV_ISL1208=m
CONFIG_RTC_DRV_ISL12022=m
-CONFIG_RTC_DRV_ISL12026=m
CONFIG_RTC_DRV_X1205=m
CONFIG_RTC_DRV_PCF8523=m
CONFIG_RTC_DRV_PCF85063=m
@@ -8320,15 +7591,12 @@ CONFIG_RTC_DRV_PCF8563=m
CONFIG_RTC_DRV_PCF8583=m
CONFIG_RTC_DRV_M41T80=m
CONFIG_RTC_DRV_M41T80_WDT=y
-CONFIG_RTC_DRV_BD70528=m
CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_TWL4030=m
CONFIG_RTC_DRV_PALMAS=m
CONFIG_RTC_DRV_TPS6586X=m
CONFIG_RTC_DRV_TPS65910=m
CONFIG_RTC_DRV_TPS80031=m
CONFIG_RTC_DRV_RC5T583=m
-CONFIG_RTC_DRV_RC5T619=m
CONFIG_RTC_DRV_S35390A=m
CONFIG_RTC_DRV_FM3130=m
CONFIG_RTC_DRV_RX8010=m
@@ -8336,6 +7604,7 @@ CONFIG_RTC_DRV_RX8581=m
CONFIG_RTC_DRV_RX8025=m
CONFIG_RTC_DRV_EM3027=m
CONFIG_RTC_DRV_RV3028=m
+CONFIG_RTC_DRV_RV3032=m
CONFIG_RTC_DRV_RV8803=m
CONFIG_RTC_DRV_S5M=m
CONFIG_RTC_DRV_SD3078=m
@@ -8399,19 +7668,15 @@ CONFIG_RTC_DRV_WM831X=m
CONFIG_RTC_DRV_WM8350=m
CONFIG_RTC_DRV_PCF50633=m
CONFIG_RTC_DRV_AB3100=m
-CONFIG_RTC_DRV_ZYNQMP=m
CONFIG_RTC_DRV_CROS_EC=m
#
# on-CPU RTC drivers
#
-CONFIG_RTC_DRV_CADENCE=m
CONFIG_RTC_DRV_FTRTC010=m
CONFIG_RTC_DRV_PCAP=m
CONFIG_RTC_DRV_MC13XXX=m
CONFIG_RTC_DRV_MT6397=m
-CONFIG_RTC_DRV_R7301=m
-CONFIG_RTC_DRV_CPCAP=m
#
# HID Sensor RTC drivers
@@ -8427,20 +7692,16 @@ CONFIG_DMADEVICES=y
CONFIG_DMA_ENGINE=y
CONFIG_DMA_VIRTUAL_CHANNELS=y
CONFIG_DMA_ACPI=y
-CONFIG_DMA_OF=y
CONFIG_ALTERA_MSGDMA=m
-CONFIG_DW_AXI_DMAC=m
-CONFIG_FSL_EDMA=m
CONFIG_INTEL_IDMA64=m
CONFIG_INTEL_IDXD=m
CONFIG_INTEL_IOATDMA=m
-CONFIG_INTEL_MIC_X100_DMA=m
CONFIG_PLX_DMA=m
CONFIG_XILINX_ZYNQMP_DPDMA=m
CONFIG_QCOM_HIDMA_MGMT=m
CONFIG_QCOM_HIDMA=m
CONFIG_DW_DMAC_CORE=y
-CONFIG_DW_DMAC=y
+CONFIG_DW_DMAC=m
CONFIG_DW_DMAC_PCI=y
CONFIG_DW_EDMA=m
CONFIG_DW_EDMA_PCIE=m
@@ -8464,6 +7725,7 @@ CONFIG_UDMABUF=y
# CONFIG_DMABUF_SELFTESTS is not set
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_DMABUF_HEAPS_CMA=y
# end of DMABUF options
CONFIG_DCA=m
@@ -8475,7 +7737,6 @@ CONFIG_KS0108_DELAY=2
CONFIG_CFAG12864B=m
CONFIG_CFAG12864B_RATE=20
CONFIG_IMG_ASCII_LCD=m
-CONFIG_HT16K33=m
CONFIG_PARPORT_PANEL=m
CONFIG_PANEL_PARPORT=0
CONFIG_PANEL_PROFILE=5
@@ -8510,6 +7771,7 @@ CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_IRQ_BYPASS_MANAGER=m
CONFIG_VIRT_DRIVERS=y
CONFIG_VBOXGUEST=m
+CONFIG_NITRO_ENCLAVES=m
CONFIG_VIRTIO=y
CONFIG_VIRTIO_MENU=y
CONFIG_VIRTIO_PCI=m
@@ -8521,6 +7783,7 @@ CONFIG_VIRTIO_MEM=m
CONFIG_VIRTIO_INPUT=m
CONFIG_VIRTIO_MMIO=m
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
CONFIG_VDPA=m
CONFIG_VDPA_SIM=m
CONFIG_IFCVF=m
@@ -8583,97 +7846,7 @@ CONFIG_XEN_UNPOPULATED_ALLOC=y
# CONFIG_GREYBUS is not set
CONFIG_STAGING=y
CONFIG_PRISM2_USB=m
-CONFIG_COMEDI=m
-# CONFIG_COMEDI_DEBUG is not set
-CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
-CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
-CONFIG_COMEDI_MISC_DRIVERS=y
-CONFIG_COMEDI_BOND=m
-CONFIG_COMEDI_TEST=m
-CONFIG_COMEDI_PARPORT=m
-# CONFIG_COMEDI_ISA_DRIVERS is not set
-CONFIG_COMEDI_PCI_DRIVERS=m
-CONFIG_COMEDI_8255_PCI=m
-CONFIG_COMEDI_ADDI_WATCHDOG=m
-CONFIG_COMEDI_ADDI_APCI_1032=m
-CONFIG_COMEDI_ADDI_APCI_1500=m
-CONFIG_COMEDI_ADDI_APCI_1516=m
-CONFIG_COMEDI_ADDI_APCI_1564=m
-CONFIG_COMEDI_ADDI_APCI_16XX=m
-CONFIG_COMEDI_ADDI_APCI_2032=m
-CONFIG_COMEDI_ADDI_APCI_2200=m
-CONFIG_COMEDI_ADDI_APCI_3120=m
-CONFIG_COMEDI_ADDI_APCI_3501=m
-CONFIG_COMEDI_ADDI_APCI_3XXX=m
-CONFIG_COMEDI_ADL_PCI6208=m
-CONFIG_COMEDI_ADL_PCI7X3X=m
-CONFIG_COMEDI_ADL_PCI8164=m
-CONFIG_COMEDI_ADL_PCI9111=m
-CONFIG_COMEDI_ADL_PCI9118=m
-CONFIG_COMEDI_ADV_PCI1710=m
-CONFIG_COMEDI_ADV_PCI1720=m
-CONFIG_COMEDI_ADV_PCI1723=m
-CONFIG_COMEDI_ADV_PCI1724=m
-CONFIG_COMEDI_ADV_PCI1760=m
-CONFIG_COMEDI_ADV_PCI_DIO=m
-CONFIG_COMEDI_AMPLC_DIO200_PCI=m
-CONFIG_COMEDI_AMPLC_PC236_PCI=m
-CONFIG_COMEDI_AMPLC_PC263_PCI=m
-CONFIG_COMEDI_AMPLC_PCI224=m
-CONFIG_COMEDI_AMPLC_PCI230=m
-CONFIG_COMEDI_CONTEC_PCI_DIO=m
-CONFIG_COMEDI_DAS08_PCI=m
-CONFIG_COMEDI_DT3000=m
-CONFIG_COMEDI_DYNA_PCI10XX=m
-CONFIG_COMEDI_GSC_HPDI=m
-CONFIG_COMEDI_MF6X4=m
-CONFIG_COMEDI_ICP_MULTI=m
-CONFIG_COMEDI_DAQBOARD2000=m
-CONFIG_COMEDI_JR3_PCI=m
-CONFIG_COMEDI_KE_COUNTER=m
-CONFIG_COMEDI_CB_PCIDAS64=m
-CONFIG_COMEDI_CB_PCIDAS=m
-CONFIG_COMEDI_CB_PCIDDA=m
-CONFIG_COMEDI_CB_PCIMDAS=m
-CONFIG_COMEDI_CB_PCIMDDA=m
-CONFIG_COMEDI_ME4000=m
-CONFIG_COMEDI_ME_DAQ=m
-CONFIG_COMEDI_NI_6527=m
-CONFIG_COMEDI_NI_65XX=m
-CONFIG_COMEDI_NI_660X=m
-CONFIG_COMEDI_NI_670X=m
-CONFIG_COMEDI_NI_LABPC_PCI=m
-CONFIG_COMEDI_NI_PCIDIO=m
-CONFIG_COMEDI_NI_PCIMIO=m
-CONFIG_COMEDI_RTD520=m
-CONFIG_COMEDI_S626=m
-CONFIG_COMEDI_MITE=m
-CONFIG_COMEDI_NI_TIOCMD=m
-CONFIG_COMEDI_PCMCIA_DRIVERS=m
-CONFIG_COMEDI_CB_DAS16_CS=m
-CONFIG_COMEDI_DAS08_CS=m
-CONFIG_COMEDI_NI_DAQ_700_CS=m
-CONFIG_COMEDI_NI_DAQ_DIO24_CS=m
-CONFIG_COMEDI_NI_LABPC_CS=m
-CONFIG_COMEDI_NI_MIO_CS=m
-CONFIG_COMEDI_QUATECH_DAQP_CS=m
-CONFIG_COMEDI_USB_DRIVERS=m
-CONFIG_COMEDI_DT9812=m
-CONFIG_COMEDI_NI_USB6501=m
-CONFIG_COMEDI_USBDUX=m
-CONFIG_COMEDI_USBDUXFAST=m
-CONFIG_COMEDI_USBDUXSIGMA=m
-CONFIG_COMEDI_VMK80XX=m
-CONFIG_COMEDI_8254=m
-CONFIG_COMEDI_8255=m
-CONFIG_COMEDI_8255_SA=m
-CONFIG_COMEDI_KCOMEDILIB=m
-CONFIG_COMEDI_AMPLC_DIO200=m
-CONFIG_COMEDI_AMPLC_PC236=m
-CONFIG_COMEDI_DAS08=m
-CONFIG_COMEDI_NI_LABPC=m
-CONFIG_COMEDI_NI_TIO=m
-CONFIG_COMEDI_NI_ROUTING=m
+# CONFIG_COMEDI is not set
CONFIG_RTL8192U=m
CONFIG_RTLLIB=m
CONFIG_RTLLIB_CRYPTO_CCMP=m
@@ -8754,16 +7927,16 @@ CONFIG_STAGING_MEDIA=y
CONFIG_INTEL_ATOMISP=y
CONFIG_VIDEO_ATOMISP=m
CONFIG_VIDEO_ATOMISP_ISP2401=y
-CONFIG_VIDEO_ATOMISP_OV5693=m
CONFIG_VIDEO_ATOMISP_OV2722=m
CONFIG_VIDEO_ATOMISP_GC2235=m
CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER=m
CONFIG_VIDEO_ATOMISP_MT9M114=m
CONFIG_VIDEO_ATOMISP_GC0310=m
CONFIG_VIDEO_ATOMISP_OV2680=m
+CONFIG_VIDEO_ATOMISP_OV5693=m
CONFIG_VIDEO_ATOMISP_LM3554=m
+# CONFIG_VIDEO_ZORAN is not set
CONFIG_VIDEO_IPU3_IMGU=m
-CONFIG_VIDEO_USBVISION=m
#
# Android
@@ -8772,7 +7945,6 @@ CONFIG_ASHMEM=m
# CONFIG_ION is not set
# end of Android
-CONFIG_STAGING_BOARD=y
CONFIG_LTE_GDM724X=m
CONFIG_FIREWIRE_SERIAL=m
CONFIG_FWTTY_MAX_TOTAL_PORTS=64
@@ -8784,11 +7956,9 @@ CONFIG_UNISYS_VISORINPUT=m
CONFIG_UNISYS_VISORHBA=m
# CONFIG_FB_TFT is not set
CONFIG_MOST_COMPONENTS=m
-CONFIG_MOST_CDEV=m
CONFIG_MOST_NET=m
CONFIG_MOST_SOUND=m
CONFIG_MOST_VIDEO=m
-CONFIG_MOST_DIM2=m
CONFIG_MOST_I2C=m
CONFIG_KS7010=m
CONFIG_PI433=m
@@ -8800,11 +7970,7 @@ CONFIG_STAGING_GASKET_FRAMEWORK=m
CONFIG_STAGING_APEX_DRIVER=m
# end of Gasket devices
-CONFIG_XIL_AXIS_FIFO=m
CONFIG_FIELDBUS_DEV=m
-CONFIG_HMS_ANYBUSS_BUS=m
-CONFIG_ARCX_ANYBUS_CONTROLLER=m
-CONFIG_HMS_PROFINET=m
CONFIG_KPC2000=y
CONFIG_KPC2000_CORE=m
CONFIG_KPC2000_SPI=m
@@ -8918,14 +8084,12 @@ CONFIG_INTEL_SCU_PLATFORM=m
CONFIG_INTEL_SCU_IPC_UTIL=m
CONFIG_INTEL_TELEMETRY=m
CONFIG_PMC_ATOM=y
-CONFIG_MFD_CROS_EC=m
CONFIG_CHROME_PLATFORMS=y
CONFIG_CHROMEOS_LAPTOP=m
CONFIG_CHROMEOS_PSTORE=m
CONFIG_CHROMEOS_TBMC=m
CONFIG_CROS_EC=m
CONFIG_CROS_EC_I2C=m
-CONFIG_CROS_EC_RPMSG=m
CONFIG_CROS_EC_ISHTP=m
CONFIG_CROS_EC_SPI=m
CONFIG_CROS_EC_LPC=m
@@ -8933,7 +8097,6 @@ CONFIG_CROS_EC_PROTO=y
CONFIG_CROS_KBD_LED_BACKLIGHT=m
CONFIG_CROS_EC_CHARDEV=m
CONFIG_CROS_EC_LIGHTBAR=m
-CONFIG_CROS_EC_VBC=m
CONFIG_CROS_EC_DEBUGFS=m
CONFIG_CROS_EC_SENSORHUB=m
CONFIG_CROS_EC_SYSFS=m
@@ -8952,46 +8115,29 @@ CONFIG_CLKDEV_LOOKUP=y
CONFIG_HAVE_CLK_PREPARE=y
CONFIG_COMMON_CLK=y
CONFIG_COMMON_CLK_WM831X=m
-CONFIG_CLK_HSDK=y
-CONFIG_COMMON_CLK_MAX77686=m
CONFIG_COMMON_CLK_MAX9485=m
-CONFIG_COMMON_CLK_RK808=m
CONFIG_COMMON_CLK_SI5341=m
CONFIG_COMMON_CLK_SI5351=m
-CONFIG_COMMON_CLK_SI514=m
CONFIG_COMMON_CLK_SI544=m
-CONFIG_COMMON_CLK_SI570=m
CONFIG_COMMON_CLK_CDCE706=m
-CONFIG_COMMON_CLK_CDCE925=m
CONFIG_COMMON_CLK_CS2000_CP=m
CONFIG_COMMON_CLK_S2MPS11=m
CONFIG_CLK_TWL6040=m
-CONFIG_COMMON_CLK_LOCHNAGAR=m
CONFIG_COMMON_CLK_PALMAS=m
CONFIG_COMMON_CLK_PWM=m
-CONFIG_COMMON_CLK_VC5=m
-CONFIG_COMMON_CLK_BD718XX=m
-CONFIG_COMMON_CLK_FIXED_MMIO=y
-CONFIG_CLK_LGM_CGU=y
CONFIG_HWSPINLOCK=y
#
# Clock Source drivers
#
-CONFIG_TIMER_OF=y
-CONFIG_TIMER_PROBE=y
CONFIG_CLKEVT_I8253=y
CONFIG_I8253_LOCK=y
CONFIG_CLKBLD_I8253=y
-CONFIG_CLKSRC_MMIO=y
-CONFIG_MICROCHIP_PIT64B=y
# end of Clock Source drivers
CONFIG_MAILBOX=y
-CONFIG_PLATFORM_MHU=m
CONFIG_PCC=y
CONFIG_ALTERA_MBOX=m
-CONFIG_MAILBOX_TEST=m
CONFIG_IOMMU_IOVA=y
CONFIG_IOASID=y
CONFIG_IOMMU_API=y
@@ -9004,7 +8150,6 @@ CONFIG_IOMMU_SUPPORT=y
# CONFIG_IOMMU_DEBUGFS is not set
# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
-CONFIG_OF_IOMMU=y
CONFIG_IOMMU_DMA=y
CONFIG_AMD_IOMMU=y
CONFIG_AMD_IOMMU_V2=y
@@ -9013,7 +8158,7 @@ CONFIG_INTEL_IOMMU=y
CONFIG_INTEL_IOMMU_SVM=y
# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-# CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON is not set
+CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y
CONFIG_IRQ_REMAP=y
CONFIG_HYPERV_IOMMU=y
@@ -9042,6 +8187,7 @@ CONFIG_SOUNDWIRE=m
CONFIG_SOUNDWIRE_CADENCE=m
CONFIG_SOUNDWIRE_INTEL=m
CONFIG_SOUNDWIRE_QCOM=m
+CONFIG_SOUNDWIRE_GENERIC_ALLOCATION=m
#
# SOC (System On Chip) specific Drivers
@@ -9075,6 +8221,7 @@ CONFIG_SOUNDWIRE_QCOM=m
#
# Qualcomm SoC drivers
#
+CONFIG_QCOM_QMI_HELPERS=m
# end of Qualcomm SoC drivers
CONFIG_SOC_TI=y
@@ -9158,7 +8305,6 @@ CONFIG_BMC150_ACCEL_I2C=m
CONFIG_BMC150_ACCEL_SPI=m
CONFIG_DA280=m
CONFIG_DA311=m
-CONFIG_DMARD06=m
CONFIG_DMARD09=m
CONFIG_DMARD10=m
CONFIG_HID_SENSOR_ACCEL_3D=m
@@ -9215,10 +8361,8 @@ CONFIG_ADI_AXI_ADC=m
CONFIG_AXP20X_ADC=m
CONFIG_AXP288_ADC=m
CONFIG_CC10001_ADC=m
-CONFIG_CPCAP_ADC=m
CONFIG_DA9150_GPADC=m
CONFIG_DLN2_ADC=m
-CONFIG_ENVELOPE_DETECTOR=m
CONFIG_HI8435=m
CONFIG_HX711=m
CONFIG_INA2XX_ADC=m
@@ -9241,13 +8385,6 @@ CONFIG_MEN_Z188_ADC=m
CONFIG_MP2629_ADC=m
CONFIG_NAU7802=m
CONFIG_PALMAS_GPADC=m
-CONFIG_QCOM_VADC_COMMON=m
-CONFIG_QCOM_SPMI_IADC=m
-CONFIG_QCOM_SPMI_VADC=m
-CONFIG_QCOM_SPMI_ADC5=m
-CONFIG_RN5T618_ADC=m
-CONFIG_SD_ADC_MODULATOR=m
-CONFIG_STMPE_ADC=m
CONFIG_TI_ADC081C=m
CONFIG_TI_ADC0832=m
CONFIG_TI_ADC084S021=m
@@ -9257,14 +8394,10 @@ CONFIG_TI_ADC128S052=m
CONFIG_TI_ADC161S626=m
CONFIG_TI_ADS1015=m
CONFIG_TI_ADS7950=m
-CONFIG_TI_ADS8344=m
-CONFIG_TI_ADS8688=m
-CONFIG_TI_ADS124S08=m
CONFIG_TI_AM335X_ADC=m
CONFIG_TI_TLC4541=m
CONFIG_TWL4030_MADC=m
CONFIG_TWL6030_GPADC=m
-CONFIG_VF610_ADC=m
CONFIG_VIPERBOARD_ADC=m
CONFIG_XILINX_XADC=m
# end of Analog to digital converters
@@ -9272,7 +8405,6 @@ CONFIG_XILINX_XADC=m
#
# Analog Front Ends
#
-CONFIG_IIO_RESCALE=m
# end of Analog Front Ends
#
@@ -9350,20 +8482,17 @@ CONFIG_AD5770R=m
CONFIG_AD5791=m
CONFIG_AD7303=m
CONFIG_AD8801=m
-CONFIG_DPOT_DAC=m
CONFIG_DS4424=m
CONFIG_LTC1660=m
CONFIG_LTC2632=m
CONFIG_M62332=m
CONFIG_MAX517=m
-CONFIG_MAX5821=m
CONFIG_MCP4725=m
CONFIG_MCP4922=m
CONFIG_TI_DAC082S085=m
CONFIG_TI_DAC5571=m
CONFIG_TI_DAC7311=m
CONFIG_TI_DAC7612=m
-CONFIG_VF610_DAC=m
# end of Digital to analog converters
#
@@ -9397,6 +8526,7 @@ CONFIG_ADIS16080=m
CONFIG_ADIS16130=m
CONFIG_ADIS16136=m
CONFIG_ADIS16260=m
+CONFIG_ADXRS290=m
CONFIG_ADXRS450=m
CONFIG_BMG160=m
CONFIG_BMG160_I2C=m
@@ -9433,6 +8563,7 @@ CONFIG_MAX30102=m
CONFIG_AM2315=m
CONFIG_DHT11=m
CONFIG_HDC100X=m
+CONFIG_HDC2010=m
CONFIG_HID_SENSOR_HUMIDITY=m
CONFIG_HTS221=m
CONFIG_HTS221_I2C=m
@@ -9465,7 +8596,6 @@ CONFIG_INV_MPU6050_SPI=m
CONFIG_IIO_ST_LSM6DSX=m
CONFIG_IIO_ST_LSM6DSX_I2C=m
CONFIG_IIO_ST_LSM6DSX_SPI=m
-CONFIG_IIO_ST_LSM6DSX_I3C=m
# end of Inertial measurement units
CONFIG_IIO_ADIS_LIB=m
@@ -9481,12 +8611,12 @@ CONFIG_AL3010=m
CONFIG_AL3320A=m
CONFIG_APDS9300=m
CONFIG_APDS9960=m
+CONFIG_AS73211=m
CONFIG_BH1750=m
CONFIG_BH1780=m
CONFIG_CM32181=m
CONFIG_CM3232=m
CONFIG_CM3323=m
-CONFIG_CM3605=m
CONFIG_CM36651=m
CONFIG_IIO_CROS_EC_LIGHT_PROX=m
CONFIG_GP2AP002=m
@@ -9531,7 +8661,6 @@ CONFIG_ZOPT2201=m
#
# Magnetometer sensors
#
-CONFIG_AK8974=m
CONFIG_AK8975=m
CONFIG_AK09911=m
CONFIG_BMC150_MAGN=m
@@ -9554,7 +8683,6 @@ CONFIG_SENSORS_RM3100_SPI=m
#
# Multiplexers
#
-CONFIG_IIO_MUX=m
# end of Multiplexers
#
@@ -9687,46 +8815,25 @@ CONFIG_NTB_SWITCHTEC=m
# CONFIG_NTB_PERF is not set
# CONFIG_NTB_MSI_TEST is not set
CONFIG_NTB_TRANSPORT=m
-CONFIG_VME_BUS=y
-
-#
-# VME Bridge Drivers
-#
-CONFIG_VME_CA91CX42=m
-CONFIG_VME_TSI148=m
-# CONFIG_VME_FAKE is not set
-
-#
-# VME Board Drivers
-#
-CONFIG_VMIVME_7805=m
-
-#
-# VME Device Drivers
-#
-CONFIG_VME_USER=m
+# CONFIG_VME_BUS is not set
CONFIG_PWM=y
CONFIG_PWM_SYSFS=y
# CONFIG_PWM_DEBUG is not set
-CONFIG_PWM_ATMEL_HLCDC_PWM=m
CONFIG_PWM_CRC=y
CONFIG_PWM_CROS_EC=m
-CONFIG_PWM_FSL_FTM=m
CONFIG_PWM_IQS620A=m
CONFIG_PWM_LP3943=m
CONFIG_PWM_LPSS=m
CONFIG_PWM_LPSS_PCI=m
CONFIG_PWM_LPSS_PLATFORM=m
CONFIG_PWM_PCA9685=m
-CONFIG_PWM_STMPE=y
+CONFIG_PWM_SL28CPLD=m
CONFIG_PWM_TWL=m
CONFIG_PWM_TWL_LED=m
#
# IRQ chip support
#
-CONFIG_IRQCHIP=y
-CONFIG_AL_FIC=y
CONFIG_MADERA_IRQ=m
# end of IRQ chip support
@@ -9735,32 +8842,21 @@ CONFIG_BOARD_TPCI200=m
CONFIG_SERIAL_IPOCTAL=m
CONFIG_RESET_CONTROLLER=y
CONFIG_RESET_BRCMSTB_RESCAL=y
-CONFIG_RESET_INTEL_GW=y
CONFIG_RESET_TI_SYSCON=m
#
# PHY Subsystem
#
CONFIG_GENERIC_PHY=y
-CONFIG_GENERIC_PHY_MIPI_DPHY=y
+CONFIG_USB_LGM_PHY=m
CONFIG_BCM_KONA_USB2_PHY=m
-CONFIG_PHY_CADENCE_TORRENT=m
-CONFIG_PHY_CADENCE_DPHY=m
-CONFIG_PHY_CADENCE_SIERRA=m
-CONFIG_PHY_CADENCE_SALVO=m
-CONFIG_PHY_FSL_IMX8MQ_USB=m
-CONFIG_PHY_MIXEL_MIPI_DPHY=m
CONFIG_PHY_PXA_28NM_HSIC=m
CONFIG_PHY_PXA_28NM_USB2=m
CONFIG_PHY_CPCAP_USB=m
-CONFIG_PHY_MAPPHONE_MDM6600=m
-CONFIG_PHY_OCELOT_SERDES=m
CONFIG_PHY_QCOM_USB_HS=m
CONFIG_PHY_QCOM_USB_HSIC=m
-CONFIG_PHY_SAMSUNG_USB2=m
CONFIG_PHY_TUSB1210=m
-CONFIG_PHY_INTEL_COMBO=y
-CONFIG_PHY_INTEL_EMMC=m
+CONFIG_PHY_INTEL_LGM_EMMC=m
# end of PHY Subsystem
CONFIG_POWERCAP=y
@@ -9780,6 +8876,7 @@ CONFIG_RAS=y
CONFIG_RAS_CEC=y
# CONFIG_RAS_CEC_DEBUG is not set
CONFIG_USB4=m
+# CONFIG_USB4_DEBUGFS_WRITE is not set
#
# Android
@@ -9800,17 +8897,16 @@ CONFIG_BTT=y
CONFIG_ND_PFN=m
CONFIG_NVDIMM_PFN=y
CONFIG_NVDIMM_DAX=y
-CONFIG_OF_PMEM=m
CONFIG_DAX_DRIVER=y
CONFIG_DAX=y
CONFIG_DEV_DAX=m
CONFIG_DEV_DAX_PMEM=m
CONFIG_DEV_DAX_HMEM=m
+CONFIG_DEV_DAX_HMEM_DEVICES=y
CONFIG_DEV_DAX_KMEM=m
CONFIG_DEV_DAX_PMEM_COMPAT=m
CONFIG_NVMEM=y
CONFIG_NVMEM_SYSFS=y
-CONFIG_NVMEM_SPMI_SDAM=m
CONFIG_RAVE_SP_EEPROM=m
#
@@ -9835,17 +8931,14 @@ CONFIG_INTEL_TH_PTI=m
CONFIG_FPGA=m
CONFIG_ALTERA_PR_IP_CORE=m
-CONFIG_ALTERA_PR_IP_CORE_PLAT=m
CONFIG_FPGA_MGR_ALTERA_PS_SPI=m
CONFIG_FPGA_MGR_ALTERA_CVP=m
CONFIG_FPGA_MGR_XILINX_SPI=m
-CONFIG_FPGA_MGR_ICE40_SPI=m
CONFIG_FPGA_MGR_MACHXO2_SPI=m
CONFIG_FPGA_BRIDGE=m
CONFIG_ALTERA_FREEZE_BRIDGE=m
CONFIG_XILINX_PR_DECOUPLER=m
CONFIG_FPGA_REGION=m
-CONFIG_OF_FPGA_REGION=m
CONFIG_FPGA_DFL=m
CONFIG_FPGA_DFL_FME=m
CONFIG_FPGA_DFL_FME_MGR=m
@@ -9853,14 +8946,6 @@ CONFIG_FPGA_DFL_FME_BRIDGE=m
CONFIG_FPGA_DFL_FME_REGION=m
CONFIG_FPGA_DFL_AFU=m
CONFIG_FPGA_DFL_PCI=m
-CONFIG_FSI=m
-CONFIG_FSI_NEW_DEV_NODE=y
-CONFIG_FSI_MASTER_GPIO=m
-CONFIG_FSI_MASTER_HUB=m
-CONFIG_FSI_MASTER_ASPEED=m
-CONFIG_FSI_SCOM=m
-CONFIG_FSI_SBEFIFO=m
-CONFIG_FSI_OCC=m
CONFIG_TEE=m
#
@@ -9877,7 +8962,6 @@ CONFIG_MULTIPLEXER=m
CONFIG_MUX_ADG792A=m
CONFIG_MUX_ADGS1408=m
CONFIG_MUX_GPIO=m
-CONFIG_MUX_MMIO=m
# end of Multiplexer drivers
CONFIG_PM_OPP=y
@@ -9888,10 +8972,9 @@ CONFIG_SLIMBUS=m
CONFIG_SLIM_QCOM_CTRL=m
CONFIG_INTERCONNECT=y
CONFIG_COUNTER=m
-CONFIG_FTM_QUADDEC=m
-CONFIG_MICROCHIP_TCB_CAPTURE=m
CONFIG_MOST=m
CONFIG_MOST_USB_HDM=m
+CONFIG_MOST_CDEV=m
# end of Device Drivers
#
@@ -9922,6 +9005,7 @@ CONFIG_JFS_SECURITY=y
# CONFIG_JFS_DEBUG is not set
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=m
+CONFIG_XFS_SUPPORT_V4=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
@@ -9985,12 +9069,12 @@ CONFIG_QUOTA_TREE=m
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_QUOTACTL=y
-CONFIG_QUOTACTL_COMPAT=y
CONFIG_AUTOFS4_FS=y
CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_VIRTIO_FS=m
+CONFIG_FUSE_DAX=y
CONFIG_OVERLAY_FS=m
CONFIG_OVERLAY_FS_REDIRECT_DIR=y
# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set
@@ -10003,9 +9087,9 @@ CONFIG_OVERLAY_FS_METACOPY=y
#
CONFIG_FSCACHE=m
CONFIG_FSCACHE_STATS=y
-CONFIG_FSCACHE_HISTOGRAM=y
+# CONFIG_FSCACHE_HISTOGRAM is not set
# CONFIG_FSCACHE_DEBUG is not set
-# CONFIG_FSCACHE_OBJECT_LIST is not set
+CONFIG_FSCACHE_OBJECT_LIST=y
CONFIG_CACHEFILES=m
# CONFIG_CACHEFILES_DEBUG is not set
# CONFIG_CACHEFILES_HISTOGRAM is not set
@@ -10027,13 +9111,11 @@ CONFIG_FAT_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
CONFIG_FAT_DEFAULT_UTF8=y
CONFIG_EXFAT_FS=m
CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
-CONFIG_NTFS_FS=m
-# CONFIG_NTFS_DEBUG is not set
-CONFIG_NTFS_RW=y
+# CONFIG_NTFS_FS is not set
# end of DOS/FAT/EXFAT/NT Filesystems
#
@@ -10101,8 +9183,8 @@ CONFIG_SQUASHFS=m
# CONFIG_SQUASHFS_FILE_CACHE is not set
CONFIG_SQUASHFS_FILE_DIRECT=y
# CONFIG_SQUASHFS_DECOMP_SINGLE is not set
-CONFIG_SQUASHFS_DECOMP_MULTI=y
-# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
+# CONFIG_SQUASHFS_DECOMP_MULTI is not set
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_ZLIB=y
CONFIG_SQUASHFS_LZ4=y
@@ -10140,7 +9222,7 @@ CONFIG_PSTORE_COMPRESS_DEFAULT="zstd"
# CONFIG_PSTORE_CONSOLE is not set
# CONFIG_PSTORE_PMSG is not set
# CONFIG_PSTORE_FTRACE is not set
-CONFIG_PSTORE_RAM=y
+CONFIG_PSTORE_RAM=m
CONFIG_PSTORE_ZONE=m
CONFIG_PSTORE_BLK=m
CONFIG_PSTORE_BLK_BLKDEV=""
@@ -10178,6 +9260,7 @@ CONFIG_NFS_FSCACHE=y
CONFIG_NFS_USE_KERNEL_DNS=y
CONFIG_NFS_DEBUG=y
# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set
+# CONFIG_NFS_V4_2_READ_PLUS is not set
CONFIG_NFSD=m
CONFIG_NFSD_V2_ACL=y
CONFIG_NFSD_V3=y
@@ -10230,7 +9313,7 @@ CONFIG_9P_FS_POSIX_ACL=y
CONFIG_9P_FS_SECURITY=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
CONFIG_NLS_CODEPAGE_850=m
@@ -10253,7 +9336,7 @@ CONFIG_NLS_CODEPAGE_874=m
CONFIG_NLS_ISO8859_8=m
CONFIG_NLS_CODEPAGE_1250=m
CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
+CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_2=m
CONFIG_NLS_ISO8859_3=m
@@ -10280,7 +9363,7 @@ CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_NLS_UTF8=m
CONFIG_DLM=m
-# CONFIG_DLM_DEBUG is not set
+CONFIG_DLM_DEBUG=y
CONFIG_UNICODE=y
# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set
CONFIG_IO_WQ=y
@@ -10309,7 +9392,6 @@ CONFIG_LSM_MMAP_MIN_ADDR=65536
CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_HARDENED_USERCOPY_FALLBACK=y
-# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set
CONFIG_FORTIFY_SOURCE=y
# CONFIG_STATIC_USERMODEHELPER is not set
CONFIG_SECURITY_SELINUX=y
@@ -10385,7 +9467,7 @@ CONFIG_CRYPTO=y
#
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_SKCIPHER=y
CONFIG_CRYPTO_SKCIPHER2=y
@@ -10393,7 +9475,7 @@ CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG=y
CONFIG_CRYPTO_RNG2=y
-CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_RNG_DEFAULT=m
CONFIG_CRYPTO_AKCIPHER2=y
CONFIG_CRYPTO_AKCIPHER=y
CONFIG_CRYPTO_KPP2=y
@@ -10403,8 +9485,8 @@ CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
-CONFIG_CRYPTO_GF128MUL=y
-CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_NULL2=y
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
@@ -10422,6 +9504,7 @@ CONFIG_CRYPTO_DH=y
CONFIG_CRYPTO_ECC=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_CURVE25519_X86=m
@@ -10429,11 +9512,11 @@ CONFIG_CRYPTO_CURVE25519_X86=m
# Authenticated Encryption with Associated Data
#
CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m
-CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_SEQIV=m
CONFIG_CRYPTO_ECHAINIV=m
#
@@ -10476,7 +9559,7 @@ CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_BLAKE2S_X86=m
CONFIG_CRYPTO_CRCT10DIF=y
CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
-CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_GHASH=m
CONFIG_CRYPTO_POLY1305=m
CONFIG_CRYPTO_POLY1305_X86_64=m
CONFIG_CRYPTO_MD4=m
@@ -10505,8 +9588,6 @@ CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_AES_NI_INTEL=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_BLOWFISH_COMMON=m
CONFIG_CRYPTO_BLOWFISH_X86_64=m
@@ -10522,17 +9603,14 @@ CONFIG_CRYPTO_CAST6_AVX_X86_64=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_DES3_EDE_X86_64=m
CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_CHACHA20=m
CONFIG_CRYPTO_CHACHA20_X86_64=m
-CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_TWOFISH_COMMON=m
CONFIG_CRYPTO_TWOFISH_X86_64=m
@@ -10553,18 +9631,20 @@ CONFIG_CRYPTO_ZSTD=y
# Random Number Generation
#
CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_MENU=m
CONFIG_CRYPTO_DRBG_HMAC=y
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DRBG_CTR=y
-CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_DRBG=m
CONFIG_CRYPTO_JITTERENTROPY=y
CONFIG_CRYPTO_USER_API=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
+# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
CONFIG_CRYPTO_USER_API_AEAD=m
-# CONFIG_CRYPTO_STATS is not set
+# CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE is not set
+CONFIG_CRYPTO_STATS=y
CONFIG_CRYPTO_HASH_INFO=y
#
@@ -10611,11 +9691,8 @@ CONFIG_CRYPTO_DEV_QAT_C62XVF=m
CONFIG_CRYPTO_DEV_NITROX=m
CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
CONFIG_CRYPTO_DEV_CHELSIO=m
-CONFIG_CHELSIO_IPSEC_INLINE=y
-CONFIG_CHELSIO_TLS_DEVICE=y
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CRYPTO_DEV_SAFEXCEL=m
-CONFIG_CRYPTO_DEV_CCREE=m
CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m
CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -10653,7 +9730,7 @@ CONFIG_RAID6_PQ=m
# CONFIG_RAID6_FORCE_ALGO is not set
# end of RAID 6
-CONFIG_RAID6_PQ_BENCHMARK=y
+# CONFIG_RAID6_PQ_BENCHMARK is not set
CONFIG_LINEAR_RANGES=y
CONFIG_PACKING=y
CONFIG_BITREVERSE=y
@@ -10714,11 +9791,9 @@ CONFIG_DECOMPRESS_LZO=y
CONFIG_DECOMPRESS_LZ4=y
CONFIG_DECOMPRESS_ZSTD=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON=m
CONFIG_REED_SOLOMON_ENC8=y
CONFIG_REED_SOLOMON_DEC8=y
-CONFIG_REED_SOLOMON_DEC16=y
-CONFIG_BCH=m
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
@@ -10734,14 +9809,24 @@ CONFIG_DMA_OPS=y
CONFIG_NEED_SG_DMA_LENGTH=y
CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_ARCH_DMA_ADDR_T_64BIT=y
-CONFIG_DMA_DECLARE_COHERENT=y
CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
CONFIG_DMA_VIRT_OPS=y
CONFIG_SWIOTLB=y
CONFIG_DMA_COHERENT_POOL=y
+CONFIG_DMA_CMA=y
+# CONFIG_DMA_PERNUMA_CMA is not set
+
+#
+# Default contiguous memory area size:
+#
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_CMA_SIZE_SEL_MBYTES=y
+# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set
+# CONFIG_CMA_SIZE_SEL_MIN is not set
+# CONFIG_CMA_SIZE_SEL_MAX is not set
+CONFIG_CMA_ALIGNMENT=8
# CONFIG_DMA_API_DEBUG is not set
CONFIG_SGL_ALLOC=y
-CONFIG_IOMMU_HELPER=y
CONFIG_CHECK_SIGNATURE=y
CONFIG_CPU_RMAP=y
CONFIG_DQL=y
@@ -10753,7 +9838,6 @@ CONFIG_CLZ_TAB=y
CONFIG_IRQ_POLL=y
CONFIG_MPILIB=y
CONFIG_DIMLIB=y
-CONFIG_LIBFDT=y
CONFIG_OID_REGISTRY=y
CONFIG_UCS2_STRING=y
CONFIG_HAVE_GENERIC_VDSO=y
@@ -10773,6 +9857,7 @@ CONFIG_FONT_8x16=y
# CONFIG_FONT_SUN8x16 is not set
# CONFIG_FONT_SUN12x22 is not set
CONFIG_FONT_TER16x32=y
+# CONFIG_FONT_6x8 is not set
CONFIG_SG_POOL=y
CONFIG_ARCH_HAS_PMEM_API=y
CONFIG_MEMREGION=y
@@ -10799,7 +9884,7 @@ CONFIG_PRINTK_TIME=y
CONFIG_CONSOLE_LOGLEVEL_DEFAULT=4
CONFIG_CONSOLE_LOGLEVEL_QUIET=1
CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
-# CONFIG_BOOT_PRINTK_DELAY is not set
+CONFIG_BOOT_PRINTK_DELAY=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DYNAMIC_DEBUG_CORE=y
CONFIG_SYMBOLIC_ERRNAME=y
@@ -10816,14 +9901,13 @@ CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_BTF=y
# CONFIG_GDB_SCRIPTS is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=2048
CONFIG_STRIP_ASM_SYMS=y
# CONFIG_READABLE_ASM is not set
# CONFIG_HEADERS_INSTALL is not set
# CONFIG_DEBUG_SECTION_MISMATCH is not set
CONFIG_SECTION_MISMATCH_WARN_ONLY=y
-# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B is not set
CONFIG_STACK_VALIDATION=y
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# end of Compile-time checks and compiler options
@@ -10847,7 +9931,7 @@ CONFIG_HAVE_ARCH_KCSAN=y
# end of Generic Kernel Debugging Instruments
CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MISC=y
+# CONFIG_DEBUG_MISC is not set
#
# Memory Debugging
@@ -10859,7 +9943,7 @@ CONFIG_PAGE_POISONING=y
CONFIG_PAGE_POISONING_NO_SANITY=y
CONFIG_PAGE_POISONING_ZERO=y
# CONFIG_DEBUG_PAGE_REF is not set
-# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_DEBUG_RODATA_TEST=y
CONFIG_ARCH_HAS_DEBUG_WX=y
CONFIG_DEBUG_WX=y
CONFIG_GENERIC_PTDUMP=y
@@ -10871,9 +9955,12 @@ CONFIG_PTDUMP_CORE=y
CONFIG_HAVE_DEBUG_KMEMLEAK=y
# CONFIG_DEBUG_KMEMLEAK is not set
# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_SCHED_STACK_END_CHECK is not set
CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
-# CONFIG_DEBUG_VM is not set
+CONFIG_DEBUG_VM=y
+# CONFIG_DEBUG_VM_VMACACHE is not set
+# CONFIG_DEBUG_VM_RB is not set
+# CONFIG_DEBUG_VM_PGFLAGS is not set
# CONFIG_DEBUG_VM_PGTABLE is not set
CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
# CONFIG_DEBUG_VIRTUAL is not set
@@ -10886,7 +9973,7 @@ CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
# CONFIG_KASAN is not set
# end of Memory Debugging
-# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DEBUG_SHIRQ=y
#
# Debug Oops, Lockups and Hangs
@@ -10920,7 +10007,7 @@ CONFIG_SCHEDSTATS=y
# end of Scheduler Debugging
# CONFIG_DEBUG_TIMEKEEPING is not set
-CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_PREEMPT is not set
#
# Lock Debugging (spinlocks, mutexes, etc...)
@@ -10938,6 +10025,8 @@ CONFIG_LOCK_DEBUGGING_SUPPORT=y
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_LOCK_TORTURE_TEST is not set
# CONFIG_WW_MUTEX_SELFTEST is not set
+# CONFIG_SCF_TORTURE_TEST is not set
+# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
# end of Lock Debugging (spinlocks, mutexes, etc...)
CONFIG_STACKTRACE=y
@@ -10959,12 +10048,13 @@ CONFIG_STACKTRACE=y
#
# RCU Debugging
#
-# CONFIG_RCU_PERF_TEST is not set
+# CONFIG_RCU_SCALE_TEST is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_REF_SCALE_TEST is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_RCU_TRACE is not set
# CONFIG_RCU_EQS_DEBUG is not set
+# CONFIG_RCU_STRICT_GRACE_PERIOD is not set
# end of RCU Debugging
# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
@@ -10992,7 +10082,7 @@ CONFIG_TRACING=y
CONFIG_GENERIC_TRACER=y
CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
-# CONFIG_BOOTTIME_TRACING is not set
+CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_TRACER=y
CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_DYNAMIC_FTRACE=y
@@ -11044,15 +10134,15 @@ CONFIG_IO_STRICT_DEVMEM=y
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
+CONFIG_EARLY_PRINTK_USB=y
# CONFIG_X86_VERBOSE_BOOTUP is not set
CONFIG_EARLY_PRINTK=y
-# CONFIG_EARLY_PRINTK_DBGP is not set
-# CONFIG_EARLY_PRINTK_USB_XDBC is not set
+CONFIG_EARLY_PRINTK_DBGP=y
+CONFIG_EARLY_PRINTK_USB_XDBC=y
# CONFIG_EFI_PGT_DUMP is not set
# CONFIG_DEBUG_TLBFLUSH is not set
-# CONFIG_IOMMU_DEBUG is not set
CONFIG_HAVE_MMIOTRACE_SUPPORT=y
-# CONFIG_X86_DECODER_SELFTEST is not set
+CONFIG_X86_DECODER_SELFTEST=y
CONFIG_IO_DELAY_0X80=y
# CONFIG_IO_DELAY_0XED is not set
# CONFIG_IO_DELAY_UDELAY is not set
@@ -11065,7 +10155,6 @@ CONFIG_DEBUG_BOOT_PARAMS=y
# CONFIG_PUNIT_ATOM_DEBUG is not set
CONFIG_UNWINDER_ORC=y
# CONFIG_UNWINDER_FRAME_POINTER is not set
-# CONFIG_UNWINDER_GUESS is not set
# end of x86 Debugging
#
@@ -11079,7 +10168,7 @@ CONFIG_ARCH_HAS_KCOV=y
CONFIG_CC_HAS_SANCOV_TRACE_PC=y
# CONFIG_KCOV is not set
CONFIG_RUNTIME_TESTING_MENU=y
-CONFIG_LKDTM=m
+# CONFIG_LKDTM is not set
# CONFIG_TEST_LIST_SORT is not set
# CONFIG_TEST_MIN_HEAP is not set
# CONFIG_TEST_SORT is not set
@@ -11089,15 +10178,14 @@ CONFIG_LKDTM=m
# CONFIG_REED_SOLOMON_TEST is not set
# CONFIG_INTERVAL_TREE_TEST is not set
# CONFIG_PERCPU_TEST is not set
-# CONFIG_ATOMIC64_SELFTEST is not set
-# CONFIG_ASYNC_RAID6_TEST is not set
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_ASYNC_RAID6_TEST=m
# CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_STRING_HELPERS is not set
# CONFIG_TEST_STRSCPY is not set
-# CONFIG_TEST_KSTRTOX is not set
+CONFIG_TEST_KSTRTOX=y
# CONFIG_TEST_PRINTF is not set
# CONFIG_TEST_BITMAP is not set
-# CONFIG_TEST_BITFIELD is not set
# CONFIG_TEST_UUID is not set
# CONFIG_TEST_XARRAY is not set
# CONFIG_TEST_OVERFLOW is not set
diff --git a/uksm-5.10.patch b/uksm-5.10.patch
new file mode 100644
index 00000000000..eacd7b693e9
--- /dev/null
+++ b/uksm-5.10.patch
@@ -0,0 +1,6935 @@
+diff --git a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt
+new file mode 100644
+index 000000000000..be19a3127001
+--- /dev/null
++++ b/Documentation/vm/uksm.txt
+@@ -0,0 +1,61 @@
++The Ultra Kernel Samepage Merging feature
++----------------------------------------------
++/*
++ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
++ *
++ * This is an improvement upon KSM. Some basic data structures and routines
++ * are borrowed from ksm.c .
++ *
++ * Its new features:
++ * 1. Full system scan:
++ * It automatically scans all user processes' anonymous VMAs. Kernel-user
++ * interaction to submit a memory area to KSM is no longer needed.
++ *
++ * 2. Rich area detection:
++ * It automatically detects rich areas containing abundant duplicated
++ * pages based. Rich areas are given a full scan speed. Poor areas are
++ * sampled at a reasonable speed with very low CPU consumption.
++ *
++ * 3. Ultra Per-page scan speed improvement:
++ * A new hash algorithm is proposed. As a result, on a machine with
++ * Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHZ DDR2 main memory, it
++ * can scan memory areas that does not contain duplicated pages at speed of
++ * 627MB/sec ~ 2445MB/sec and can merge duplicated areas at speed of
++ * 477MB/sec ~ 923MB/sec.
++ *
++ * 4. Thrashing area avoidance:
++ * Thrashing area(an VMA that has frequent Ksm page break-out) can be
++ * filtered out. My benchmark shows it's more efficient than KSM's per-page
++ * hash value based volatile page detection.
++ *
++ *
++ * 5. Misc changes upon KSM:
++ * * It has a fully x86-opitmized memcmp dedicated for 4-byte-aligned page
++ * comparison. It's much faster than default C version on x86.
++ * * rmap_item now has an struct *page member to loosely cache a
++ * address-->page mapping, which reduces too much time-costly
++ * follow_page().
++ * * The VMA creation/exit procedures are hooked to let the Ultra KSM know.
++ * * try_to_merge_two_pages() now can revert a pte if it fails. No break_
++ * ksm is needed for this case.
++ *
++ * 6. Full Zero Page consideration(contributed by Figo Zhang)
++ * Now uksmd consider full zero pages as special pages and merge them to an
++ * special unswappable uksm zero page.
++ */
++
++ChangeLog:
++
++2012-05-05 The creation of this Doc
++2012-05-08 UKSM 0.1.1.1 libc crash bug fix, api clean up, doc clean up.
++2012-05-28 UKSM 0.1.1.2 bug fix release
++2012-06-26 UKSM 0.1.2-beta1 first beta release for 0.1.2
++2012-07-2 UKSM 0.1.2-beta2
++2012-07-10 UKSM 0.1.2-beta3
++2012-07-26 UKSM 0.1.2 Fine grained speed control, more scan optimization.
++2012-10-13 UKSM 0.1.2.1 Bug fixes.
++2012-12-31 UKSM 0.1.2.2 Minor bug fixes.
++2014-07-02 UKSM 0.1.2.3 Fix a " __this_cpu_read() in preemptible bug".
++2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger anonying warnings.
++2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation.
++2017-02-26 UKSM 0.1.2.6 Fix a bug in hugetlbpage handling and a race bug with page migration.
+diff --git a/fs/exec.c b/fs/exec.c
+index 547a2390baf5..fc64a20db6bd 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -64,6 +64,7 @@
+ #include <linux/compat.h>
+ #include <linux/vmalloc.h>
+ #include <linux/io_uring.h>
++#include <linux/ksm.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/mmu_context.h>
+diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
+index 887a5532e449..581a6762868e 100644
+--- a/fs/proc/meminfo.c
++++ b/fs/proc/meminfo.c
+@@ -108,7 +108,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
+ #endif
+ show_val_kb(m, "PageTables: ",
+ global_zone_page_state(NR_PAGETABLE));
+-
++#ifdef CONFIG_UKSM
++ show_val_kb(m, "KsmZeroPages: ",
++ global_zone_page_state(NR_UKSM_ZERO_PAGES));
++#endif
+ show_val_kb(m, "NFS_Unstable: ", 0);
+ show_val_kb(m, "Bounce: ",
+ global_zone_page_state(NR_BOUNCE));
+diff --git a/include/linux/ksm.h b/include/linux/ksm.h
+index 161e8164abcf..f0dbdf3c986a 100644
+--- a/include/linux/ksm.h
++++ b/include/linux/ksm.h
+@@ -21,20 +21,16 @@ struct mem_cgroup;
+ #ifdef CONFIG_KSM
+ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, int advice, unsigned long *vm_flags);
+-int __ksm_enter(struct mm_struct *mm);
+-void __ksm_exit(struct mm_struct *mm);
+
+-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
++static inline struct stable_node *page_stable_node(struct page *page)
+ {
+- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+- return __ksm_enter(mm);
+- return 0;
++ return PageKsm(page) ? page_rmapping(page) : NULL;
+ }
+
+-static inline void ksm_exit(struct mm_struct *mm)
++static inline void set_page_stable_node(struct page *page,
++ struct stable_node *stable_node)
+ {
+- if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+- __ksm_exit(mm);
++ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+ }
+
+ /*
+@@ -54,6 +50,33 @@ struct page *ksm_might_need_to_copy(struct page *page,
+ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
+ void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+
++#ifdef CONFIG_KSM_LEGACY
++int __ksm_enter(struct mm_struct *mm);
++void __ksm_exit(struct mm_struct *mm);
++static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
++{
++ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
++ return __ksm_enter(mm);
++ return 0;
++}
++
++static inline void ksm_exit(struct mm_struct *mm)
++{
++ if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
++ __ksm_exit(mm);
++}
++
++#elif defined(CONFIG_UKSM)
++static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
++{
++ return 0;
++}
++
++static inline void ksm_exit(struct mm_struct *mm)
++{
++}
++#endif /* !CONFIG_UKSM */
++
+ #else /* !CONFIG_KSM */
+
+ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+@@ -89,4 +112,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+ #endif /* CONFIG_MMU */
+ #endif /* !CONFIG_KSM */
+
++#include <linux/uksm.h>
++
+ #endif /* __LINUX_KSM_H */
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 5a9238f6caad..5dd1ccf5cb69 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -371,6 +371,9 @@ struct vm_area_struct {
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+ #endif
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
++#ifdef CONFIG_UKSM
++ struct vma_slot *uksm_vma_slot;
++#endif
+ } __randomize_layout;
+
+ struct core_thread {
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index fb3bf696c05e..e4477c3a9a4b 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -159,6 +159,9 @@ enum zone_stat_item {
+ NR_ZSPAGES, /* allocated in zsmalloc */
+ #endif
+ NR_FREE_CMA_PAGES,
++#ifdef CONFIG_UKSM
++ NR_UKSM_ZERO_PAGES,
++#endif
+ NR_VM_ZONE_STAT_ITEMS };
+
+ enum node_stat_item {
+diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
+index e237004d498d..092cdfb7090b 100644
+--- a/include/linux/pgtable.h
++++ b/include/linux/pgtable.h
+@@ -1060,12 +1060,25 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ extern void untrack_pfn_moved(struct vm_area_struct *vma);
+ #endif
+
++#ifdef CONFIG_UKSM
++static inline int is_uksm_zero_pfn(unsigned long pfn)
++{
++ extern unsigned long uksm_zero_pfn;
++ return pfn == uksm_zero_pfn;
++}
++#else
++static inline int is_uksm_zero_pfn(unsigned long pfn)
++{
++ return 0;
++}
++#endif
++
+ #ifdef __HAVE_COLOR_ZERO_PAGE
+ static inline int is_zero_pfn(unsigned long pfn)
+ {
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
++ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT) || is_uksm_zero_pfn(pfn);
+ }
+
+ #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+@@ -1074,7 +1087,7 @@ static inline int is_zero_pfn(unsigned long pfn)
+ static inline int is_zero_pfn(unsigned long pfn)
+ {
+ extern unsigned long zero_pfn;
+- return pfn == zero_pfn;
++ return (pfn == zero_pfn) || (is_uksm_zero_pfn(pfn));
+ }
+
+ static inline unsigned long my_zero_pfn(unsigned long addr)
+diff --git a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h
+new file mode 100644
+index 000000000000..d71edba6b63f
+--- /dev/null
++++ b/include/linux/sradix-tree.h
+@@ -0,0 +1,77 @@
++#ifndef _LINUX_SRADIX_TREE_H
++#define _LINUX_SRADIX_TREE_H
++
++
++#define INIT_SRADIX_TREE(root, mask) \
++do { \
++ (root)->height = 0; \
++ (root)->gfp_mask = (mask); \
++ (root)->rnode = NULL; \
++} while (0)
++
++#define ULONG_BITS (sizeof(unsigned long) * 8)
++#define SRADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
++//#define SRADIX_TREE_MAP_SHIFT 6
++//#define SRADIX_TREE_MAP_SIZE (1UL << SRADIX_TREE_MAP_SHIFT)
++//#define SRADIX_TREE_MAP_MASK (SRADIX_TREE_MAP_SIZE-1)
++
++struct sradix_tree_node {
++ unsigned int height; /* Height from the bottom */
++ unsigned int count;
++ unsigned int fulls; /* Number of full sublevel trees */
++ struct sradix_tree_node *parent;
++ void *stores[0];
++};
++
++/* A simple radix tree implementation */
++struct sradix_tree_root {
++ unsigned int height;
++ struct sradix_tree_node *rnode;
++
++ /* Where found to have available empty stores in its sublevels */
++ struct sradix_tree_node *enter_node;
++ unsigned int shift;
++ unsigned int stores_size;
++ unsigned int mask;
++ unsigned long min; /* The first hole index */
++ unsigned long num;
++ //unsigned long *height_to_maxindex;
++
++ /* How the node is allocated and freed. */
++ struct sradix_tree_node *(*alloc)(void);
++ void (*free)(struct sradix_tree_node *node);
++
++ /* When a new node is added and removed */
++ void (*extend)(struct sradix_tree_node *parent, struct sradix_tree_node *child);
++ void (*assign)(struct sradix_tree_node *node, unsigned int index, void *item);
++ void (*rm)(struct sradix_tree_node *node, unsigned int offset);
++};
++
++struct sradix_tree_path {
++ struct sradix_tree_node *node;
++ int offset;
++};
++
++static inline
++void init_sradix_tree_root(struct sradix_tree_root *root, unsigned long shift)
++{
++ root->height = 0;
++ root->rnode = NULL;
++ root->shift = shift;
++ root->stores_size = 1UL << shift;
++ root->mask = root->stores_size - 1;
++}
++
++
++extern void *sradix_tree_next(struct sradix_tree_root *root,
++ struct sradix_tree_node *node, unsigned long index,
++ int (*iter)(void *, unsigned long));
++
++extern int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num);
++
++extern void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
++ struct sradix_tree_node *node, unsigned long index);
++
++extern void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index);
++
++#endif /* _LINUX_SRADIX_TREE_H */
+diff --git a/include/linux/uksm.h b/include/linux/uksm.h
+new file mode 100644
+index 000000000000..bb8651f534f2
+--- /dev/null
++++ b/include/linux/uksm.h
+@@ -0,0 +1,149 @@
++#ifndef __LINUX_UKSM_H
++#define __LINUX_UKSM_H
++/*
++ * Memory merging support.
++ *
++ * This code enables dynamic sharing of identical pages found in different
++ * memory areas, even if they are not shared by fork().
++ */
++
++/* if !CONFIG_UKSM this file should not be compiled at all. */
++#ifdef CONFIG_UKSM
++
++#include <linux/bitops.h>
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/rmap.h>
++#include <linux/sched.h>
++
++extern unsigned long zero_pfn __read_mostly;
++extern unsigned long uksm_zero_pfn __read_mostly;
++extern struct page *empty_uksm_zero_page;
++
++/* must be done before linked to mm */
++extern void uksm_vma_add_new(struct vm_area_struct *vma);
++extern void uksm_remove_vma(struct vm_area_struct *vma);
++
++#define UKSM_SLOT_NEED_SORT (1 << 0)
++#define UKSM_SLOT_NEED_RERAND (1 << 1)
++#define UKSM_SLOT_SCANNED (1 << 2) /* It's scanned in this round */
++#define UKSM_SLOT_FUL_SCANNED (1 << 3)
++#define UKSM_SLOT_IN_UKSM (1 << 4)
++
++struct vma_slot {
++ struct sradix_tree_node *snode;
++ unsigned long sindex;
++
++ struct list_head slot_list;
++ unsigned long fully_scanned_round;
++ unsigned long dedup_num;
++ unsigned long pages_scanned;
++ unsigned long this_sampled;
++ unsigned long last_scanned;
++ unsigned long pages_to_scan;
++ struct scan_rung *rung;
++ struct page **rmap_list_pool;
++ unsigned int *pool_counts;
++ unsigned long pool_size;
++ struct vm_area_struct *vma;
++ struct mm_struct *mm;
++ unsigned long ctime_j;
++ unsigned long pages;
++ unsigned long flags;
++ unsigned long pages_cowed; /* pages cowed this round */
++ unsigned long pages_merged; /* pages merged this round */
++ unsigned long pages_bemerged;
++
++ /* when it has page merged in this eval round */
++ struct list_head dedup_list;
++};
++
++static inline void uksm_unmap_zero_page(pte_t pte)
++{
++ if (pte_pfn(pte) == uksm_zero_pfn)
++ __dec_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
++}
++
++static inline void uksm_map_zero_page(pte_t pte)
++{
++ if (pte_pfn(pte) == uksm_zero_pfn)
++ __inc_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
++}
++
++static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
++{
++ if (vma->uksm_vma_slot && PageKsm(page))
++ vma->uksm_vma_slot->pages_cowed++;
++}
++
++static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
++{
++ if (vma->uksm_vma_slot && pte_pfn(pte) == uksm_zero_pfn)
++ vma->uksm_vma_slot->pages_cowed++;
++}
++
++static inline int uksm_flags_can_scan(unsigned long vm_flags)
++{
++#ifdef VM_SAO
++ if (vm_flags & VM_SAO)
++ return 0;
++#endif
++
++ return !(vm_flags & (VM_PFNMAP | VM_IO | VM_DONTEXPAND |
++ VM_HUGETLB | VM_MIXEDMAP | VM_SHARED
++ | VM_MAYSHARE | VM_GROWSUP | VM_GROWSDOWN));
++}
++
++static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
++{
++ if (uksm_flags_can_scan(*vm_flags_p))
++ *vm_flags_p |= VM_MERGEABLE;
++}
++
++/*
++ * Just a wrapper for BUG_ON for where ksm_zeropage must not be. TODO: it will
++ * be removed when uksm zero page patch is stable enough.
++ */
++static inline void uksm_bugon_zeropage(pte_t pte)
++{
++ BUG_ON(pte_pfn(pte) == uksm_zero_pfn);
++}
++#else
++static inline void uksm_vma_add_new(struct vm_area_struct *vma)
++{
++}
++
++static inline void uksm_remove_vma(struct vm_area_struct *vma)
++{
++}
++
++static inline void uksm_unmap_zero_page(pte_t pte)
++{
++}
++
++static inline void uksm_map_zero_page(pte_t pte)
++{
++}
++
++static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
++{
++}
++
++static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
++{
++}
++
++static inline int uksm_flags_can_scan(unsigned long vm_flags)
++{
++ return 0;
++}
++
++static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
++{
++}
++
++static inline void uksm_bugon_zeropage(pte_t pte)
++{
++}
++#endif /* !CONFIG_UKSM */
++#endif /* __LINUX_UKSM_H */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 6d266388d380..bb52f8731dd6 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -587,7 +587,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ __vma_link_rb(mm, tmp, rb_link, rb_parent);
+ rb_link = &tmp->vm_rb.rb_right;
+ rb_parent = &tmp->vm_rb;
+-
++ uksm_vma_add_new(tmp);
+ mm->map_count++;
+ if (!(tmp->vm_flags & VM_WIPEONFORK))
+ retval = copy_page_range(tmp, mpnt);
+diff --git a/lib/Makefile b/lib/Makefile
+index d415fc7067c5..e4045ebec8cd 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -31,7 +31,7 @@ endif
+ KCSAN_SANITIZE_random32.o := n
+
+ lib-y := ctype.o string.o vsprintf.o cmdline.o \
+- rbtree.o radix-tree.o timerqueue.o xarray.o \
++ rbtree.o radix-tree.o sradix-tree.o timerqueue.o xarray.o \
+ idr.o extable.o sha1.o irq_regs.o argv_split.o \
+ flex_proportions.o ratelimit.o show_mem.o \
+ is_single_threaded.o plist.o decompress.o kobject_uevent.o \
+diff --git a/lib/sradix-tree.c b/lib/sradix-tree.c
+new file mode 100644
+index 000000000000..ab21e6309b93
+--- /dev/null
++++ b/lib/sradix-tree.c
+@@ -0,0 +1,476 @@
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/gcd.h>
++#include <linux/sradix-tree.h>
++
++static inline int sradix_node_full(struct sradix_tree_root *root, struct sradix_tree_node *node)
++{
++ return node->fulls == root->stores_size ||
++ (node->height == 1 && node->count == root->stores_size);
++}
++
++/*
++ * Extend a sradix tree so it can store key @index.
++ */
++static int sradix_tree_extend(struct sradix_tree_root *root, unsigned long index)
++{
++ struct sradix_tree_node *node;
++ unsigned int height;
++
++ if (unlikely(root->rnode == NULL)) {
++ if (!(node = root->alloc()))
++ return -ENOMEM;
++
++ node->height = 1;
++ root->rnode = node;
++ root->height = 1;
++ }
++
++ /* Figure out what the height should be. */
++ height = root->height;
++ index >>= root->shift * height;
++
++ while (index) {
++ index >>= root->shift;
++ height++;
++ }
++
++ while (height > root->height) {
++ unsigned int newheight;
++
++ if (!(node = root->alloc()))
++ return -ENOMEM;
++
++ /* Increase the height. */
++ node->stores[0] = root->rnode;
++ root->rnode->parent = node;
++ if (root->extend)
++ root->extend(node, root->rnode);
++
++ newheight = root->height + 1;
++ node->height = newheight;
++ node->count = 1;
++ if (sradix_node_full(root, root->rnode))
++ node->fulls = 1;
++
++ root->rnode = node;
++ root->height = newheight;
++ }
++
++ return 0;
++}
++
++/*
++ * Search the next item from the current node, that is not NULL
++ * and can satisfy root->iter().
++ */
++void *sradix_tree_next(struct sradix_tree_root *root,
++ struct sradix_tree_node *node, unsigned long index,
++ int (*iter)(void *item, unsigned long height))
++{
++ unsigned long offset;
++ void *item;
++
++ if (unlikely(node == NULL)) {
++ node = root->rnode;
++ for (offset = 0; offset < root->stores_size; offset++) {
++ item = node->stores[offset];
++ if (item && (!iter || iter(item, node->height)))
++ break;
++ }
++
++ if (unlikely(offset >= root->stores_size))
++ return NULL;
++
++ if (node->height == 1)
++ return item;
++ else
++ goto go_down;
++ }
++
++ while (node) {
++ offset = (index & root->mask) + 1;
++ for (; offset < root->stores_size; offset++) {
++ item = node->stores[offset];
++ if (item && (!iter || iter(item, node->height)))
++ break;
++ }
++
++ if (offset < root->stores_size)
++ break;
++
++ node = node->parent;
++ index >>= root->shift;
++ }
++
++ if (!node)
++ return NULL;
++
++ while (node->height > 1) {
++go_down:
++ node = item;
++ for (offset = 0; offset < root->stores_size; offset++) {
++ item = node->stores[offset];
++ if (item && (!iter || iter(item, node->height)))
++ break;
++ }
++
++ if (unlikely(offset >= root->stores_size))
++ return NULL;
++ }
++
++ BUG_ON(offset > root->stores_size);
++
++ return item;
++}
++
++/*
++ * Blindly insert the item to the tree. Typically, we reuse the
++ * first empty store item.
++ */
++int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num)
++{
++ unsigned long index;
++ unsigned int height;
++ struct sradix_tree_node *node, *tmp = NULL;
++ int offset, offset_saved;
++ void **store = NULL;
++ int error, i, j, shift;
++
++go_on:
++ index = root->min;
++
++ if (root->enter_node && !sradix_node_full(root, root->enter_node)) {
++ node = root->enter_node;
++ BUG_ON((index >> (root->shift * root->height)));
++ } else {
++ node = root->rnode;
++ if (node == NULL || (index >> (root->shift * root->height))
++ || sradix_node_full(root, node)) {
++ error = sradix_tree_extend(root, index);
++ if (error)
++ return error;
++
++ node = root->rnode;
++ }
++ }
++
++
++ height = node->height;
++ shift = (height - 1) * root->shift;
++ offset = (index >> shift) & root->mask;
++ while (shift > 0) {
++ offset_saved = offset;
++ for (; offset < root->stores_size; offset++) {
++ store = &node->stores[offset];
++ tmp = *store;
++
++ if (!tmp || !sradix_node_full(root, tmp))
++ break;
++ }
++ BUG_ON(offset >= root->stores_size);
++
++ if (offset != offset_saved) {
++ index += (offset - offset_saved) << shift;
++ index &= ~((1UL << shift) - 1);
++ }
++
++ if (!tmp) {
++ if (!(tmp = root->alloc()))
++ return -ENOMEM;
++
++ tmp->height = shift / root->shift;
++ *store = tmp;
++ tmp->parent = node;
++ node->count++;
++// if (root->extend)
++// root->extend(node, tmp);
++ }
++
++ node = tmp;
++ shift -= root->shift;
++ offset = (index >> shift) & root->mask;
++ }
++
++ BUG_ON(node->height != 1);
++
++
++ store = &node->stores[offset];
++ for (i = 0, j = 0;
++ j < root->stores_size - node->count &&
++ i < root->stores_size - offset && j < num; i++) {
++ if (!store[i]) {
++ store[i] = item[j];
++ if (root->assign)
++ root->assign(node, index + i, item[j]);
++ j++;
++ }
++ }
++
++ node->count += j;
++ root->num += j;
++ num -= j;
++
++ while (sradix_node_full(root, node)) {
++ node = node->parent;
++ if (!node)
++ break;
++
++ node->fulls++;
++ }
++
++ if (unlikely(!node)) {
++ /* All nodes are full */
++ root->min = 1 << (root->height * root->shift);
++ root->enter_node = NULL;
++ } else {
++ root->min = index + i - 1;
++ root->min |= (1UL << (node->height - 1)) - 1;
++ root->min++;
++ root->enter_node = node;
++ }
++
++ if (num) {
++ item += j;
++ goto go_on;
++ }
++
++ return 0;
++}
++
++
++/**
++ * sradix_tree_shrink - shrink height of a sradix tree to minimal
++ * @root sradix tree root
++ *
++ */
++static inline void sradix_tree_shrink(struct sradix_tree_root *root)
++{
++ /* try to shrink tree height */
++ while (root->height > 1) {
++ struct sradix_tree_node *to_free = root->rnode;
++
++ /*
++ * The candidate node has more than one child, or its child
++ * is not at the leftmost store, we cannot shrink.
++ */
++ if (to_free->count != 1 || !to_free->stores[0])
++ break;
++
++ root->rnode = to_free->stores[0];
++ root->rnode->parent = NULL;
++ root->height--;
++ if (unlikely(root->enter_node == to_free))
++ root->enter_node = NULL;
++ root->free(to_free);
++ }
++}
++
++/*
++ * Del the item on the known leaf node and index
++ */
++void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
++ struct sradix_tree_node *node, unsigned long index)
++{
++ unsigned int offset;
++ struct sradix_tree_node *start, *end;
++
++ BUG_ON(node->height != 1);
++
++ start = node;
++ while (node && !(--node->count))
++ node = node->parent;
++
++ end = node;
++ if (!node) {
++ root->rnode = NULL;
++ root->height = 0;
++ root->min = 0;
++ root->num = 0;
++ root->enter_node = NULL;
++ } else {
++ offset = (index >> (root->shift * (node->height - 1))) & root->mask;
++ if (root->rm)
++ root->rm(node, offset);
++ node->stores[offset] = NULL;
++ root->num--;
++ if (root->min > index) {
++ root->min = index;
++ root->enter_node = node;
++ }
++ }
++
++ if (start != end) {
++ do {
++ node = start;
++ start = start->parent;
++ if (unlikely(root->enter_node == node))
++ root->enter_node = end;
++ root->free(node);
++ } while (start != end);
++
++ /*
++ * Note that shrink may free "end", so enter_node still need to
++ * be checked inside.
++ */
++ sradix_tree_shrink(root);
++ } else if (node->count == root->stores_size - 1) {
++ /* It WAS a full leaf node. Update the ancestors */
++ node = node->parent;
++ while (node) {
++ node->fulls--;
++ if (node->fulls != root->stores_size - 1)
++ break;
++
++ node = node->parent;
++ }
++ }
++}
++
++void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index)
++{
++ unsigned int height, offset;
++ struct sradix_tree_node *node;
++ int shift;
++
++ node = root->rnode;
++ if (node == NULL || (index >> (root->shift * root->height)))
++ return NULL;
++
++ height = root->height;
++ shift = (height - 1) * root->shift;
++
++ do {
++ offset = (index >> shift) & root->mask;
++ node = node->stores[offset];
++ if (!node)
++ return NULL;
++
++ shift -= root->shift;
++ } while (shift >= 0);
++
++ return node;
++}
++
++/*
++ * Return the item if it exists, otherwise create it in place
++ * and return the created item.
++ */
++void *sradix_tree_lookup_create(struct sradix_tree_root *root,
++ unsigned long index, void *(*item_alloc)(void))
++{
++ unsigned int height, offset;
++ struct sradix_tree_node *node, *tmp;
++ void *item;
++ int shift, error;
++
++ if (root->rnode == NULL || (index >> (root->shift * root->height))) {
++ if (item_alloc) {
++ error = sradix_tree_extend(root, index);
++ if (error)
++ return NULL;
++ } else {
++ return NULL;
++ }
++ }
++
++ node = root->rnode;
++ height = root->height;
++ shift = (height - 1) * root->shift;
++
++ do {
++ offset = (index >> shift) & root->mask;
++ if (!node->stores[offset]) {
++ if (!(tmp = root->alloc()))
++ return NULL;
++
++ tmp->height = shift / root->shift;
++ node->stores[offset] = tmp;
++ tmp->parent = node;
++ node->count++;
++ node = tmp;
++ } else {
++ node = node->stores[offset];
++ }
++
++ shift -= root->shift;
++ } while (shift > 0);
++
++ BUG_ON(node->height != 1);
++ offset = index & root->mask;
++ if (node->stores[offset]) {
++ return node->stores[offset];
++ } else if (item_alloc) {
++ if (!(item = item_alloc()))
++ return NULL;
++
++ node->stores[offset] = item;
++
++ /*
++ * NOTE: we do NOT call root->assign here, since this item is
++ * newly created by us having no meaning. Caller can call this
++ * if it's necessary to do so.
++ */
++
++ node->count++;
++ root->num++;
++
++ while (sradix_node_full(root, node)) {
++ node = node->parent;
++ if (!node)
++ break;
++
++ node->fulls++;
++ }
++
++ if (unlikely(!node)) {
++ /* All nodes are full */
++ root->min = 1 << (root->height * root->shift);
++ } else {
++ if (root->min == index) {
++ root->min |= (1UL << (node->height - 1)) - 1;
++ root->min++;
++ root->enter_node = node;
++ }
++ }
++
++ return item;
++ } else {
++ return NULL;
++ }
++
++}
++
++int sradix_tree_delete(struct sradix_tree_root *root, unsigned long index)
++{
++ unsigned int height, offset;
++ struct sradix_tree_node *node;
++ int shift;
++
++ node = root->rnode;
++ if (node == NULL || (index >> (root->shift * root->height)))
++ return -ENOENT;
++
++ height = root->height;
++ shift = (height - 1) * root->shift;
++
++ do {
++ offset = (index >> shift) & root->mask;
++ node = node->stores[offset];
++ if (!node)
++ return -ENOENT;
++
++ shift -= root->shift;
++ } while (shift > 0);
++
++ offset = index & root->mask;
++ if (!node->stores[offset])
++ return -ENOENT;
++
++ sradix_tree_delete_from_leaf(root, node, index);
++
++ return 0;
++}
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 390165ffbb0f..50d02cfa0b68 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -317,6 +317,32 @@ config KSM
+ See Documentation/vm/ksm.rst for more information: KSM is inactive
+ until a program has madvised that an area is MADV_MERGEABLE, and
+ root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
++choice
++ prompt "Choose UKSM/KSM strategy"
++ default UKSM
++ depends on KSM
++ help
++ This option allows you to select a UKSM/KSM strategy.
++
++config UKSM
++ bool "Ultra-KSM for page merging"
++ depends on KSM
++ help
++ UKSM is inspired by the Linux kernel project - KSM (Kernel Same
++ page Merging), but with a fundamentally rewritten core algorithm. With
++ an advanced algorithm, UKSM can now transparently scan all anonymously
++ mapped user space applications with a significantly improved scan speed
++ and CPU efficiency. Since KVM is friendly to KSM, KVM can also benefit from
++ UKSM. UKSM now has its first stable release and its first real-world enterprise user.
++ For more information, please goto its project page.
++ (github.com/dolohow/uksm)
++
++config KSM_LEGACY
++ bool "Legacy KSM implementation"
++ depends on KSM
++ help
++ The legacy KSM implementation from Red Hat.
++endchoice
+
+ config DEFAULT_MMAP_MIN_ADDR
+ int "Low address space to protect from user allocation"
+diff --git a/mm/Makefile b/mm/Makefile
+index d73aed0fc99c..d6612b76c5da 100644
+--- a/mm/Makefile
++++ b/mm/Makefile
+@@ -76,7 +76,8 @@ obj-$(CONFIG_SPARSEMEM) += sparse.o
+ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
+ obj-$(CONFIG_SLOB) += slob.o
+ obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
+-obj-$(CONFIG_KSM) += ksm.o
++obj-$(CONFIG_KSM_LEGACY) += ksm.o
++obj-$(CONFIG_UKSM) += uksm.o
+ obj-$(CONFIG_PAGE_POISONING) += page_poison.o
+ obj-$(CONFIG_SLAB) += slab.o
+ obj-$(CONFIG_SLUB) += slub.o
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 0960750bb316..ae17158cb67a 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -858,17 +858,6 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
+ return err;
+ }
+
+-static inline struct stable_node *page_stable_node(struct page *page)
+-{
+- return PageKsm(page) ? page_rmapping(page) : NULL;
+-}
+-
+-static inline void set_page_stable_node(struct page *page,
+- struct stable_node *stable_node)
+-{
+- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+-}
+-
+ #ifdef CONFIG_SYSFS
+ /*
+ * Only called through the sysfs control interface:
+diff --git a/mm/memory.c b/mm/memory.c
+index c48f8df6e502..db47ee177008 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -146,6 +146,25 @@ EXPORT_SYMBOL(zero_pfn);
+
+ unsigned long highest_memmap_pfn __read_mostly;
+
++#ifdef CONFIG_UKSM
++unsigned long uksm_zero_pfn __read_mostly;
++EXPORT_SYMBOL_GPL(uksm_zero_pfn);
++struct page *empty_uksm_zero_page;
++
++static int __init setup_uksm_zero_page(void)
++{
++ empty_uksm_zero_page = alloc_pages(__GFP_ZERO & ~__GFP_MOVABLE, 0);
++ if (!empty_uksm_zero_page)
++ panic("Oh boy, that early out of memory?");
++
++ SetPageReserved(empty_uksm_zero_page);
++ uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page);
++
++ return 0;
++}
++core_initcall(setup_uksm_zero_page);
++#endif
++
+ /*
+ * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
+ */
+@@ -161,6 +180,7 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
+ trace_rss_stat(mm, member, count);
+ }
+
++
+ #if defined(SPLIT_RSS_COUNTING)
+
+ void sync_mm_rss(struct mm_struct *mm)
+@@ -869,6 +889,11 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
+ get_page(page);
+ page_dup_rmap(page, false);
+ rss[mm_counter(page)]++;
++
++ /* Should return NULL in vm_normal_page() */
++ uksm_bugon_zeropage(pte);
++ } else {
++ uksm_map_zero_page(pte);
+ }
+
+ /*
+@@ -1237,8 +1262,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
+ ptent = ptep_get_and_clear_full(mm, addr, pte,
+ tlb->fullmm);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+- if (unlikely(!page))
++ if (unlikely(!page)) {
++ uksm_unmap_zero_page(ptent);
+ continue;
++ }
+
+ if (!PageAnon(page)) {
+ if (pte_dirty(ptent)) {
+@@ -2586,6 +2613,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+
+ if (likely(src)) {
+ copy_user_highpage(dst, src, addr, vma);
++ uksm_cow_page(vma, src);
+ return true;
+ }
+
+@@ -2832,6 +2860,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+ vmf->address);
+ if (!new_page)
+ goto oom;
++ uksm_cow_pte(vma, vmf->orig_pte);
+ } else {
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
+ vmf->address);
+@@ -2874,7 +2903,9 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+ mm_counter_file(old_page));
+ inc_mm_counter_fast(mm, MM_ANONPAGES);
+ }
++ uksm_bugon_zeropage(vmf->orig_pte);
+ } else {
++ uksm_unmap_zero_page(vmf->orig_pte);
+ inc_mm_counter_fast(mm, MM_ANONPAGES);
+ }
+ flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 5c8b4485860d..b8dd56dd900d 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -46,6 +46,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/pkeys.h>
+ #include <linux/oom.h>
++#include <linux/ksm.h>
+ #include <linux/sched/mm.h>
+
+ #include <linux/uaccess.h>
+@@ -181,6 +182,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ mpol_put(vma_policy(vma));
++ uksm_remove_vma(vma);
+ vm_area_free(vma);
+ return next;
+ }
+@@ -750,9 +752,16 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ long adjust_next = 0;
+ int remove_next = 0;
+
++/*
++ * to avoid deadlock, ksm_remove_vma must be done before any spin_lock is
++ * acquired
++ */
++ uksm_remove_vma(vma);
++
+ if (next && !insert) {
+ struct vm_area_struct *exporter = NULL, *importer = NULL;
+
++ uksm_remove_vma(next);
+ if (end >= next->vm_end) {
+ /*
+ * vma expands, overlapping all the next, and
+@@ -883,6 +892,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ end_changed = true;
+ }
+ vma->vm_pgoff = pgoff;
++
+ if (adjust_next) {
+ next->vm_start += adjust_next;
+ next->vm_pgoff += adjust_next >> PAGE_SHIFT;
+@@ -987,6 +997,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ if (remove_next == 2) {
+ remove_next = 1;
+ end = next->vm_end;
++ uksm_remove_vma(next);
+ goto again;
+ }
+ else if (next)
+@@ -1013,10 +1024,14 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ */
+ VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
+ }
++ } else {
++ if (next && !insert)
++ uksm_vma_add_new(next);
+ }
+ if (insert && file)
+ uprobe_mmap(insert);
+
++ uksm_vma_add_new(vma);
+ validate_mm(mm);
+
+ return 0;
+@@ -1472,6 +1487,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+ vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
+ mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
++ /* If uksm is enabled, we add VM_MERGEABLE to new VMAs. */
++ uksm_vm_flags_mod(&vm_flags);
++
+ if (flags & MAP_LOCKED)
+ if (!can_do_mlock())
+ return -EPERM;
+@@ -1867,6 +1885,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+ allow_write_access(file);
+ }
+ file = vma->vm_file;
++ uksm_vma_add_new(vma);
+ out:
+ perf_event_mmap(vma);
+
+@@ -1909,6 +1928,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+ if (vm_flags & VM_DENYWRITE)
+ allow_write_access(file);
+ free_vma:
++ uksm_remove_vma(vma);
+ vm_area_free(vma);
+ unacct_error:
+ if (charged)
+@@ -2768,6 +2788,8 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ else
+ err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+
++ uksm_vma_add_new(new);
++
+ /* Success. */
+ if (!err)
+ return 0;
+@@ -3075,6 +3097,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
+ if ((flags & (~VM_EXEC)) != 0)
+ return -EINVAL;
+ flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
++ uksm_vm_flags_mod(&flags);
+
+ mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+ if (IS_ERR_VALUE(mapped_addr))
+@@ -3120,6 +3143,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
+ vma->vm_flags = flags;
+ vma->vm_page_prot = vm_get_page_prot(flags);
+ vma_link(mm, vma, prev, rb_link, rb_parent);
++ uksm_vma_add_new(vma);
+ out:
+ perf_event_mmap(vma);
+ mm->total_vm += len >> PAGE_SHIFT;
+@@ -3197,6 +3221,12 @@ void exit_mmap(struct mm_struct *mm)
+ mmap_write_unlock(mm);
+ }
+
++ /*
++ * Taking write lock on mmap does not harm others,
++ * but it's crucial for uksm to avoid races.
++ */
++ mmap_write_lock(mm);
++
+ if (mm->locked_vm) {
+ vma = mm->mmap;
+ while (vma) {
+@@ -3232,6 +3262,11 @@ void exit_mmap(struct mm_struct *mm)
+ cond_resched();
+ }
+ vm_unacct_memory(nr_accounted);
++
++ mm->mmap = NULL;
++ mm->mm_rb = RB_ROOT;
++ vmacache_invalidate(mm);
++ mmap_write_unlock(mm);
+ }
+
+ /* Insert vm structure into process list sorted by address
+@@ -3339,6 +3374,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+ new_vma->vm_ops->open(new_vma);
+ vma_link(mm, new_vma, prev, rb_link, rb_parent);
+ *need_rmap_locks = false;
++ uksm_vma_add_new(new_vma);
+ }
+ return new_vma;
+
+@@ -3491,6 +3527,7 @@ static struct vm_area_struct *__install_special_mapping(
+ vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
+
+ perf_event_mmap(vma);
++ uksm_vma_add_new(vma);
+
+ return vma;
+
+diff --git a/mm/uksm.c b/mm/uksm.c
+new file mode 100644
+index 000000000000..e4732c00be69
+--- /dev/null
++++ b/mm/uksm.c
+@@ -0,0 +1,5614 @@
++/*
++ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
++ *
++ * This is an improvement upon KSM. Some basic data structures and routines
++ * are borrowed from ksm.c .
++ *
++ * Its new features:
++ * 1. Full system scan:
++ * It automatically scans all user processes' anonymous VMAs. Kernel-user
++ * interaction to submit a memory area to KSM is no longer needed.
++ *
++ * 2. Rich area detection:
++ * It automatically detects rich areas containing abundant duplicated
++ * pages based. Rich areas are given a full scan speed. Poor areas are
++ * sampled at a reasonable speed with very low CPU consumption.
++ *
++ * 3. Ultra Per-page scan speed improvement:
++ * A new hash algorithm is proposed. As a result, on a machine with
++ * Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHZ DDR2 main memory, it
++ * can scan memory areas that do not contain duplicated pages at a speed of
++ * 627MB/sec ~ 2445MB/sec and can merge duplicated areas at speed of
++ * 477MB/sec ~ 923MB/sec.
++ *
++ * 4. Thrashing area avoidance:
++ * A thrashing area (a VMA that has frequent KSM page break-outs) can be
++ * filtered out. My benchmark shows it's more efficient than KSM's per-page
++ * hash value based volatile page detection.
++ *
++ *
++ * 5. Misc changes upon KSM:
++ * * It has a fully x86-optimized memcmp dedicated for 4-byte-aligned page
++ * comparison. It's much faster than the default C version on x86.
++ * * rmap_item now has a struct *page member to loosely cache an
++ * address-->page mapping, which avoids many time-costly
++ * follow_page() calls.
++ * * The VMA creation/exit procedures are hooked to let the Ultra KSM know.
++ * * try_to_merge_two_pages() now can revert a pte if it fails. No break_
++ * ksm is needed for this case.
++ *
++ * 6. Full Zero Page consideration (contributed by Figo Zhang)
++ * Now uksmd considers full zero pages as special pages and merges them into a
++ * special unswappable uksm zero page.
++ */
++
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <linux/mman.h>
++#include <linux/sched.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/coredump.h>
++#include <linux/sched/cputime.h>
++#include <linux/rwsem.h>
++#include <linux/pagemap.h>
++#include <linux/rmap.h>
++#include <linux/spinlock.h>
++#include <linux/jhash.h>
++#include <linux/delay.h>
++#include <linux/kthread.h>
++#include <linux/wait.h>
++#include <linux/slab.h>
++#include <linux/rbtree.h>
++#include <linux/memory.h>
++#include <linux/mmu_notifier.h>
++#include <linux/swap.h>
++#include <linux/ksm.h>
++#include <linux/crypto.h>
++#include <linux/scatterlist.h>
++#include <crypto/hash.h>
++#include <linux/random.h>
++#include <linux/math64.h>
++#include <linux/gcd.h>
++#include <linux/freezer.h>
++#include <linux/oom.h>
++#include <linux/numa.h>
++#include <linux/sradix-tree.h>
++
++#include <asm/tlbflush.h>
++#include "internal.h"
++
++#ifdef CONFIG_X86
++#undef memcmp
++
++#ifdef CONFIG_X86_32
++#define memcmp memcmpx86_32
++/*
++ * Compare 4-byte-aligned address s1 and s2, with length n
++ */
++int memcmpx86_32(void *s1, void *s2, size_t n)
++{
++ size_t num = n / 4;
++ register int res;
++
++ __asm__ __volatile__
++ (
++ "testl %3,%3\n\t"
++ "repe; cmpsd\n\t"
++ "je 1f\n\t"
++ "sbbl %0,%0\n\t"
++ "orl $1,%0\n"
++ "1:"
++ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num)
++ : "0" (0)
++ : "cc");
++
++ return res;
++}
++
++/*
++ * Check the page is all zero ?
++ */
++static int is_full_zero(const void *s1, size_t len)
++{
++ unsigned char same;
++
++ len /= 4;
++
++ __asm__ __volatile__
++ ("repe; scasl;"
++ "sete %0"
++ : "=qm" (same), "+D" (s1), "+c" (len)
++ : "a" (0)
++ : "cc");
++
++ return same;
++}
++
++
++#elif defined(CONFIG_X86_64)
++#define memcmp memcmpx86_64
++/*
++ * Compare 8-byte-aligned address s1 and s2, with length n
++ */
++int memcmpx86_64(void *s1, void *s2, size_t n)
++{
++ size_t num = n / 8;
++ register int res;
++
++ __asm__ __volatile__
++ (
++ "testq %q3,%q3\n\t"
++ "repe; cmpsq\n\t"
++ "je 1f\n\t"
++ "sbbq %q0,%q0\n\t"
++ "orq $1,%q0\n"
++ "1:"
++ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num)
++ : "0" (0)
++ : "cc");
++
++ return res;
++}
++
++static int is_full_zero(const void *s1, size_t len)
++{
++ unsigned char same;
++
++ len /= 8;
++
++ __asm__ __volatile__
++ ("repe; scasq;"
++ "sete %0"
++ : "=qm" (same), "+D" (s1), "+c" (len)
++ : "a" (0)
++ : "cc");
++
++ return same;
++}
++
++#endif
++#else
++static int is_full_zero(const void *s1, size_t len)
++{
++ unsigned long *src = s1;
++ int i;
++
++ len /= sizeof(*src);
++
++ for (i = 0; i < len; i++) {
++ if (src[i])
++ return 0;
++ }
++
++ return 1;
++}
++#endif
++
++#define UKSM_RUNG_ROUND_FINISHED (1 << 0)
++#define TIME_RATIO_SCALE 10000
++
++#define SLOT_TREE_NODE_SHIFT 8
++#define SLOT_TREE_NODE_STORE_SIZE (1UL << SLOT_TREE_NODE_SHIFT)
++struct slot_tree_node {
++ unsigned long size;
++ struct sradix_tree_node snode;
++ void *stores[SLOT_TREE_NODE_STORE_SIZE];
++};
++
++static struct kmem_cache *slot_tree_node_cachep;
++
++static struct sradix_tree_node *slot_tree_node_alloc(void)
++{
++ struct slot_tree_node *p;
++
++ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL |
++ __GFP_NORETRY | __GFP_NOWARN);
++ if (!p)
++ return NULL;
++
++ return &p->snode;
++}
++
++static void slot_tree_node_free(struct sradix_tree_node *node)
++{
++ struct slot_tree_node *p;
++
++ p = container_of(node, struct slot_tree_node, snode);
++ kmem_cache_free(slot_tree_node_cachep, p);
++}
++
++static void slot_tree_node_extend(struct sradix_tree_node *parent,
++ struct sradix_tree_node *child)
++{
++ struct slot_tree_node *p, *c;
++
++ p = container_of(parent, struct slot_tree_node, snode);
++ c = container_of(child, struct slot_tree_node, snode);
++
++ p->size += c->size;
++}
++
++void slot_tree_node_assign(struct sradix_tree_node *node,
++ unsigned int index, void *item)
++{
++ struct vma_slot *slot = item;
++ struct slot_tree_node *cur;
++
++ slot->snode = node;
++ slot->sindex = index;
++
++ while (node) {
++ cur = container_of(node, struct slot_tree_node, snode);
++ cur->size += slot->pages;
++ node = node->parent;
++ }
++}
++
++void slot_tree_node_rm(struct sradix_tree_node *node, unsigned int offset)
++{
++ struct vma_slot *slot;
++ struct slot_tree_node *cur;
++ unsigned long pages;
++
++ if (node->height == 1) {
++ slot = node->stores[offset];
++ pages = slot->pages;
++ } else {
++ cur = container_of(node->stores[offset],
++ struct slot_tree_node, snode);
++ pages = cur->size;
++ }
++
++ while (node) {
++ cur = container_of(node, struct slot_tree_node, snode);
++ cur->size -= pages;
++ node = node->parent;
++ }
++}
++
++unsigned long slot_iter_index;
++int slot_iter(void *item, unsigned long height)
++{
++ struct slot_tree_node *node;
++ struct vma_slot *slot;
++
++ if (height == 1) {
++ slot = item;
++ if (slot_iter_index < slot->pages) {
++ /*in this one*/
++ return 1;
++ } else {
++ slot_iter_index -= slot->pages;
++ return 0;
++ }
++
++ } else {
++ node = container_of(item, struct slot_tree_node, snode);
++ if (slot_iter_index < node->size) {
++ /*in this one*/
++ return 1;
++ } else {
++ slot_iter_index -= node->size;
++ return 0;
++ }
++ }
++}
++
++
++static inline void slot_tree_init_root(struct sradix_tree_root *root)
++{
++ init_sradix_tree_root(root, SLOT_TREE_NODE_SHIFT);
++ root->alloc = slot_tree_node_alloc;
++ root->free = slot_tree_node_free;
++ root->extend = slot_tree_node_extend;
++ root->assign = slot_tree_node_assign;
++ root->rm = slot_tree_node_rm;
++}
++
++void slot_tree_init(void)
++{
++ slot_tree_node_cachep = kmem_cache_create("slot_tree_node",
++ sizeof(struct slot_tree_node), 0,
++ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
++ NULL);
++}
++
++
++/* Each rung of this ladder is a list of VMAs having a same scan ratio */
++struct scan_rung {
++ //struct list_head scanned_list;
++ struct sradix_tree_root vma_root;
++ struct sradix_tree_root vma_root2;
++
++ struct vma_slot *current_scan;
++ unsigned long current_offset;
++
++ /*
++ * The initial value for current_offset, it should loop over
++ * [0~ step - 1] to let all slots have their chance to be scanned.
++ */
++ unsigned long offset_init;
++ unsigned long step; /* dynamic step for current_offset */
++ unsigned int flags;
++ unsigned long pages_to_scan;
++ //unsigned long fully_scanned_slots;
++ /*
++ * a little bit tricky - if cpu_time_ratio > 0, then the value is the
++ * the cpu time ratio it can spend in rung_i for every scan
++ * period. if < 0, then it is the cpu time ratio relative to the
++ * max cpu percentage user specified. Both in unit of
++ * 1/TIME_RATIO_SCALE
++ */
++ int cpu_ratio;
++
++ /*
++ * How long it will take for all slots in this rung to be fully
++ * scanned? If it's zero, we don't care about the cover time:
++ * it's fully scanned.
++ */
++ unsigned int cover_msecs;
++ //unsigned long vma_num;
++ //unsigned long pages; /* Sum of all slot's pages in rung */
++};
++
++/**
++ * node of either the stable or unstable rbtree
++ *
++ */
++struct tree_node {
++ struct rb_node node; /* link in the main (un)stable rbtree */
++ struct rb_root sub_root; /* rb_root for sublevel collision rbtree */
++ u32 hash;
++ unsigned long count; /* TODO: merged with sub_root */
++ struct list_head all_list; /* all tree nodes in stable/unstable tree */
++};
++
++/**
++ * struct stable_node - node of the stable rbtree
++ * @node: rb node of this ksm page in the stable tree
++ * @hlist: hlist head of rmap_items using this ksm page
++ * @kpfn: page frame number of this ksm page
++ */
++struct stable_node {
++ struct rb_node node; /* link in sub-rbtree */
++ struct tree_node *tree_node; /* it's tree node root in stable tree, NULL if it's in hell list */
++ struct hlist_head hlist;
++ unsigned long kpfn;
++ u32 hash_max; /* if ==0 then it's not been calculated yet */
++ struct list_head all_list; /* in a list for all stable nodes */
++};
++
++/**
++ * struct node_vma - group rmap_items linked in a same stable
++ * node together.
++ */
++struct node_vma {
++ union {
++ struct vma_slot *slot;
++ unsigned long key; /* slot is used as key sorted on hlist */
++ };
++ struct hlist_node hlist;
++ struct hlist_head rmap_hlist;
++ struct stable_node *head;
++};
++
++/**
++ * struct rmap_item - reverse mapping item for virtual addresses
++ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
++ * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
++ * @mm: the memory structure this rmap_item is pointing into
++ * @address: the virtual address this rmap_item tracks (+ flags in low bits)
++ * @node: rb node of this rmap_item in the unstable tree
++ * @head: pointer to stable_node heading this list in the stable tree
++ * @hlist: link into hlist of rmap_items hanging off that stable_node
++ */
++struct rmap_item {
++ struct vma_slot *slot;
++ struct page *page;
++ unsigned long address; /* + low bits used for flags below */
++ unsigned long hash_round;
++ unsigned long entry_index;
++ union {
++ struct {/* when in unstable tree */
++ struct rb_node node;
++ struct tree_node *tree_node;
++ u32 hash_max;
++ };
++ struct { /* when in stable tree */
++ struct node_vma *head;
++ struct hlist_node hlist;
++ struct anon_vma *anon_vma;
++ };
++ };
++} __aligned(4);
++
++struct rmap_list_entry {
++ union {
++ struct rmap_item *item;
++ unsigned long addr;
++ };
++ /* lowest bit is used for is_addr tag */
++} __aligned(4); /* 4 aligned to fit in to pages*/
++
++
++/* Basic data structure definition ends */
++
++
++/*
++ * Flags for rmap_item to judge if it's listed in the stable/unstable tree.
++ * The flags use the low bits of rmap_item.address
++ */
++#define UNSTABLE_FLAG 0x1
++#define STABLE_FLAG 0x2
++#define get_rmap_addr(x) ((x)->address & PAGE_MASK)
++
++/*
++ * rmap_list_entry helpers
++ */
++#define IS_ADDR_FLAG 1
++#define is_addr(ptr) ((unsigned long)(ptr) & IS_ADDR_FLAG)
++#define set_is_addr(ptr) ((ptr) |= IS_ADDR_FLAG)
++#define get_clean_addr(ptr) (((ptr) & ~(__typeof__(ptr))IS_ADDR_FLAG))
++
++
++/*
++ * High speed caches for frequently allocated and freed structs
++ */
++static struct kmem_cache *rmap_item_cache;
++static struct kmem_cache *stable_node_cache;
++static struct kmem_cache *node_vma_cache;
++static struct kmem_cache *vma_slot_cache;
++static struct kmem_cache *tree_node_cache;
++#define UKSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("uksm_"#__struct,\
++ sizeof(struct __struct), __alignof__(struct __struct),\
++ (__flags), NULL)
++
++/* Array of all scan_rung, uksm_scan_ladder[0] having the minimum scan ratio */
++#define SCAN_LADDER_SIZE 4
++static struct scan_rung uksm_scan_ladder[SCAN_LADDER_SIZE];
++
++/* The evaluation rounds uksmd has finished */
++static unsigned long long uksm_eval_round = 1;
++
++/*
++ * we add 1 to this var when we consider we should rebuild the whole
++ * unstable tree.
++ */
++static unsigned long uksm_hash_round = 1;
++
++/*
++ * How many times the whole memory is scanned.
++ */
++static unsigned long long fully_scanned_round = 1;
++
++/* The total number of virtual pages of all vma slots */
++static u64 uksm_pages_total;
++
++/* The number of pages has been scanned since the start up */
++static u64 uksm_pages_scanned;
++
++static u64 scanned_virtual_pages;
++
++/* The number of pages has been scanned since last encode_benefit call */
++static u64 uksm_pages_scanned_last;
++
++/* If the scanned number is tooo large, we encode it here */
++static u64 pages_scanned_stored;
++
++static unsigned long pages_scanned_base;
++
++/* The number of nodes in the stable tree */
++static unsigned long uksm_pages_shared;
++
++/* The number of page slots additionally sharing those nodes */
++static unsigned long uksm_pages_sharing;
++
++/* The number of nodes in the unstable tree */
++static unsigned long uksm_pages_unshared;
++
++/*
++ * Milliseconds ksmd should sleep between scans,
++ * >= 100ms to be consistent with
++ * scan_time_to_sleep_msec()
++ */
++static unsigned int uksm_sleep_jiffies;
++
++/* The real value for the uksmd next sleep */
++static unsigned int uksm_sleep_real;
++
++/* Saved value for user input uksm_sleep_jiffies when it's enlarged */
++static unsigned int uksm_sleep_saved;
++
++/* Max percentage of cpu utilization ksmd can take to scan in one batch */
++static unsigned int uksm_max_cpu_percentage;
++
++static int uksm_cpu_governor;
++
++static char *uksm_cpu_governor_str[4] = { "full", "medium", "low", "quiet" };
++
++struct uksm_cpu_preset_s {
++ int cpu_ratio[SCAN_LADDER_SIZE];
++ unsigned int cover_msecs[SCAN_LADDER_SIZE];
++ unsigned int max_cpu; /* percentage */
++};
++
++struct uksm_cpu_preset_s uksm_cpu_preset[4] = {
++ { {20, 40, -2500, -10000}, {1000, 500, 200, 50}, 95},
++ { {20, 30, -2500, -10000}, {1000, 500, 400, 100}, 50},
++ { {10, 20, -5000, -10000}, {1500, 1000, 1000, 250}, 20},
++ { {10, 20, 40, 75}, {2000, 1000, 1000, 1000}, 1},
++};
++
++/* The default value for uksm_ema_page_time if it's not initialized */
++#define UKSM_PAGE_TIME_DEFAULT 500
++
++/*cost to scan one page by expotional moving average in nsecs */
++static unsigned long uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
++
++/* The expotional moving average alpha weight, in percentage. */
++#define EMA_ALPHA 20
++
++/*
++ * The threshold used to filter out thrashing areas,
++ * If it == 0, filtering is disabled, otherwise it's the percentage up-bound
++ * of the thrashing ratio of all areas. Any area with a bigger thrashing ratio
++ * will be considered as having a zero duplication ratio.
++ */
++static unsigned int uksm_thrash_threshold = 50;
++
++/* How much dedup ratio is considered to be abundant*/
++static unsigned int uksm_abundant_threshold = 10;
++
++/* All slots having merged pages in this eval round. */
++struct list_head vma_slot_dedup = LIST_HEAD_INIT(vma_slot_dedup);
++
++/* How many times the ksmd has slept since startup */
++static unsigned long long uksm_sleep_times;
++
++#define UKSM_RUN_STOP 0
++#define UKSM_RUN_MERGE 1
++static unsigned int uksm_run = 1;
++
++static DECLARE_WAIT_QUEUE_HEAD(uksm_thread_wait);
++static DEFINE_MUTEX(uksm_thread_mutex);
++
++/*
++ * List vma_slot_new is for newly created vma_slot waiting to be added by
++ * ksmd. If one cannot be added(e.g. due to it's too small), it's moved to
++ * vma_slot_noadd. vma_slot_del is the list for vma_slot whose corresponding
++ * VMA has been removed/freed.
++ */
++struct list_head vma_slot_new = LIST_HEAD_INIT(vma_slot_new);
++struct list_head vma_slot_noadd = LIST_HEAD_INIT(vma_slot_noadd);
++struct list_head vma_slot_del = LIST_HEAD_INIT(vma_slot_del);
++static DEFINE_SPINLOCK(vma_slot_list_lock);
++
++/* The unstable tree heads */
++static struct rb_root root_unstable_tree = RB_ROOT;
++
++/*
++ * All tree_nodes are in a list to be freed at once when unstable tree is
++ * freed after each scan round.
++ */
++static struct list_head unstable_tree_node_list =
++ LIST_HEAD_INIT(unstable_tree_node_list);
++
++/* List contains all stable nodes */
++static struct list_head stable_node_list = LIST_HEAD_INIT(stable_node_list);
++
++/*
++ * When the hash strength is changed, the stable tree must be delta_hashed and
++ * re-structured. We use two set of below structs to speed up the
++ * re-structuring of stable tree.
++ */
++static struct list_head
++stable_tree_node_list[2] = {LIST_HEAD_INIT(stable_tree_node_list[0]),
++ LIST_HEAD_INIT(stable_tree_node_list[1])};
++
++static struct list_head *stable_tree_node_listp = &stable_tree_node_list[0];
++static struct rb_root root_stable_tree[2] = {RB_ROOT, RB_ROOT};
++static struct rb_root *root_stable_treep = &root_stable_tree[0];
++static unsigned long stable_tree_index;
++
++/* The hash strength needed to hash a full page */
++#define HASH_STRENGTH_FULL (PAGE_SIZE / sizeof(u32))
++
++/* The hash strength needed for loop-back hashing */
++#define HASH_STRENGTH_MAX (HASH_STRENGTH_FULL + 10)
++
++/* The random offsets in a page */
++static u32 *random_nums;
++
++/* The hash strength */
++static unsigned long hash_strength = HASH_STRENGTH_FULL >> 4;
++
++/* The delta value each time the hash strength increases or decreases */
++static unsigned long hash_strength_delta;
++#define HASH_STRENGTH_DELTA_MAX 5
++
++/* The time we have saved due to random_sample_hash */
++static u64 rshash_pos;
++
++/* The time we have wasted due to hash collision */
++static u64 rshash_neg;
++
++struct uksm_benefit {
++ u64 pos;
++ u64 neg;
++ u64 scanned;
++ unsigned long base;
++} benefit;
++
++/*
++ * The relative cost of memcmp, compared to 1 time unit of random sample
++ * hash, this value is tested when ksm module is initialized
++ */
++static unsigned long memcmp_cost;
++
++static unsigned long rshash_neg_cont_zero;
++static unsigned long rshash_cont_obscure;
++
++/* The possible states of hash strength adjustment heuristic */
++enum rshash_states {
++ RSHASH_STILL,
++ RSHASH_TRYUP,
++ RSHASH_TRYDOWN,
++ RSHASH_NEW,
++ RSHASH_PRE_STILL,
++};
++
++/* The possible direction we are about to adjust hash strength */
++enum rshash_direct {
++ GO_UP,
++ GO_DOWN,
++ OBSCURE,
++ STILL,
++};
++
++/* random sampling hash state machine */
++static struct {
++ enum rshash_states state;
++ enum rshash_direct pre_direct;
++ u8 below_count;
++ /* Keep a lookup window of size 5, iff above_count/below_count > 3
++ * in this window we stop trying.
++ */
++ u8 lookup_window_index;
++ u64 stable_benefit;
++ unsigned long turn_point_down;
++ unsigned long turn_benefit_down;
++ unsigned long turn_point_up;
++ unsigned long turn_benefit_up;
++ unsigned long stable_point;
++} rshash_state;
++
++/*zero page hash table, hash_strength [0 ~ HASH_STRENGTH_MAX]*/
++static u32 *zero_hash_table;
++
++static inline struct node_vma *alloc_node_vma(void)
++{
++ struct node_vma *node_vma;
++
++ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL |
++ __GFP_NORETRY | __GFP_NOWARN);
++ if (node_vma) {
++ INIT_HLIST_HEAD(&node_vma->rmap_hlist);
++ INIT_HLIST_NODE(&node_vma->hlist);
++ }
++ return node_vma;
++}
++
++static inline void free_node_vma(struct node_vma *node_vma)
++{
++ kmem_cache_free(node_vma_cache, node_vma);
++}
++
++
++static inline struct vma_slot *alloc_vma_slot(void)
++{
++ struct vma_slot *slot;
++
++ /*
++ * In case ksm is not initialized by now.
++ * Oops, we need to consider the call site of uksm_init() in the future.
++ */
++ if (!vma_slot_cache)
++ return NULL;
++
++ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL |
++ __GFP_NORETRY | __GFP_NOWARN);
++ if (slot) {
++ INIT_LIST_HEAD(&slot->slot_list);
++ INIT_LIST_HEAD(&slot->dedup_list);
++ slot->flags |= UKSM_SLOT_NEED_RERAND;
++ }
++ return slot;
++}
++
++static inline void free_vma_slot(struct vma_slot *vma_slot)
++{
++ kmem_cache_free(vma_slot_cache, vma_slot);
++}
++
++
++
++static inline struct rmap_item *alloc_rmap_item(void)
++{
++ struct rmap_item *rmap_item;
++
++ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
++ __GFP_NORETRY | __GFP_NOWARN);
++ if (rmap_item) {
++ /* bug on lowest bit is not clear for flag use */
++ BUG_ON(is_addr(rmap_item));
++ }
++ return rmap_item;
++}
++
++static inline void free_rmap_item(struct rmap_item *rmap_item)
++{
++ rmap_item->slot = NULL; /* debug safety */
++ kmem_cache_free(rmap_item_cache, rmap_item);
++}
++
++static inline struct stable_node *alloc_stable_node(void)
++{
++ struct stable_node *node;
++
++ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL |
++ __GFP_NORETRY | __GFP_NOWARN);
++ if (!node)
++ return NULL;
++
++ INIT_HLIST_HEAD(&node->hlist);
++ list_add(&node->all_list, &stable_node_list);
++ return node;
++}
++
++static inline void free_stable_node(struct stable_node *stable_node)
++{
++ list_del(&stable_node->all_list);
++ kmem_cache_free(stable_node_cache, stable_node);
++}
++
++static inline struct tree_node *alloc_tree_node(struct list_head *list)
++{
++ struct tree_node *node;
++
++ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL |
++ __GFP_NORETRY | __GFP_NOWARN);
++ if (!node)
++ return NULL;
++
++ list_add(&node->all_list, list);
++ return node;
++}
++
++static inline void free_tree_node(struct tree_node *node)
++{
++ list_del(&node->all_list);
++ kmem_cache_free(tree_node_cache, node);
++}
++
++static void uksm_drop_anon_vma(struct rmap_item *rmap_item)
++{
++ struct anon_vma *anon_vma = rmap_item->anon_vma;
++
++ put_anon_vma(anon_vma);
++}
++
++
++/**
++ * Remove a stable node from stable_tree, may unlink from its tree_node and
++ * may remove its parent tree_node if no other stable node is pending.
++ *
++ * @stable_node The node need to be removed
++ * @unlink_rb Will this node be unlinked from the rbtree?
++ * @remove_tree_ node Will its tree_node be removed if empty?
++ */
++static void remove_node_from_stable_tree(struct stable_node *stable_node,
++ int unlink_rb, int remove_tree_node)
++{
++ struct node_vma *node_vma;
++ struct rmap_item *rmap_item;
++ struct hlist_node *n;
++
++ if (!hlist_empty(&stable_node->hlist)) {
++ hlist_for_each_entry_safe(node_vma, n,
++ &stable_node->hlist, hlist) {
++ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) {
++ uksm_pages_sharing--;
++
++ uksm_drop_anon_vma(rmap_item);
++ rmap_item->address &= PAGE_MASK;
++ }
++ free_node_vma(node_vma);
++ cond_resched();
++ }
++
++ /* the last one is counted as shared */
++ uksm_pages_shared--;
++ uksm_pages_sharing++;
++ }
++
++ if (stable_node->tree_node && unlink_rb) {
++ rb_erase(&stable_node->node,
++ &stable_node->tree_node->sub_root);
++
++ if (RB_EMPTY_ROOT(&stable_node->tree_node->sub_root) &&
++ remove_tree_node) {
++ rb_erase(&stable_node->tree_node->node,
++ root_stable_treep);
++ free_tree_node(stable_node->tree_node);
++ } else {
++ stable_node->tree_node->count--;
++ }
++ }
++
++ free_stable_node(stable_node);
++}
++
++
++/*
++ * get_uksm_page: checks if the page indicated by the stable node
++ * is still its ksm page, despite having held no reference to it.
++ * In which case we can trust the content of the page, and it
++ * returns the gotten page; but if the page has now been zapped,
++ * remove the stale node from the stable tree and return NULL.
++ *
++ * You would expect the stable_node to hold a reference to the ksm page.
++ * But if it increments the page's count, swapping out has to wait for
++ * ksmd to come around again before it can free the page, which may take
++ * seconds or even minutes: much too unresponsive. So instead we use a
++ * "keyhole reference": access to the ksm page from the stable node peeps
++ * out through its keyhole to see if that page still holds the right key,
++ * pointing back to this stable node. This relies on freeing a PageAnon
++ * page to reset its page->mapping to NULL, and relies on no other use of
++ * a page to put something that might look like our key in page->mapping.
++ *
++ * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
++ * but this is different - made simpler by uksm_thread_mutex being held, but
++ * interesting for assuming that no other use of the struct page could ever
++ * put our expected_mapping into page->mapping (or a field of the union which
++ * coincides with page->mapping). The RCU calls are not for KSM at all, but
++ * to keep the page_count protocol described with page_cache_get_speculative.
++ *
++ * Note: it is possible that get_uksm_page() will return NULL one moment,
++ * then page the next, if the page is in between page_freeze_refs() and
++ * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
++ * is on its way to being freed; but it is an anomaly to bear in mind.
++ *
++ * @unlink_rb: if the removal of this node will firstly unlink from
++ * its rbtree. stable_node_reinsert will prevent this when restructuring the
++ * node from its old tree.
++ *
++ * @remove_tree_node: if this is the last one of its tree_node, will the
++ * tree_node be freed ? If we are inserting stable node, this tree_node may
++ * be reused, so don't free it.
++ */
++static struct page *get_uksm_page(struct stable_node *stable_node,
++ int unlink_rb, int remove_tree_node)
++{
++ struct page *page;
++ void *expected_mapping;
++ unsigned long kpfn;
++
++ expected_mapping = (void *)((unsigned long)stable_node |
++ PAGE_MAPPING_KSM);
++again:
++ kpfn = READ_ONCE(stable_node->kpfn);
++ page = pfn_to_page(kpfn);
++
++ /*
++ * page is computed from kpfn, so on most architectures reading
++ * page->mapping is naturally ordered after reading node->kpfn,
++ * but on Alpha we need to be more careful.
++ */
++ smp_rmb();
++
++ if (READ_ONCE(page->mapping) != expected_mapping)
++ goto stale;
++
++ /*
++ * We cannot do anything with the page while its refcount is 0.
++ * Usually 0 means free, or tail of a higher-order page: in which
++ * case this node is no longer referenced, and should be freed;
++ * however, it might mean that the page is under page_freeze_refs().
++ * The __remove_mapping() case is easy, again the node is now stale;
++ * but if page is swapcache in migrate_page_move_mapping(), it might
++ * still be our page, in which case it's essential to keep the node.
++ */
++ while (!get_page_unless_zero(page)) {
++ /*
++ * Another check for page->mapping != expected_mapping would
++ * work here too. We have chosen the !PageSwapCache test to
++ * optimize the common case, when the page is or is about to
++ * be freed: PageSwapCache is cleared (under spin_lock_irq)
++ * in the freeze_refs section of __remove_mapping(); but Anon
++ * page->mapping reset to NULL later, in free_pages_prepare().
++ */
++ if (!PageSwapCache(page))
++ goto stale;
++ cpu_relax();
++ }
++
++ if (READ_ONCE(page->mapping) != expected_mapping) {
++ put_page(page);
++ goto stale;
++ }
++
++ lock_page(page);
++ if (READ_ONCE(page->mapping) != expected_mapping) {
++ unlock_page(page);
++ put_page(page);
++ goto stale;
++ }
++ unlock_page(page);
++ return page;
++stale:
++ /*
++ * We come here from above when page->mapping or !PageSwapCache
++ * suggests that the node is stale; but it might be under migration.
++ * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
++ * before checking whether node->kpfn has been changed.
++ */
++ smp_rmb();
++ if (stable_node->kpfn != kpfn)
++ goto again;
++
++ remove_node_from_stable_tree(stable_node, unlink_rb, remove_tree_node);
++
++ return NULL;
++}
++
++/*
++ * Removing rmap_item from stable or unstable tree.
++ * This function will clean the information from the stable/unstable tree.
++ */
++static inline void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
++{
++ if (rmap_item->address & STABLE_FLAG) {
++ struct stable_node *stable_node;
++ struct node_vma *node_vma;
++ struct page *page;
++
++ node_vma = rmap_item->head;
++ stable_node = node_vma->head;
++ page = get_uksm_page(stable_node, 1, 1);
++ if (!page)
++ goto out;
++
++ /*
++ * page lock is needed because it's racing with
++ * try_to_unmap_ksm(), etc.
++ */
++ lock_page(page);
++ hlist_del(&rmap_item->hlist);
++
++ if (hlist_empty(&node_vma->rmap_hlist)) {
++ hlist_del(&node_vma->hlist);
++ free_node_vma(node_vma);
++ }
++ unlock_page(page);
++
++ put_page(page);
++ if (hlist_empty(&stable_node->hlist)) {
++ /* do NOT call remove_node_from_stable_tree() here,
++ * it's possible for a forked rmap_item not in
++ * stable tree while the in-tree rmap_items were
++ * deleted.
++ */
++ uksm_pages_shared--;
++ } else
++ uksm_pages_sharing--;
++
++
++ uksm_drop_anon_vma(rmap_item);
++ } else if (rmap_item->address & UNSTABLE_FLAG) {
++ if (rmap_item->hash_round == uksm_hash_round) {
++
++ rb_erase(&rmap_item->node,
++ &rmap_item->tree_node->sub_root);
++ if (RB_EMPTY_ROOT(&rmap_item->tree_node->sub_root)) {
++ rb_erase(&rmap_item->tree_node->node,
++ &root_unstable_tree);
++
++ free_tree_node(rmap_item->tree_node);
++ } else
++ rmap_item->tree_node->count--;
++ }
++ uksm_pages_unshared--;
++ }
++
++ rmap_item->address &= PAGE_MASK;
++ rmap_item->hash_max = 0;
++
++out:
++ cond_resched(); /* we're called from many long loops */
++}
++
++static inline int slot_in_uksm(struct vma_slot *slot)
++{
++ return list_empty(&slot->slot_list);
++}
++
++/*
++ * Test if the mm is exiting
++ */
++static inline bool uksm_test_exit(struct mm_struct *mm)
++{
++ return atomic_read(&mm->mm_users) == 0;
++}
++
++static inline unsigned long vma_pool_size(struct vma_slot *slot)
++{
++ return round_up(sizeof(struct rmap_list_entry) * slot->pages,
++ PAGE_SIZE) >> PAGE_SHIFT;
++}
++
++#define CAN_OVERFLOW_U64(x, delta) (U64_MAX - (x) < (delta))
++
++/* must be done with sem locked */
++static int slot_pool_alloc(struct vma_slot *slot)
++{
++ unsigned long pool_size;
++
++ if (slot->rmap_list_pool)
++ return 0;
++
++ pool_size = vma_pool_size(slot);
++ slot->rmap_list_pool = kcalloc(pool_size, sizeof(struct page *),
++ GFP_KERNEL);
++ if (!slot->rmap_list_pool)
++ return -ENOMEM;
++
++ slot->pool_counts = kcalloc(pool_size, sizeof(unsigned int),
++ GFP_KERNEL);
++ if (!slot->pool_counts) {
++ kfree(slot->rmap_list_pool);
++ return -ENOMEM;
++ }
++
++ slot->pool_size = pool_size;
++ BUG_ON(CAN_OVERFLOW_U64(uksm_pages_total, slot->pages));
++ slot->flags |= UKSM_SLOT_IN_UKSM;
++ uksm_pages_total += slot->pages;
++
++ return 0;
++}
++
++/*
++ * Called after vma is unlinked from its mm
++ */
++void uksm_remove_vma(struct vm_area_struct *vma)
++{
++ struct vma_slot *slot;
++
++ if (!vma->uksm_vma_slot)
++ return;
++
++ spin_lock(&vma_slot_list_lock);
++ slot = vma->uksm_vma_slot;
++ if (!slot)
++ goto out;
++
++ if (slot_in_uksm(slot)) {
++ /**
++ * This slot has been added by ksmd, so move to the del list
++ * waiting ksmd to free it.
++ */
++ list_add_tail(&slot->slot_list, &vma_slot_del);
++ } else {
++ /**
++ * It's still on new list. It's ok to free slot directly.
++ */
++ list_del(&slot->slot_list);
++ free_vma_slot(slot);
++ }
++out:
++ vma->uksm_vma_slot = NULL;
++ spin_unlock(&vma_slot_list_lock);
++}
++
++/**
++ * Need to do two things:
++ * 1. check if slot was moved to del list
++ * 2. make sure the mmap_sem is manipulated under valid vma.
++ *
++ * My concern here is that in some cases, this may make
++ * vma_slot_list_lock() waiters to serialized further by some
++ * sem->wait_lock, can this really be expensive?
++ *
++ *
++ * @return
++ * 0: if successfully locked mmap_sem
++ * -ENOENT: this slot was moved to del list
++ * -EBUSY: vma lock failed
++ */
++static int try_down_read_slot_mmap_sem(struct vma_slot *slot)
++{
++ struct vm_area_struct *vma;
++ struct mm_struct *mm;
++ struct rw_semaphore *sem;
++
++ spin_lock(&vma_slot_list_lock);
++
++ /* the slot_list was removed and inited from new list, when it enters
++ * uksm_list. If now it's not empty, then it must be moved to del list
++ */
++ if (!slot_in_uksm(slot)) {
++ spin_unlock(&vma_slot_list_lock);
++ return -ENOENT;
++ }
++
++ BUG_ON(slot->pages != vma_pages(slot->vma));
++ /* Ok, vma still valid */
++ vma = slot->vma;
++ mm = vma->vm_mm;
++ sem = &mm->mmap_lock;
++
++ if (uksm_test_exit(mm)) {
++ spin_unlock(&vma_slot_list_lock);
++ return -ENOENT;
++ }
++
++ if (down_read_trylock(sem)) {
++ spin_unlock(&vma_slot_list_lock);
++ if (slot_pool_alloc(slot)) {
++ uksm_remove_vma(vma);
++ up_read(sem);
++ return -ENOENT;
++ }
++ return 0;
++ }
++
++ spin_unlock(&vma_slot_list_lock);
++ return -EBUSY;
++}
++
++static inline unsigned long
++vma_page_address(struct page *page, struct vm_area_struct *vma)
++{
++ pgoff_t pgoff = page->index;
++ unsigned long address;
++
++ address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
++ if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
++ /* page should be within @vma mapping range */
++ return -EFAULT;
++ }
++ return address;
++}
++
++
++/* return 0 on success with the item's mmap_sem locked */
++static inline int get_mergeable_page_lock_mmap(struct rmap_item *item)
++{
++ struct mm_struct *mm;
++ struct vma_slot *slot = item->slot;
++ int err = -EINVAL;
++
++ struct page *page;
++
++ /*
++ * try_down_read_slot_mmap_sem() returns non-zero if the slot
++ * has been removed by uksm_remove_vma().
++ */
++ if (try_down_read_slot_mmap_sem(slot))
++ return -EBUSY;
++
++ mm = slot->vma->vm_mm;
++
++ if (uksm_test_exit(mm))
++ goto failout_up;
++
++ page = item->page;
++ rcu_read_lock();
++ if (!get_page_unless_zero(page)) {
++ rcu_read_unlock();
++ goto failout_up;
++ }
++
++ /* No need to consider huge page here. */
++ if (item->slot->vma->anon_vma != page_anon_vma(page) ||
++ vma_page_address(page, item->slot->vma) != get_rmap_addr(item)) {
++ /*
++ * TODO:
++ * should we release this item becase of its stale page
++ * mapping?
++ */
++ put_page(page);
++ rcu_read_unlock();
++ goto failout_up;
++ }
++ rcu_read_unlock();
++ return 0;
++
++failout_up:
++ mmap_read_unlock(mm);
++ return err;
++}
++
++/*
++ * What kind of VMA is considered ?
++ */
++static inline int vma_can_enter(struct vm_area_struct *vma)
++{
++ return uksm_flags_can_scan(vma->vm_flags);
++}
++
++/*
++ * Called whenever a fresh new vma is created A new vma_slot.
++ * is created and inserted into a global list Must be called.
++ * after vma is inserted to its mm.
++ */
++void uksm_vma_add_new(struct vm_area_struct *vma)
++{
++ struct vma_slot *slot;
++
++ if (!vma_can_enter(vma)) {
++ vma->uksm_vma_slot = NULL;
++ return;
++ }
++
++ slot = alloc_vma_slot();
++ if (!slot) {
++ vma->uksm_vma_slot = NULL;
++ return;
++ }
++
++ vma->uksm_vma_slot = slot;
++ vma->vm_flags |= VM_MERGEABLE;
++ slot->vma = vma;
++ slot->mm = vma->vm_mm;
++ slot->ctime_j = jiffies;
++ slot->pages = vma_pages(vma);
++ spin_lock(&vma_slot_list_lock);
++ list_add_tail(&slot->slot_list, &vma_slot_new);
++ spin_unlock(&vma_slot_list_lock);
++}
++
++/* 32/3 < they < 32/2 */
++#define shiftl 8
++#define shiftr 12
++
++#define HASH_FROM_TO(from, to) \
++for (index = from; index < to; index++) { \
++ pos = random_nums[index]; \
++ hash += key[pos]; \
++ hash += (hash << shiftl); \
++ hash ^= (hash >> shiftr); \
++}
++
++
++#define HASH_FROM_DOWN_TO(from, to) \
++for (index = from - 1; index >= to; index--) { \
++ hash ^= (hash >> shiftr); \
++ hash ^= (hash >> (shiftr*2)); \
++ hash -= (hash << shiftl); \
++ hash += (hash << (shiftl*2)); \
++ pos = random_nums[index]; \
++ hash -= key[pos]; \
++}
++
++/*
++ * The main random sample hash function.
++ */
++static u32 random_sample_hash(void *addr, u32 hash_strength)
++{
++ u32 hash = 0xdeadbeef;
++ int index, pos, loop = hash_strength;
++ u32 *key = (u32 *)addr;
++
++ if (loop > HASH_STRENGTH_FULL)
++ loop = HASH_STRENGTH_FULL;
++
++ HASH_FROM_TO(0, loop);
++
++ if (hash_strength > HASH_STRENGTH_FULL) {
++ loop = hash_strength - HASH_STRENGTH_FULL;
++ HASH_FROM_TO(0, loop);
++ }
++
++ return hash;
++}
++
++
++/**
++ * It's used when hash strength is adjusted
++ *
++ * @addr The page's virtual address
++ * @from The original hash strength
++ * @to The hash strength changed to
++ * @hash The hash value generated with "from" hash value
++ *
++ * return the hash value
++ */
++static u32 delta_hash(void *addr, int from, int to, u32 hash)
++{
++ u32 *key = (u32 *)addr;
++ int index, pos; /* make sure they are int type */
++
++ if (to > from) {
++ if (from >= HASH_STRENGTH_FULL) {
++ from -= HASH_STRENGTH_FULL;
++ to -= HASH_STRENGTH_FULL;
++ HASH_FROM_TO(from, to);
++ } else if (to <= HASH_STRENGTH_FULL) {
++ HASH_FROM_TO(from, to);
++ } else {
++ HASH_FROM_TO(from, HASH_STRENGTH_FULL);
++ HASH_FROM_TO(0, to - HASH_STRENGTH_FULL);
++ }
++ } else {
++ if (from <= HASH_STRENGTH_FULL) {
++ HASH_FROM_DOWN_TO(from, to);
++ } else if (to >= HASH_STRENGTH_FULL) {
++ from -= HASH_STRENGTH_FULL;
++ to -= HASH_STRENGTH_FULL;
++ HASH_FROM_DOWN_TO(from, to);
++ } else {
++ HASH_FROM_DOWN_TO(from - HASH_STRENGTH_FULL, 0);
++ HASH_FROM_DOWN_TO(HASH_STRENGTH_FULL, to);
++ }
++ }
++
++ return hash;
++}
++
++/**
++ *
++ * Called when: rshash_pos or rshash_neg is about to overflow or a scan round
++ * has finished.
++ *
++ * return 0 if no page has been scanned since last call, 1 otherwise.
++ */
++static inline int encode_benefit(void)
++{
++ u64 scanned_delta, pos_delta, neg_delta;
++ unsigned long base = benefit.base;
++
++ scanned_delta = uksm_pages_scanned - uksm_pages_scanned_last;
++
++ if (!scanned_delta)
++ return 0;
++
++ scanned_delta >>= base;
++ pos_delta = rshash_pos >> base;
++ neg_delta = rshash_neg >> base;
++
++ if (CAN_OVERFLOW_U64(benefit.pos, pos_delta) ||
++ CAN_OVERFLOW_U64(benefit.neg, neg_delta) ||
++ CAN_OVERFLOW_U64(benefit.scanned, scanned_delta)) {
++ benefit.scanned >>= 1;
++ benefit.neg >>= 1;
++ benefit.pos >>= 1;
++ benefit.base++;
++ scanned_delta >>= 1;
++ pos_delta >>= 1;
++ neg_delta >>= 1;
++ }
++
++ benefit.pos += pos_delta;
++ benefit.neg += neg_delta;
++ benefit.scanned += scanned_delta;
++
++ BUG_ON(!benefit.scanned);
++
++ rshash_pos = rshash_neg = 0;
++ uksm_pages_scanned_last = uksm_pages_scanned;
++
++ return 1;
++}
++
++static inline void reset_benefit(void)
++{
++ benefit.pos = 0;
++ benefit.neg = 0;
++ benefit.base = 0;
++ benefit.scanned = 0;
++}
++
++static inline void inc_rshash_pos(unsigned long delta)
++{
++ if (CAN_OVERFLOW_U64(rshash_pos, delta))
++ encode_benefit();
++
++ rshash_pos += delta;
++}
++
++static inline void inc_rshash_neg(unsigned long delta)
++{
++ if (CAN_OVERFLOW_U64(rshash_neg, delta))
++ encode_benefit();
++
++ rshash_neg += delta;
++}
++
++
++static inline u32 page_hash(struct page *page, unsigned long hash_strength,
++ int cost_accounting)
++{
++ u32 val;
++ unsigned long delta;
++
++ void *addr = kmap_atomic(page);
++
++ val = random_sample_hash(addr, hash_strength);
++ kunmap_atomic(addr);
++
++ if (cost_accounting) {
++ if (hash_strength < HASH_STRENGTH_FULL)
++ delta = HASH_STRENGTH_FULL - hash_strength;
++ else
++ delta = 0;
++
++ inc_rshash_pos(delta);
++ }
++
++ return val;
++}
++
++static int memcmp_pages_with_cost(struct page *page1, struct page *page2,
++ int cost_accounting)
++{
++ char *addr1, *addr2;
++ int ret;
++
++ addr1 = kmap_atomic(page1);
++ addr2 = kmap_atomic(page2);
++ ret = memcmp(addr1, addr2, PAGE_SIZE);
++ kunmap_atomic(addr2);
++ kunmap_atomic(addr1);
++
++ if (cost_accounting)
++ inc_rshash_neg(memcmp_cost);
++
++ return ret;
++}
++
++static inline int pages_identical_with_cost(struct page *page1, struct page *page2)
++{
++ return !memcmp_pages_with_cost(page1, page2, 0);
++}
++
++static inline int is_page_full_zero(struct page *page)
++{
++ char *addr;
++ int ret;
++
++ addr = kmap_atomic(page);
++ ret = is_full_zero(addr, PAGE_SIZE);
++ kunmap_atomic(addr);
++
++ return ret;
++}
++
++static int write_protect_page(struct vm_area_struct *vma, struct page *page,
++ pte_t *orig_pte, pte_t *old_pte)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ struct page_vma_mapped_walk pvmw = {
++ .page = page,
++ .vma = vma,
++ };
++ struct mmu_notifier_range range;
++ int swapped;
++ int err = -EFAULT;
++
++ pvmw.address = page_address_in_vma(page, vma);
++ if (pvmw.address == -EFAULT)
++ goto out;
++
++ BUG_ON(PageTransCompound(page));
++
++ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, pvmw.address,
++ pvmw.address + PAGE_SIZE);
++ mmu_notifier_invalidate_range_start(&range);
++
++ if (!page_vma_mapped_walk(&pvmw))
++ goto out_mn;
++ if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
++ goto out_unlock;
++
++ if (old_pte)
++ *old_pte = *pvmw.pte;
++
++ if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
++ (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) || mm_tlb_flush_pending(mm)) {
++ pte_t entry;
++
++ swapped = PageSwapCache(page);
++ flush_cache_page(vma, pvmw.address, page_to_pfn(page));
++ /*
++ * Ok this is tricky, when get_user_pages_fast() run it doesn't
++ * take any lock, therefore the check that we are going to make
++ * with the pagecount against the mapcount is racey and
++ * O_DIRECT can happen right after the check.
++ * So we clear the pte and flush the tlb before the check
++ * this assure us that no O_DIRECT can happen after the check
++ * or in the middle of the check.
++ */
++ entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
++ /*
++ * Check that no O_DIRECT or similar I/O is in progress on the
++ * page
++ */
++ if (page_mapcount(page) + 1 + swapped != page_count(page)) {
++ set_pte_at(mm, pvmw.address, pvmw.pte, entry);
++ goto out_unlock;
++ }
++ if (pte_dirty(entry))
++ set_page_dirty(page);
++
++ if (pte_protnone(entry))
++ entry = pte_mkclean(pte_clear_savedwrite(entry));
++ else
++ entry = pte_mkclean(pte_wrprotect(entry));
++
++ set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
++ }
++ *orig_pte = *pvmw.pte;
++ err = 0;
++
++out_unlock:
++ page_vma_mapped_walk_done(&pvmw);
++out_mn:
++ mmu_notifier_invalidate_range_end(&range);
++out:
++ return err;
++}
++
++#define MERGE_ERR_PGERR 1 /* the page is invalid cannot continue */
++#define MERGE_ERR_COLLI 2 /* there is a collision */
++#define MERGE_ERR_COLLI_MAX 3 /* collision at the max hash strength */
++#define MERGE_ERR_CHANGED 4 /* the page has changed since last hash */
++
++
++/**
++ * replace_page - replace page in vma by new ksm page
++ * @vma: vma that holds the pte pointing to page
++ * @page: the page we are replacing by kpage
++ * @kpage: the ksm page we replace page by
++ * @orig_pte: the original value of the pte
++ *
++ * Returns 0 on success, MERGE_ERR_PGERR on failure.
++ */
++static int replace_page(struct vm_area_struct *vma, struct page *page,
++ struct page *kpage, pte_t orig_pte)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ struct mmu_notifier_range range;
++ pgd_t *pgd;
++ p4d_t *p4d;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *ptep;
++ spinlock_t *ptl;
++ pte_t entry;
++
++ unsigned long addr;
++ int err = MERGE_ERR_PGERR;
++
++ addr = page_address_in_vma(page, vma);
++ if (addr == -EFAULT)
++ goto out;
++
++ pgd = pgd_offset(mm, addr);
++ if (!pgd_present(*pgd))
++ goto out;
++
++ p4d = p4d_offset(pgd, addr);
++ pud = pud_offset(p4d, addr);
++ if (!pud_present(*pud))
++ goto out;
++
++ pmd = pmd_offset(pud, addr);
++ BUG_ON(pmd_trans_huge(*pmd));
++ if (!pmd_present(*pmd))
++ goto out;
++
++ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
++ addr + PAGE_SIZE);
++ mmu_notifier_invalidate_range_start(&range);
++
++ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
++ if (!pte_same(*ptep, orig_pte)) {
++ pte_unmap_unlock(ptep, ptl);
++ goto out_mn;
++ }
++
++ flush_cache_page(vma, addr, pte_pfn(*ptep));
++ ptep_clear_flush_notify(vma, addr, ptep);
++ entry = mk_pte(kpage, vma->vm_page_prot);
++
++ /* special treatment is needed for zero_page */
++ if ((page_to_pfn(kpage) == uksm_zero_pfn) ||
++ (page_to_pfn(kpage) == zero_pfn)) {
++ entry = pte_mkspecial(entry);
++ dec_mm_counter(mm, MM_ANONPAGES);
++ inc_zone_page_state(page, NR_UKSM_ZERO_PAGES);
++ } else {
++ get_page(kpage);
++ page_add_anon_rmap(kpage, vma, addr, false);
++ }
++
++ set_pte_at_notify(mm, addr, ptep, entry);
++
++ page_remove_rmap(page, false);
++ if (!page_mapped(page))
++ try_to_free_swap(page);
++ put_page(page);
++
++ pte_unmap_unlock(ptep, ptl);
++ err = 0;
++out_mn:
++ mmu_notifier_invalidate_range_end(&range);
++out:
++ return err;
++}
++
++
++/**
++ * Fully hash a page with HASH_STRENGTH_MAX return a non-zero hash value. The
++ * zero hash value at HASH_STRENGTH_MAX is used to indicated that its
++ * hash_max member has not been calculated.
++ *
++ * @page The page needs to be hashed
++ * @hash_old The hash value calculated with current hash strength
++ *
++ * return the new hash value calculated at HASH_STRENGTH_MAX
++ */
++static inline u32 page_hash_max(struct page *page, u32 hash_old)
++{
++ u32 hash_max = 0;
++ void *addr;
++
++ addr = kmap_atomic(page);
++ hash_max = delta_hash(addr, hash_strength,
++ HASH_STRENGTH_MAX, hash_old);
++
++ kunmap_atomic(addr);
++
++ if (!hash_max)
++ hash_max = 1;
++
++ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength);
++ return hash_max;
++}
++
++/*
++ * We compare the hash again, to ensure that it is really a hash collision
++ * instead of being caused by page write.
++ */
++static inline int check_collision(struct rmap_item *rmap_item,
++ u32 hash)
++{
++ int err;
++ struct page *page = rmap_item->page;
++
++ /* if this rmap_item has already been hash_maxed, then the collision
++ * must appears in the second-level rbtree search. In this case we check
++ * if its hash_max value has been changed. Otherwise, the collision
++ * happens in the first-level rbtree search, so we check against it's
++ * current hash value.
++ */
++ if (rmap_item->hash_max) {
++ inc_rshash_neg(memcmp_cost);
++ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength);
++
++ if (rmap_item->hash_max == page_hash_max(page, hash))
++ err = MERGE_ERR_COLLI;
++ else
++ err = MERGE_ERR_CHANGED;
++ } else {
++ inc_rshash_neg(memcmp_cost + hash_strength);
++
++ if (page_hash(page, hash_strength, 0) == hash)
++ err = MERGE_ERR_COLLI;
++ else
++ err = MERGE_ERR_CHANGED;
++ }
++
++ return err;
++}
++
++/**
++ * Try to merge a rmap_item.page with a kpage in stable node. kpage must
++ * already be a ksm page.
++ *
++ * @return 0 if the pages were merged, -EFAULT otherwise.
++ */
++static int try_to_merge_with_uksm_page(struct rmap_item *rmap_item,
++ struct page *kpage, u32 hash)
++{
++ struct vm_area_struct *vma = rmap_item->slot->vma;
++ struct mm_struct *mm = vma->vm_mm;
++ pte_t orig_pte = __pte(0);
++ int err = MERGE_ERR_PGERR;
++ struct page *page;
++
++ if (uksm_test_exit(mm))
++ goto out;
++
++ page = rmap_item->page;
++
++ if (page == kpage) { /* ksm page forked */
++ err = 0;
++ goto out;
++ }
++
++ /*
++ * We need the page lock to read a stable PageSwapCache in
++ * write_protect_page(). We use trylock_page() instead of
++ * lock_page() because we don't want to wait here - we
++ * prefer to continue scanning and merging different pages,
++ * then come back to this page when it is unlocked.
++ */
++ if (!trylock_page(page))
++ goto out;
++
++ if (!PageAnon(page) || !PageKsm(kpage))
++ goto out_unlock;
++
++ if (PageTransCompound(page)) {
++ err = split_huge_page(page);
++ if (err)
++ goto out_unlock;
++ }
++
++ /*
++ * If this anonymous page is mapped only here, its pte may need
++ * to be write-protected. If it's mapped elsewhere, all of its
++ * ptes are necessarily already write-protected. But in either
++ * case, we need to lock and check page_count is not raised.
++ */
++ if (write_protect_page(vma, page, &orig_pte, NULL) == 0) {
++ if (pages_identical_with_cost(page, kpage))
++ err = replace_page(vma, page, kpage, orig_pte);
++ else
++ err = check_collision(rmap_item, hash);
++ }
++
++ if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
++ munlock_vma_page(page);
++ if (!PageMlocked(kpage)) {
++ unlock_page(page);
++ lock_page(kpage);
++ mlock_vma_page(kpage);
++ page = kpage; /* for final unlock */
++ }
++ }
++
++out_unlock:
++ unlock_page(page);
++out:
++ return err;
++}
++
++
++
++/**
++ * If two pages fail to merge in try_to_merge_two_pages, then we have a chance
++ * to restore a page mapping that has been changed in try_to_merge_two_pages.
++ *
++ * @return 0 on success.
++ */
++static int restore_uksm_page_pte(struct vm_area_struct *vma, unsigned long addr,
++ pte_t orig_pte, pte_t wprt_pte)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ pgd_t *pgd;
++ p4d_t *p4d;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *ptep;
++ spinlock_t *ptl;
++
++ int err = -EFAULT;
++
++ pgd = pgd_offset(mm, addr);
++ if (!pgd_present(*pgd))
++ goto out;
++
++ p4d = p4d_offset(pgd, addr);
++ pud = pud_offset(p4d, addr);
++ if (!pud_present(*pud))
++ goto out;
++
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ goto out;
++
++ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
++ if (!pte_same(*ptep, wprt_pte)) {
++ /* already copied, let it be */
++ pte_unmap_unlock(ptep, ptl);
++ goto out;
++ }
++
++ /*
++ * Good boy, still here. When we still get the ksm page, it does not
++ * return to the free page pool, there is no way that a pte was changed
++ * to other page and gets back to this page. And remind that ksm page
++ * do not reuse in do_wp_page(). So it's safe to restore the original
++ * pte.
++ */
++ flush_cache_page(vma, addr, pte_pfn(*ptep));
++ ptep_clear_flush_notify(vma, addr, ptep);
++ set_pte_at_notify(mm, addr, ptep, orig_pte);
++
++ pte_unmap_unlock(ptep, ptl);
++ err = 0;
++out:
++ return err;
++}
++
++/**
++ * try_to_merge_two_pages() - take two identical pages and prepare
++ * them to be merged into one page(rmap_item->page)
++ *
++ * @return 0 if we successfully merged two identical pages into
++ * one ksm page. MERGE_ERR_COLLI if it's only a hash collision
++ * search in rbtree. MERGE_ERR_CHANGED if rmap_item has been
++ * changed since it's hashed. MERGE_ERR_PGERR otherwise.
++ *
++ */
++static int try_to_merge_two_pages(struct rmap_item *rmap_item,
++ struct rmap_item *tree_rmap_item,
++ u32 hash)
++{
++ pte_t orig_pte1 = __pte(0), orig_pte2 = __pte(0);
++ pte_t wprt_pte1 = __pte(0), wprt_pte2 = __pte(0);
++ struct vm_area_struct *vma1 = rmap_item->slot->vma;
++ struct vm_area_struct *vma2 = tree_rmap_item->slot->vma;
++ struct page *page = rmap_item->page;
++ struct page *tree_page = tree_rmap_item->page;
++ int err = MERGE_ERR_PGERR;
++ struct address_space *saved_mapping;
++
++
++ if (rmap_item->page == tree_rmap_item->page)
++ goto out;
++
++ if (!trylock_page(page))
++ goto out;
++
++ if (!PageAnon(page))
++ goto out_unlock;
++
++ if (PageTransCompound(page)) {
++ err = split_huge_page(page);
++ if (err)
++ goto out_unlock;
++ }
++
++ if (write_protect_page(vma1, page, &wprt_pte1, &orig_pte1) != 0) {
++ unlock_page(page);
++ goto out;
++ }
++
++ /*
++ * While we hold page lock, upgrade page from
++ * PageAnon+anon_vma to PageKsm+NULL stable_node:
++ * stable_tree_insert() will update stable_node.
++ */
++ saved_mapping = page->mapping;
++ set_page_stable_node(page, NULL);
++ mark_page_accessed(page);
++ if (!PageDirty(page))
++ SetPageDirty(page);
++
++ unlock_page(page);
++
++ if (!trylock_page(tree_page))
++ goto restore_out;
++
++ if (!PageAnon(tree_page)) {
++ unlock_page(tree_page);
++ goto restore_out;
++ }
++
++ if (PageTransCompound(tree_page)) {
++ err = split_huge_page(tree_page);
++ if (err) {
++ unlock_page(tree_page);
++ goto restore_out;
++ }
++ }
++
++ if (write_protect_page(vma2, tree_page, &wprt_pte2, &orig_pte2) != 0) {
++ unlock_page(tree_page);
++ goto restore_out;
++ }
++
++ if (pages_identical_with_cost(page, tree_page)) {
++ err = replace_page(vma2, tree_page, page, wprt_pte2);
++ if (err) {
++ unlock_page(tree_page);
++ goto restore_out;
++ }
++
++ if ((vma2->vm_flags & VM_LOCKED)) {
++ munlock_vma_page(tree_page);
++ if (!PageMlocked(page)) {
++ unlock_page(tree_page);
++ lock_page(page);
++ mlock_vma_page(page);
++ tree_page = page; /* for final unlock */
++ }
++ }
++
++ unlock_page(tree_page);
++
++ goto out; /* success */
++
++ } else {
++ if (tree_rmap_item->hash_max &&
++ tree_rmap_item->hash_max == rmap_item->hash_max) {
++ err = MERGE_ERR_COLLI_MAX;
++ } else if (page_hash(page, hash_strength, 0) ==
++ page_hash(tree_page, hash_strength, 0)) {
++ inc_rshash_neg(memcmp_cost + hash_strength * 2);
++ err = MERGE_ERR_COLLI;
++ } else {
++ err = MERGE_ERR_CHANGED;
++ }
++
++ unlock_page(tree_page);
++ }
++
++restore_out:
++ lock_page(page);
++ if (!restore_uksm_page_pte(vma1, get_rmap_addr(rmap_item),
++ orig_pte1, wprt_pte1))
++ page->mapping = saved_mapping;
++
++out_unlock:
++ unlock_page(page);
++out:
++ return err;
++}
++
++static inline int hash_cmp(u32 new_val, u32 node_val)
++{
++ if (new_val > node_val)
++ return 1;
++ else if (new_val < node_val)
++ return -1;
++ else
++ return 0;
++}
++
++static inline u32 rmap_item_hash_max(struct rmap_item *item, u32 hash)
++{
++ u32 hash_max = item->hash_max;
++
++ if (!hash_max) {
++ hash_max = page_hash_max(item->page, hash);
++
++ item->hash_max = hash_max;
++ }
++
++ return hash_max;
++}
++
++
++
++/**
++ * stable_tree_search() - search the stable tree for a page
++ *
++ * @item: the rmap_item we are comparing with
++ * @hash: the hash value of this item->page already calculated
++ *
++ * @return the page we have found, NULL otherwise. The page returned has
++ * been gotten.
++ */
++static struct page *stable_tree_search(struct rmap_item *item, u32 hash)
++{
++ struct rb_node *node = root_stable_treep->rb_node;
++ struct tree_node *tree_node;
++ unsigned long hash_max;
++ struct page *page = item->page;
++ struct stable_node *stable_node;
++
++ stable_node = page_stable_node(page);
++ if (stable_node) {
++ /* ksm page forked, that is
++ * if (PageKsm(page) && !in_stable_tree(rmap_item))
++ * it's actually gotten once outside.
++ */
++ get_page(page);
++ return page;
++ }
++
++ while (node) {
++ int cmp;
++
++ tree_node = rb_entry(node, struct tree_node, node);
++
++ cmp = hash_cmp(hash, tree_node->hash);
++
++ if (cmp < 0)
++ node = node->rb_left;
++ else if (cmp > 0)
++ node = node->rb_right;
++ else
++ break;
++ }
++
++ if (!node)
++ return NULL;
++
++ if (tree_node->count == 1) {
++ stable_node = rb_entry(tree_node->sub_root.rb_node,
++ struct stable_node, node);
++ BUG_ON(!stable_node);
++
++ goto get_page_out;
++ }
++
++ /*
++ * ok, we have to search the second
++ * level subtree, hash the page to a
++ * full strength.
++ */
++ node = tree_node->sub_root.rb_node;
++ BUG_ON(!node);
++ hash_max = rmap_item_hash_max(item, hash);
++
++ while (node) {
++ int cmp;
++
++ stable_node = rb_entry(node, struct stable_node, node);
++
++ cmp = hash_cmp(hash_max, stable_node->hash_max);
++
++ if (cmp < 0)
++ node = node->rb_left;
++ else if (cmp > 0)
++ node = node->rb_right;
++ else
++ goto get_page_out;
++ }
++
++ return NULL;
++
++get_page_out:
++ page = get_uksm_page(stable_node, 1, 1);
++ return page;
++}
++
++static int try_merge_rmap_item(struct rmap_item *item,
++ struct page *kpage,
++ struct page *tree_page)
++{
++ struct vm_area_struct *vma = item->slot->vma;
++ struct page_vma_mapped_walk pvmw = {
++ .page = kpage,
++ .vma = vma,
++ };
++
++ pvmw.address = get_rmap_addr(item);
++ if (!page_vma_mapped_walk(&pvmw))
++ return 0;
++
++ if (pte_write(*pvmw.pte)) {
++ /* has changed, abort! */
++ page_vma_mapped_walk_done(&pvmw);
++ return 0;
++ }
++
++ get_page(tree_page);
++ page_add_anon_rmap(tree_page, vma, pvmw.address, false);
++
++ flush_cache_page(vma, pvmw.address, page_to_pfn(kpage));
++ ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
++ set_pte_at_notify(vma->vm_mm, pvmw.address, pvmw.pte,
++ mk_pte(tree_page, vma->vm_page_prot));
++
++ page_remove_rmap(kpage, false);
++ put_page(kpage);
++
++ page_vma_mapped_walk_done(&pvmw);
++
++ return 1;
++}
++
++/**
++ * try_to_merge_with_stable_page() - when two rmap_items need to be inserted
++ * into stable tree, the page was found to be identical to a stable ksm page,
++ * this is the last chance we can merge them into one.
++ *
++ * @item1: the rmap_item holding the page which we wanted to insert
++ * into stable tree.
++ * @item2: the other rmap_item we found when unstable tree search
++ * @oldpage: the page currently mapped by the two rmap_items
++ * @tree_page: the page we found identical in stable tree node
++ * @success1: return if item1 is successfully merged
++ * @success2: return if item2 is successfully merged
++ */
++static void try_merge_with_stable(struct rmap_item *item1,
++ struct rmap_item *item2,
++ struct page **kpage,
++ struct page *tree_page,
++ int *success1, int *success2)
++{
++ struct vm_area_struct *vma1 = item1->slot->vma;
++ struct vm_area_struct *vma2 = item2->slot->vma;
++ *success1 = 0;
++ *success2 = 0;
++
++ if (unlikely(*kpage == tree_page)) {
++ /* I don't think this can really happen */
++ pr_warn("UKSM: unexpected condition detected in "
++ "%s -- *kpage == tree_page !\n", __func__);
++ *success1 = 1;
++ *success2 = 1;
++ return;
++ }
++
++ if (!PageAnon(*kpage) || !PageKsm(*kpage))
++ goto failed;
++
++ if (!trylock_page(tree_page))
++ goto failed;
++
++ /* If the oldpage is still ksm and still pointed
++ * to in the right place, and still write protected,
++ * we are confident it's not changed, no need to
++ * memcmp anymore.
++ * be ware, we cannot take nested pte locks,
++ * deadlock risk.
++ */
++ if (!try_merge_rmap_item(item1, *kpage, tree_page))
++ goto unlock_failed;
++
++ /* ok, then vma2, remind that pte1 already set */
++ if (!try_merge_rmap_item(item2, *kpage, tree_page))
++ goto success_1;
++
++ *success2 = 1;
++success_1:
++ *success1 = 1;
++
++
++ if ((*success1 && vma1->vm_flags & VM_LOCKED) ||
++ (*success2 && vma2->vm_flags & VM_LOCKED)) {
++ munlock_vma_page(*kpage);
++ if (!PageMlocked(tree_page))
++ mlock_vma_page(tree_page);
++ }
++
++ /*
++ * We do not need oldpage any more in the caller, so can break the lock
++ * now.
++ */
++ unlock_page(*kpage);
++ *kpage = tree_page; /* Get unlocked outside. */
++ return;
++
++unlock_failed:
++ unlock_page(tree_page);
++failed:
++ return;
++}
++
++static inline void stable_node_hash_max(struct stable_node *node,
++ struct page *page, u32 hash)
++{
++ u32 hash_max = node->hash_max;
++
++ if (!hash_max) {
++ hash_max = page_hash_max(page, hash);
++ node->hash_max = hash_max;
++ }
++}
++
++static inline
++struct stable_node *new_stable_node(struct tree_node *tree_node,
++ struct page *kpage, u32 hash_max)
++{
++ struct stable_node *new_stable_node;
++
++ new_stable_node = alloc_stable_node();
++ if (!new_stable_node)
++ return NULL;
++
++ new_stable_node->kpfn = page_to_pfn(kpage);
++ new_stable_node->hash_max = hash_max;
++ new_stable_node->tree_node = tree_node;
++ set_page_stable_node(kpage, new_stable_node);
++
++ return new_stable_node;
++}
++
++static inline
++struct stable_node *first_level_insert(struct tree_node *tree_node,
++ struct rmap_item *rmap_item,
++ struct rmap_item *tree_rmap_item,
++ struct page **kpage, u32 hash,
++ int *success1, int *success2)
++{
++ int cmp;
++ struct page *tree_page;
++ u32 hash_max = 0;
++ struct stable_node *stable_node, *new_snode;
++ struct rb_node *parent = NULL, **new;
++
++ /* this tree node contains no sub-tree yet */
++ stable_node = rb_entry(tree_node->sub_root.rb_node,
++ struct stable_node, node);
++
++ tree_page = get_uksm_page(stable_node, 1, 0);
++ if (tree_page) {
++ cmp = memcmp_pages_with_cost(*kpage, tree_page, 1);
++ if (!cmp) {
++ try_merge_with_stable(rmap_item, tree_rmap_item, kpage,
++ tree_page, success1, success2);
++ put_page(tree_page);
++ if (!*success1 && !*success2)
++ goto failed;
++
++ return stable_node;
++
++ } else {
++ /*
++ * collision in first level try to create a subtree.
++ * A new node need to be created.
++ */
++ put_page(tree_page);
++
++ stable_node_hash_max(stable_node, tree_page,
++ tree_node->hash);
++ hash_max = rmap_item_hash_max(rmap_item, hash);
++ cmp = hash_cmp(hash_max, stable_node->hash_max);
++
++ parent = &stable_node->node;
++ if (cmp < 0)
++ new = &parent->rb_left;
++ else if (cmp > 0)
++ new = &parent->rb_right;
++ else
++ goto failed;
++ }
++
++ } else {
++ /* the only stable_node deleted, we reuse its tree_node.
++ */
++ parent = NULL;
++ new = &tree_node->sub_root.rb_node;
++ }
++
++ new_snode = new_stable_node(tree_node, *kpage, hash_max);
++ if (!new_snode)
++ goto failed;
++
++ rb_link_node(&new_snode->node, parent, new);
++ rb_insert_color(&new_snode->node, &tree_node->sub_root);
++ tree_node->count++;
++ *success1 = *success2 = 1;
++
++ return new_snode;
++
++failed:
++ return NULL;
++}
++
++static inline
++struct stable_node *stable_subtree_insert(struct tree_node *tree_node,
++ struct rmap_item *rmap_item,
++ struct rmap_item *tree_rmap_item,
++ struct page **kpage, u32 hash,
++ int *success1, int *success2)
++{
++ struct page *tree_page;
++ u32 hash_max;
++ struct stable_node *stable_node, *new_snode;
++ struct rb_node *parent, **new;
++
++research:
++ parent = NULL;
++ new = &tree_node->sub_root.rb_node;
++ BUG_ON(!*new);
++ hash_max = rmap_item_hash_max(rmap_item, hash);
++ while (*new) {
++ int cmp;
++
++ stable_node = rb_entry(*new, struct stable_node, node);
++
++ cmp = hash_cmp(hash_max, stable_node->hash_max);
++
++ if (cmp < 0) {
++ parent = *new;
++ new = &parent->rb_left;
++ } else if (cmp > 0) {
++ parent = *new;
++ new = &parent->rb_right;
++ } else {
++ tree_page = get_uksm_page(stable_node, 1, 0);
++ if (tree_page) {
++ cmp = memcmp_pages_with_cost(*kpage, tree_page, 1);
++ if (!cmp) {
++ try_merge_with_stable(rmap_item,
++ tree_rmap_item, kpage,
++ tree_page, success1, success2);
++
++ put_page(tree_page);
++ if (!*success1 && !*success2)
++ goto failed;
++ /*
++ * successfully merged with a stable
++ * node
++ */
++ return stable_node;
++ } else {
++ put_page(tree_page);
++ goto failed;
++ }
++ } else {
++ /*
++ * stable node may be deleted,
++ * and subtree maybe
++ * restructed, cannot
++ * continue, research it.
++ */
++ if (tree_node->count) {
++ goto research;
++ } else {
++ /* reuse the tree node*/
++ parent = NULL;
++ new = &tree_node->sub_root.rb_node;
++ }
++ }
++ }
++ }
++
++ new_snode = new_stable_node(tree_node, *kpage, hash_max);
++ if (!new_snode)
++ goto failed;
++
++ rb_link_node(&new_snode->node, parent, new);
++ rb_insert_color(&new_snode->node, &tree_node->sub_root);
++ tree_node->count++;
++ *success1 = *success2 = 1;
++
++ return new_snode;
++
++failed:
++ return NULL;
++}
++
++
++/**
++ * stable_tree_insert() - try to insert a merged page in unstable tree to
++ * the stable tree
++ *
++ * @kpage: the page need to be inserted
++ * @hash: the current hash of this page
++ * @rmap_item: the rmap_item being scanned
++ * @tree_rmap_item: the rmap_item found on unstable tree
++ * @success1: return if rmap_item is merged
++ * @success2: return if tree_rmap_item is merged
++ *
++ * @return the stable_node on stable tree if at least one
++ * rmap_item is inserted into stable tree, NULL
++ * otherwise.
++ */
++static struct stable_node *
++stable_tree_insert(struct page **kpage, u32 hash,
++ struct rmap_item *rmap_item,
++ struct rmap_item *tree_rmap_item,
++ int *success1, int *success2)
++{
++ struct rb_node **new = &root_stable_treep->rb_node;
++ struct rb_node *parent = NULL;
++ struct stable_node *stable_node;
++ struct tree_node *tree_node;
++ u32 hash_max = 0;
++
++ *success1 = *success2 = 0;
++
++ while (*new) {
++ int cmp;
++
++ tree_node = rb_entry(*new, struct tree_node, node);
++
++ cmp = hash_cmp(hash, tree_node->hash);
++
++ if (cmp < 0) {
++ parent = *new;
++ new = &parent->rb_left;
++ } else if (cmp > 0) {
++ parent = *new;
++ new = &parent->rb_right;
++ } else
++ break;
++ }
++
++ if (*new) {
++ if (tree_node->count == 1) {
++ stable_node = first_level_insert(tree_node, rmap_item,
++ tree_rmap_item, kpage,
++ hash, success1, success2);
++ } else {
++ stable_node = stable_subtree_insert(tree_node,
++ rmap_item, tree_rmap_item, kpage,
++ hash, success1, success2);
++ }
++ } else {
++
++ /* no tree node found */
++ tree_node = alloc_tree_node(stable_tree_node_listp);
++ if (!tree_node) {
++ stable_node = NULL;
++ goto out;
++ }
++
++ stable_node = new_stable_node(tree_node, *kpage, hash_max);
++ if (!stable_node) {
++ free_tree_node(tree_node);
++ goto out;
++ }
++
++ tree_node->hash = hash;
++ rb_link_node(&tree_node->node, parent, new);
++ rb_insert_color(&tree_node->node, root_stable_treep);
++ parent = NULL;
++ new = &tree_node->sub_root.rb_node;
++
++ rb_link_node(&stable_node->node, parent, new);
++ rb_insert_color(&stable_node->node, &tree_node->sub_root);
++ tree_node->count++;
++ *success1 = *success2 = 1;
++ }
++
++out:
++ return stable_node;
++}
++
++
++/**
++ * get_tree_rmap_item_page() - try to get the page and lock the mmap_sem
++ *
++ * @return 0 on success, -EBUSY if unable to lock the mmap_sem,
++ * -EINVAL if the page mapping has been changed.
++ */
++static inline int get_tree_rmap_item_page(struct rmap_item *tree_rmap_item)
++{
++ int err;
++
++ err = get_mergeable_page_lock_mmap(tree_rmap_item);
++
++ if (err == -EINVAL) {
++ /* its page map has been changed, remove it */
++ remove_rmap_item_from_tree(tree_rmap_item);
++ }
++
++ /* The page is gotten and mmap_sem is locked now. */
++ return err;
++}
++
++
++/**
++ * unstable_tree_search_insert() - search an unstable tree rmap_item with the
++ * same hash value. Get its page and trylock the mmap_sem
++ */
++static inline
++struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
++ u32 hash)
++
++{
++ struct rb_node **new = &root_unstable_tree.rb_node;
++ struct rb_node *parent = NULL;
++ struct tree_node *tree_node;
++ u32 hash_max;
++ struct rmap_item *tree_rmap_item;
++
++ while (*new) {
++ int cmp;
++
++ tree_node = rb_entry(*new, struct tree_node, node);
++
++ cmp = hash_cmp(hash, tree_node->hash);
++
++ if (cmp < 0) {
++ parent = *new;
++ new = &parent->rb_left;
++ } else if (cmp > 0) {
++ parent = *new;
++ new = &parent->rb_right;
++ } else
++ break;
++ }
++
++ if (*new) {
++ /* got the tree_node */
++ if (tree_node->count == 1) {
++ tree_rmap_item = rb_entry(tree_node->sub_root.rb_node,
++ struct rmap_item, node);
++ BUG_ON(!tree_rmap_item);
++
++ goto get_page_out;
++ }
++
++ /* well, search the collision subtree */
++ new = &tree_node->sub_root.rb_node;
++ BUG_ON(!*new);
++ hash_max = rmap_item_hash_max(rmap_item, hash);
++
++ while (*new) {
++ int cmp;
++
++ tree_rmap_item = rb_entry(*new, struct rmap_item,
++ node);
++
++ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max);
++ parent = *new;
++ if (cmp < 0)
++ new = &parent->rb_left;
++ else if (cmp > 0)
++ new = &parent->rb_right;
++ else
++ goto get_page_out;
++ }
++ } else {
++ /* alloc a new tree_node */
++ tree_node = alloc_tree_node(&unstable_tree_node_list);
++ if (!tree_node)
++ return NULL;
++
++ tree_node->hash = hash;
++ rb_link_node(&tree_node->node, parent, new);
++ rb_insert_color(&tree_node->node, &root_unstable_tree);
++ parent = NULL;
++ new = &tree_node->sub_root.rb_node;
++ }
++
++ /* did not found even in sub-tree */
++ rmap_item->tree_node = tree_node;
++ rmap_item->address |= UNSTABLE_FLAG;
++ rmap_item->hash_round = uksm_hash_round;
++ rb_link_node(&rmap_item->node, parent, new);
++ rb_insert_color(&rmap_item->node, &tree_node->sub_root);
++
++ uksm_pages_unshared++;
++ return NULL;
++
++get_page_out:
++ if (tree_rmap_item->page == rmap_item->page)
++ return NULL;
++
++ if (get_tree_rmap_item_page(tree_rmap_item))
++ return NULL;
++
++ return tree_rmap_item;
++}
++
++static void hold_anon_vma(struct rmap_item *rmap_item,
++ struct anon_vma *anon_vma)
++{
++ rmap_item->anon_vma = anon_vma;
++ get_anon_vma(anon_vma);
++}
++
++
++/**
++ * stable_tree_append() - append a rmap_item to a stable node. Deduplication
++ * ratio statistics is done in this function.
++ *
++ */
++static void stable_tree_append(struct rmap_item *rmap_item,
++ struct stable_node *stable_node, int logdedup)
++{
++ struct node_vma *node_vma = NULL, *new_node_vma, *node_vma_cont = NULL;
++ unsigned long key = (unsigned long)rmap_item->slot;
++ unsigned long factor = rmap_item->slot->rung->step;
++
++ BUG_ON(!stable_node);
++ rmap_item->address |= STABLE_FLAG;
++
++ if (hlist_empty(&stable_node->hlist)) {
++ uksm_pages_shared++;
++ goto node_vma_new;
++ } else {
++ uksm_pages_sharing++;
++ }
++
++ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) {
++ if (node_vma->key >= key)
++ break;
++
++ if (logdedup) {
++ node_vma->slot->pages_bemerged += factor;
++ if (list_empty(&node_vma->slot->dedup_list))
++ list_add(&node_vma->slot->dedup_list,
++ &vma_slot_dedup);
++ }
++ }
++
++ if (node_vma) {
++ if (node_vma->key == key) {
++ node_vma_cont = hlist_entry_safe(node_vma->hlist.next, struct node_vma, hlist);
++ goto node_vma_ok;
++ } else if (node_vma->key > key) {
++ node_vma_cont = node_vma;
++ }
++ }
++
++node_vma_new:
++ /* no same vma already in node, alloc a new node_vma */
++ new_node_vma = alloc_node_vma();
++ BUG_ON(!new_node_vma);
++ new_node_vma->head = stable_node;
++ new_node_vma->slot = rmap_item->slot;
++
++ if (!node_vma) {
++ hlist_add_head(&new_node_vma->hlist, &stable_node->hlist);
++ } else if (node_vma->key != key) {
++ if (node_vma->key < key)
++ hlist_add_behind(&new_node_vma->hlist, &node_vma->hlist);
++ else {
++ hlist_add_before(&new_node_vma->hlist,
++ &node_vma->hlist);
++ }
++
++ }
++ node_vma = new_node_vma;
++
++node_vma_ok: /* ok, ready to add to the list */
++ rmap_item->head = node_vma;
++ hlist_add_head(&rmap_item->hlist, &node_vma->rmap_hlist);
++ hold_anon_vma(rmap_item, rmap_item->slot->vma->anon_vma);
++ if (logdedup) {
++ rmap_item->slot->pages_merged++;
++ if (node_vma_cont) {
++ node_vma = node_vma_cont;
++ hlist_for_each_entry_continue(node_vma, hlist) {
++ node_vma->slot->pages_bemerged += factor;
++ if (list_empty(&node_vma->slot->dedup_list))
++ list_add(&node_vma->slot->dedup_list,
++ &vma_slot_dedup);
++ }
++ }
++ }
++}
++
++/*
++ * We use break_ksm to break COW on a ksm page: it's a stripped down
++ *
++ * if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
++ * put_page(page);
++ *
++ * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
++ * in case the application has unmapped and remapped mm,addr meanwhile.
++ * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
++ * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
++ */
++static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
++{
++ struct page *page;
++ int ret = 0;
++
++ do {
++ cond_resched();
++ page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
++ if (IS_ERR_OR_NULL(page))
++ break;
++ if (PageKsm(page)) {
++ ret = handle_mm_fault(vma, addr,
++ FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
++ NULL);
++ } else
++ ret = VM_FAULT_WRITE;
++ put_page(page);
++ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
++ /*
++ * We must loop because handle_mm_fault() may back out if there's
++ * any difficulty e.g. if pte accessed bit gets updated concurrently.
++ *
++ * VM_FAULT_WRITE is what we have been hoping for: it indicates that
++ * COW has been broken, even if the vma does not permit VM_WRITE;
++ * but note that a concurrent fault might break PageKsm for us.
++ *
++ * VM_FAULT_SIGBUS could occur if we race with truncation of the
++ * backing file, which also invalidates anonymous pages: that's
++ * okay, that truncation will have unmapped the PageKsm for us.
++ *
++ * VM_FAULT_OOM: at the time of writing (late July 2009), setting
++ * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
++ * current task has TIF_MEMDIE set, and will be OOM killed on return
++ * to user; and ksmd, having no mm, would never be chosen for that.
++ *
++ * But if the mm is in a limited mem_cgroup, then the fault may fail
++ * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
++ * even ksmd can fail in this way - though it's usually breaking ksm
++ * just to undo a merge it made a moment before, so unlikely to oom.
++ *
++ * That's a pity: we might therefore have more kernel pages allocated
++ * than we're counting as nodes in the stable tree; but uksm_do_scan
++ * will retry to break_cow on each pass, so should recover the page
++ * in due course. The important thing is to not let VM_MERGEABLE
++ * be cleared while any such pages might remain in the area.
++ */
++ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
++}
++
++static void break_cow(struct rmap_item *rmap_item)
++{
++ struct vm_area_struct *vma = rmap_item->slot->vma;
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long addr = get_rmap_addr(rmap_item);
++
++ if (uksm_test_exit(mm))
++ goto out;
++
++ break_ksm(vma, addr);
++out:
++ return;
++}
++
++/*
++ * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
++ * than check every pte of a given vma, the locking doesn't quite work for
++ * that - an rmap_item is assigned to the stable tree after inserting ksm
++ * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
++ * rmap_items from parent to child at fork time (so as not to waste time
++ * if exit comes before the next scan reaches it).
++ *
++ * Similarly, although we'd like to remove rmap_items (so updating counts
++ * and freeing memory) when unmerging an area, it's easier to leave that
++ * to the next pass of ksmd - consider, for example, how ksmd might be
++ * in cmp_and_merge_page on one of the rmap_items we would be removing.
++ */
++inline int unmerge_uksm_pages(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++ unsigned long addr;
++ int err = 0;
++
++ for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
++ if (uksm_test_exit(vma->vm_mm))
++ break;
++ if (signal_pending(current))
++ err = -ERESTARTSYS;
++ else
++ err = break_ksm(vma, addr);
++ }
++ return err;
++}
++
++static inline void inc_uksm_pages_scanned(void)
++{
++ u64 delta;
++
++
++ if (uksm_pages_scanned == U64_MAX) {
++ encode_benefit();
++
++ delta = uksm_pages_scanned >> pages_scanned_base;
++
++ if (CAN_OVERFLOW_U64(pages_scanned_stored, delta)) {
++ pages_scanned_stored >>= 1;
++ delta >>= 1;
++ pages_scanned_base++;
++ }
++
++ pages_scanned_stored += delta;
++
++ uksm_pages_scanned = uksm_pages_scanned_last = 0;
++ }
++
++ uksm_pages_scanned++;
++}
++
++static inline int find_zero_page_hash(int strength, u32 hash)
++{
++ return (zero_hash_table[strength] == hash);
++}
++
++static
++int cmp_and_merge_zero_page(struct vm_area_struct *vma, struct page *page)
++{
++ struct page *zero_page = empty_uksm_zero_page;
++ struct mm_struct *mm = vma->vm_mm;
++ pte_t orig_pte = __pte(0);
++ int err = -EFAULT;
++
++ if (uksm_test_exit(mm))
++ goto out;
++
++ if (!trylock_page(page))
++ goto out;
++
++ if (!PageAnon(page))
++ goto out_unlock;
++
++ if (PageTransCompound(page)) {
++ err = split_huge_page(page);
++ if (err)
++ goto out_unlock;
++ }
++
++ if (write_protect_page(vma, page, &orig_pte, 0) == 0) {
++ if (is_page_full_zero(page))
++ err = replace_page(vma, page, zero_page, orig_pte);
++ }
++
++out_unlock:
++ unlock_page(page);
++out:
++ return err;
++}
++
++/*
++ * cmp_and_merge_page() - first see if page can be merged into the stable
++ * tree; if not, compare hash to previous and if it's the same, see if page
++ * can be inserted into the unstable tree, or merged with a page already there
++ * and both transferred to the stable tree.
++ *
++ * @rmap_item: the reverse mapping item whose page we search an identical
++ *             page for; it records the virtual address of this page
++ */
++static void cmp_and_merge_page(struct rmap_item *rmap_item, u32 hash)
++{
++ struct rmap_item *tree_rmap_item;
++ struct page *page;
++ struct page *kpage = NULL;
++ u32 hash_max;
++ int err;
++ unsigned int success1, success2;
++ struct stable_node *snode;
++ int cmp;
++ struct rb_node *parent = NULL, **new;
++
++ remove_rmap_item_from_tree(rmap_item);
++ page = rmap_item->page;
++
++ /* We first start with searching the page inside the stable tree */
++ kpage = stable_tree_search(rmap_item, hash);
++ if (kpage) {
++ err = try_to_merge_with_uksm_page(rmap_item, kpage,
++ hash);
++ if (!err) {
++ /*
++ * The page was successfully merged, add
++ * its rmap_item to the stable tree.
++ * page lock is needed because it's
++ * racing with try_to_unmap_ksm(), etc.
++ */
++ lock_page(kpage);
++ snode = page_stable_node(kpage);
++ stable_tree_append(rmap_item, snode, 1);
++ unlock_page(kpage);
++ put_page(kpage);
++ return; /* success */
++ }
++ put_page(kpage);
++
++ /*
++	 * if it's a collision and it has been searched in the sub-rbtree
++ * (hash_max != 0), we want to abort, because if it is
++ * successfully merged in unstable tree, the collision trends to
++ * happen again.
++ */
++ if (err == MERGE_ERR_COLLI && rmap_item->hash_max)
++ return;
++ }
++
++ tree_rmap_item =
++ unstable_tree_search_insert(rmap_item, hash);
++ if (tree_rmap_item) {
++ err = try_to_merge_two_pages(rmap_item, tree_rmap_item, hash);
++ /*
++ * As soon as we merge this page, we want to remove the
++ * rmap_item of the page we have merged with from the unstable
++ * tree, and insert it instead as new node in the stable tree.
++ */
++ if (!err) {
++ kpage = page;
++ remove_rmap_item_from_tree(tree_rmap_item);
++ lock_page(kpage);
++ snode = stable_tree_insert(&kpage, hash,
++ rmap_item, tree_rmap_item,
++ &success1, &success2);
++
++ /*
++ * Do not log dedup for tree item, it's not counted as
++ * scanned in this round.
++ */
++ if (success2)
++ stable_tree_append(tree_rmap_item, snode, 0);
++
++ /*
++ * The order of these two stable append is important:
++ * we are scanning rmap_item.
++ */
++ if (success1)
++ stable_tree_append(rmap_item, snode, 1);
++
++ /*
++ * The original kpage may be unlocked inside
++ * stable_tree_insert() already. This page
++ * should be unlocked before doing
++ * break_cow().
++ */
++ unlock_page(kpage);
++
++ if (!success1)
++ break_cow(rmap_item);
++
++ if (!success2)
++ break_cow(tree_rmap_item);
++
++ } else if (err == MERGE_ERR_COLLI) {
++ BUG_ON(tree_rmap_item->tree_node->count > 1);
++
++ rmap_item_hash_max(tree_rmap_item,
++ tree_rmap_item->tree_node->hash);
++
++ hash_max = rmap_item_hash_max(rmap_item, hash);
++ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max);
++ parent = &tree_rmap_item->node;
++ if (cmp < 0)
++ new = &parent->rb_left;
++ else if (cmp > 0)
++ new = &parent->rb_right;
++ else
++ goto put_up_out;
++
++ rmap_item->tree_node = tree_rmap_item->tree_node;
++ rmap_item->address |= UNSTABLE_FLAG;
++ rmap_item->hash_round = uksm_hash_round;
++ rb_link_node(&rmap_item->node, parent, new);
++ rb_insert_color(&rmap_item->node,
++ &tree_rmap_item->tree_node->sub_root);
++ rmap_item->tree_node->count++;
++ } else {
++ /*
++ * either one of the page has changed or they collide
++ * at the max hash, we consider them as ill items.
++ */
++ remove_rmap_item_from_tree(tree_rmap_item);
++ }
++put_up_out:
++ put_page(tree_rmap_item->page);
++ mmap_read_unlock(tree_rmap_item->slot->vma->vm_mm);
++ }
++}
++
++
++
++
++static inline unsigned long get_pool_index(struct vma_slot *slot,
++ unsigned long index)
++{
++ unsigned long pool_index;
++
++ pool_index = (sizeof(struct rmap_list_entry *) * index) >> PAGE_SHIFT;
++ if (pool_index >= slot->pool_size)
++ BUG();
++ return pool_index;
++}
++
++static inline unsigned long index_page_offset(unsigned long index)
++{
++ return offset_in_page(sizeof(struct rmap_list_entry *) * index);
++}
++
++static inline
++struct rmap_list_entry *get_rmap_list_entry(struct vma_slot *slot,
++ unsigned long index, int need_alloc)
++{
++ unsigned long pool_index;
++ struct page *page;
++ void *addr;
++
++
++ pool_index = get_pool_index(slot, index);
++ if (!slot->rmap_list_pool[pool_index]) {
++ if (!need_alloc)
++ return NULL;
++
++ page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
++ if (!page)
++ return NULL;
++
++ slot->rmap_list_pool[pool_index] = page;
++ }
++
++ addr = kmap(slot->rmap_list_pool[pool_index]);
++ addr += index_page_offset(index);
++
++ return addr;
++}
++
++static inline void put_rmap_list_entry(struct vma_slot *slot,
++ unsigned long index)
++{
++ unsigned long pool_index;
++
++ pool_index = get_pool_index(slot, index);
++ BUG_ON(!slot->rmap_list_pool[pool_index]);
++ kunmap(slot->rmap_list_pool[pool_index]);
++}
++
++static inline int entry_is_new(struct rmap_list_entry *entry)
++{
++ return !entry->item;
++}
++
++static inline unsigned long get_index_orig_addr(struct vma_slot *slot,
++ unsigned long index)
++{
++ return slot->vma->vm_start + (index << PAGE_SHIFT);
++}
++
++static inline unsigned long get_entry_address(struct rmap_list_entry *entry)
++{
++ unsigned long addr;
++
++ if (is_addr(entry->addr))
++ addr = get_clean_addr(entry->addr);
++ else if (entry->item)
++ addr = get_rmap_addr(entry->item);
++ else
++ BUG();
++
++ return addr;
++}
++
++static inline struct rmap_item *get_entry_item(struct rmap_list_entry *entry)
++{
++ if (is_addr(entry->addr))
++ return NULL;
++
++ return entry->item;
++}
++
++static inline void inc_rmap_list_pool_count(struct vma_slot *slot,
++ unsigned long index)
++{
++ unsigned long pool_index;
++
++ pool_index = get_pool_index(slot, index);
++ BUG_ON(!slot->rmap_list_pool[pool_index]);
++ slot->pool_counts[pool_index]++;
++}
++
++static inline void dec_rmap_list_pool_count(struct vma_slot *slot,
++ unsigned long index)
++{
++ unsigned long pool_index;
++
++ pool_index = get_pool_index(slot, index);
++ BUG_ON(!slot->rmap_list_pool[pool_index]);
++ BUG_ON(!slot->pool_counts[pool_index]);
++ slot->pool_counts[pool_index]--;
++}
++
++static inline int entry_has_rmap(struct rmap_list_entry *entry)
++{
++ return !is_addr(entry->addr) && entry->item;
++}
++
++static inline void swap_entries(struct rmap_list_entry *entry1,
++ unsigned long index1,
++ struct rmap_list_entry *entry2,
++ unsigned long index2)
++{
++ struct rmap_list_entry tmp;
++
++ /* swapping two new entries is meaningless */
++ BUG_ON(entry_is_new(entry1) && entry_is_new(entry2));
++
++ tmp = *entry1;
++ *entry1 = *entry2;
++ *entry2 = tmp;
++
++ if (entry_has_rmap(entry1))
++ entry1->item->entry_index = index1;
++
++ if (entry_has_rmap(entry2))
++ entry2->item->entry_index = index2;
++
++ if (entry_has_rmap(entry1) && !entry_has_rmap(entry2)) {
++ inc_rmap_list_pool_count(entry1->item->slot, index1);
++ dec_rmap_list_pool_count(entry1->item->slot, index2);
++ } else if (!entry_has_rmap(entry1) && entry_has_rmap(entry2)) {
++ inc_rmap_list_pool_count(entry2->item->slot, index2);
++ dec_rmap_list_pool_count(entry2->item->slot, index1);
++ }
++}
++
++static inline void free_entry_item(struct rmap_list_entry *entry)
++{
++ unsigned long index;
++ struct rmap_item *item;
++
++ if (!is_addr(entry->addr)) {
++ BUG_ON(!entry->item);
++ item = entry->item;
++ entry->addr = get_rmap_addr(item);
++ set_is_addr(entry->addr);
++ index = item->entry_index;
++ remove_rmap_item_from_tree(item);
++ dec_rmap_list_pool_count(item->slot, index);
++ free_rmap_item(item);
++ }
++}
++
++static inline int pool_entry_boundary(unsigned long index)
++{
++ unsigned long linear_addr;
++
++ linear_addr = sizeof(struct rmap_list_entry *) * index;
++ return index && !offset_in_page(linear_addr);
++}
++
++static inline void try_free_last_pool(struct vma_slot *slot,
++ unsigned long index)
++{
++ unsigned long pool_index;
++
++ pool_index = get_pool_index(slot, index);
++ if (slot->rmap_list_pool[pool_index] &&
++ !slot->pool_counts[pool_index]) {
++ __free_page(slot->rmap_list_pool[pool_index]);
++ slot->rmap_list_pool[pool_index] = NULL;
++ slot->flags |= UKSM_SLOT_NEED_SORT;
++ }
++
++}
++
++static inline unsigned long vma_item_index(struct vm_area_struct *vma,
++ struct rmap_item *item)
++{
++ return (get_rmap_addr(item) - vma->vm_start) >> PAGE_SHIFT;
++}
++
++static int within_same_pool(struct vma_slot *slot,
++ unsigned long i, unsigned long j)
++{
++ unsigned long pool_i, pool_j;
++
++ pool_i = get_pool_index(slot, i);
++ pool_j = get_pool_index(slot, j);
++
++ return (pool_i == pool_j);
++}
++
++static void sort_rmap_entry_list(struct vma_slot *slot)
++{
++ unsigned long i, j;
++ struct rmap_list_entry *entry, *swap_entry;
++
++ entry = get_rmap_list_entry(slot, 0, 0);
++ for (i = 0; i < slot->pages; ) {
++
++ if (!entry)
++ goto skip_whole_pool;
++
++ if (entry_is_new(entry))
++ goto next_entry;
++
++ if (is_addr(entry->addr)) {
++ entry->addr = 0;
++ goto next_entry;
++ }
++
++ j = vma_item_index(slot->vma, entry->item);
++ if (j == i)
++ goto next_entry;
++
++ if (within_same_pool(slot, i, j))
++ swap_entry = entry + j - i;
++ else
++ swap_entry = get_rmap_list_entry(slot, j, 1);
++
++ swap_entries(entry, i, swap_entry, j);
++ if (!within_same_pool(slot, i, j))
++ put_rmap_list_entry(slot, j);
++ continue;
++
++skip_whole_pool:
++ i += PAGE_SIZE / sizeof(*entry);
++ if (i < slot->pages)
++ entry = get_rmap_list_entry(slot, i, 0);
++ continue;
++
++next_entry:
++ if (i >= slot->pages - 1 ||
++ !within_same_pool(slot, i, i + 1)) {
++ put_rmap_list_entry(slot, i);
++ if (i + 1 < slot->pages)
++ entry = get_rmap_list_entry(slot, i + 1, 0);
++ } else
++ entry++;
++ i++;
++ continue;
++ }
++
++ /* free empty pool entries which contain no rmap_item */
++	/* Can be simplified to rely on pool_counts alone once proven bug-free */
++ for (i = 0; i < slot->pool_size; i++) {
++ unsigned char has_rmap;
++ void *addr;
++
++ if (!slot->rmap_list_pool[i])
++ continue;
++
++ has_rmap = 0;
++ addr = kmap(slot->rmap_list_pool[i]);
++ BUG_ON(!addr);
++ for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) {
++ entry = (struct rmap_list_entry *)addr + j;
++ if (is_addr(entry->addr))
++ continue;
++ if (!entry->item)
++ continue;
++ has_rmap = 1;
++ }
++ kunmap(slot->rmap_list_pool[i]);
++ if (!has_rmap) {
++ BUG_ON(slot->pool_counts[i]);
++ __free_page(slot->rmap_list_pool[i]);
++ slot->rmap_list_pool[i] = NULL;
++ }
++ }
++
++ slot->flags &= ~UKSM_SLOT_NEED_SORT;
++}
++
++/*
++ * vma_fully_scanned() - if all the pages in this slot have been scanned.
++ */
++static inline int vma_fully_scanned(struct vma_slot *slot)
++{
++ return slot->pages_scanned == slot->pages;
++}
++
++/**
++ * get_next_rmap_item() - Get the next rmap_item in a vma_slot according to
++ * its random permutation. This function is embedded with the random
++ * permutation index management code.
++ */
++static struct rmap_item *get_next_rmap_item(struct vma_slot *slot, u32 *hash)
++{
++ unsigned long rand_range, addr, swap_index, scan_index;
++ struct rmap_item *item = NULL;
++ struct rmap_list_entry *scan_entry, *swap_entry = NULL;
++ struct page *page;
++
++ scan_index = swap_index = slot->pages_scanned % slot->pages;
++
++ if (pool_entry_boundary(scan_index))
++ try_free_last_pool(slot, scan_index - 1);
++
++ if (vma_fully_scanned(slot)) {
++ if (slot->flags & UKSM_SLOT_NEED_SORT)
++ slot->flags |= UKSM_SLOT_NEED_RERAND;
++ else
++ slot->flags &= ~UKSM_SLOT_NEED_RERAND;
++ if (slot->flags & UKSM_SLOT_NEED_SORT)
++ sort_rmap_entry_list(slot);
++ }
++
++ scan_entry = get_rmap_list_entry(slot, scan_index, 1);
++ if (!scan_entry)
++ return NULL;
++
++ if (entry_is_new(scan_entry)) {
++ scan_entry->addr = get_index_orig_addr(slot, scan_index);
++ set_is_addr(scan_entry->addr);
++ }
++
++ if (slot->flags & UKSM_SLOT_NEED_RERAND) {
++ rand_range = slot->pages - scan_index;
++ BUG_ON(!rand_range);
++ swap_index = scan_index + (prandom_u32() % rand_range);
++ }
++
++ if (swap_index != scan_index) {
++ swap_entry = get_rmap_list_entry(slot, swap_index, 1);
++
++ if (!swap_entry)
++ return NULL;
++
++ if (entry_is_new(swap_entry)) {
++ swap_entry->addr = get_index_orig_addr(slot,
++ swap_index);
++ set_is_addr(swap_entry->addr);
++ }
++ swap_entries(scan_entry, scan_index, swap_entry, swap_index);
++ }
++
++ addr = get_entry_address(scan_entry);
++ item = get_entry_item(scan_entry);
++ BUG_ON(addr > slot->vma->vm_end || addr < slot->vma->vm_start);
++
++ page = follow_page(slot->vma, addr, FOLL_GET);
++ if (IS_ERR_OR_NULL(page))
++ goto nopage;
++
++ if (!PageAnon(page))
++ goto putpage;
++
++ /*check is zero_page pfn or uksm_zero_page*/
++ if ((page_to_pfn(page) == zero_pfn)
++ || (page_to_pfn(page) == uksm_zero_pfn))
++ goto putpage;
++
++ flush_anon_page(slot->vma, page, addr);
++ flush_dcache_page(page);
++
++
++ *hash = page_hash(page, hash_strength, 1);
++ inc_uksm_pages_scanned();
++ /*if the page content all zero, re-map to zero-page*/
++ if (find_zero_page_hash(hash_strength, *hash)) {
++ if (!cmp_and_merge_zero_page(slot->vma, page)) {
++ slot->pages_merged++;
++
++ /* For full-zero pages, no need to create rmap item */
++ goto putpage;
++ } else {
++ inc_rshash_neg(memcmp_cost / 2);
++ }
++ }
++
++ if (!item) {
++ item = alloc_rmap_item();
++ if (item) {
++ /* It has already been zeroed */
++ item->slot = slot;
++ item->address = addr;
++ item->entry_index = scan_index;
++ scan_entry->item = item;
++ inc_rmap_list_pool_count(slot, scan_index);
++ } else
++ goto putpage;
++ }
++
++ BUG_ON(item->slot != slot);
++ /* the page may have changed */
++ item->page = page;
++ put_rmap_list_entry(slot, scan_index);
++ if (swap_entry)
++ put_rmap_list_entry(slot, swap_index);
++ return item;
++
++putpage:
++ put_page(page);
++ page = NULL;
++nopage:
++ /* no page, store addr back and free rmap_item if possible */
++ free_entry_item(scan_entry);
++ put_rmap_list_entry(slot, scan_index);
++ if (swap_entry)
++ put_rmap_list_entry(slot, swap_index);
++ return NULL;
++}
++
++static inline int in_stable_tree(struct rmap_item *rmap_item)
++{
++ return rmap_item->address & STABLE_FLAG;
++}
++
++/**
++ * scan_vma_one_page() - scan the next page in a vma_slot. Called with
++ * mmap_lock (formerly mmap_sem) held.
++ */
++static noinline void scan_vma_one_page(struct vma_slot *slot)
++{
++ u32 hash;
++ struct mm_struct *mm;
++ struct rmap_item *rmap_item = NULL;
++ struct vm_area_struct *vma = slot->vma;
++
++ mm = vma->vm_mm;
++ BUG_ON(!mm);
++ BUG_ON(!slot);
++
++ rmap_item = get_next_rmap_item(slot, &hash);
++ if (!rmap_item)
++ goto out1;
++
++ if (PageKsm(rmap_item->page) && in_stable_tree(rmap_item))
++ goto out2;
++
++ cmp_and_merge_page(rmap_item, hash);
++out2:
++ put_page(rmap_item->page);
++out1:
++ slot->pages_scanned++;
++ slot->this_sampled++;
++ if (slot->fully_scanned_round != fully_scanned_round)
++ scanned_virtual_pages++;
++
++ if (vma_fully_scanned(slot))
++ slot->fully_scanned_round = fully_scanned_round;
++}
++
++static inline unsigned long rung_get_pages(struct scan_rung *rung)
++{
++ struct slot_tree_node *node;
++
++ if (!rung->vma_root.rnode)
++ return 0;
++
++ node = container_of(rung->vma_root.rnode, struct slot_tree_node, snode);
++
++ return node->size;
++}
++
++#define RUNG_SAMPLED_MIN 3
++
++static inline
++void uksm_calc_rung_step(struct scan_rung *rung,
++ unsigned long page_time, unsigned long ratio)
++{
++ unsigned long sampled, pages;
++
++ /* will be fully scanned ? */
++ if (!rung->cover_msecs) {
++ rung->step = 1;
++ return;
++ }
++
++ sampled = rung->cover_msecs * (NSEC_PER_MSEC / TIME_RATIO_SCALE)
++ * ratio / page_time;
++
++ /*
++	 * Before we finish a scan round and its expensive per-round jobs,
++ * we need to have a chance to estimate the per page time. So
++ * the sampled number can not be too small.
++ */
++ if (sampled < RUNG_SAMPLED_MIN)
++ sampled = RUNG_SAMPLED_MIN;
++
++ pages = rung_get_pages(rung);
++ if (likely(pages > sampled))
++ rung->step = pages / sampled;
++ else
++ rung->step = 1;
++}
++
++static inline int step_need_recalc(struct scan_rung *rung)
++{
++ unsigned long pages, stepmax;
++
++ pages = rung_get_pages(rung);
++ stepmax = pages / RUNG_SAMPLED_MIN;
++
++ return pages && (rung->step > pages ||
++ (stepmax && rung->step > stepmax));
++}
++
++static inline
++void reset_current_scan(struct scan_rung *rung, int finished, int step_recalc)
++{
++ struct vma_slot *slot;
++
++ if (finished)
++ rung->flags |= UKSM_RUNG_ROUND_FINISHED;
++
++ if (step_recalc || step_need_recalc(rung)) {
++ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio);
++ BUG_ON(step_need_recalc(rung));
++ }
++
++ slot_iter_index = prandom_u32() % rung->step;
++ BUG_ON(!rung->vma_root.rnode);
++ slot = sradix_tree_next(&rung->vma_root, NULL, 0, slot_iter);
++ BUG_ON(!slot);
++
++ rung->current_scan = slot;
++ rung->current_offset = slot_iter_index;
++}
++
++static inline struct sradix_tree_root *slot_get_root(struct vma_slot *slot)
++{
++ return &slot->rung->vma_root;
++}
++
++/*
++ * Return 1 if the scan position was reset, 0 otherwise.
++ */
++static int advance_current_scan(struct scan_rung *rung)
++{
++ unsigned short n;
++ struct vma_slot *slot, *next = NULL;
++
++ BUG_ON(!rung->vma_root.num);
++
++ slot = rung->current_scan;
++ n = (slot->pages - rung->current_offset) % rung->step;
++ slot_iter_index = rung->step - n;
++ next = sradix_tree_next(&rung->vma_root, slot->snode,
++ slot->sindex, slot_iter);
++
++ if (next) {
++ rung->current_offset = slot_iter_index;
++ rung->current_scan = next;
++ return 0;
++ } else {
++ reset_current_scan(rung, 1, 0);
++ return 1;
++ }
++}
++
++static inline void rung_rm_slot(struct vma_slot *slot)
++{
++ struct scan_rung *rung = slot->rung;
++ struct sradix_tree_root *root;
++
++ if (rung->current_scan == slot)
++ advance_current_scan(rung);
++
++ root = slot_get_root(slot);
++ sradix_tree_delete_from_leaf(root, slot->snode, slot->sindex);
++ slot->snode = NULL;
++ if (step_need_recalc(rung)) {
++ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio);
++ BUG_ON(step_need_recalc(rung));
++ }
++
++ /* In case advance_current_scan loop back to this slot again */
++ if (rung->vma_root.num && rung->current_scan == slot)
++ reset_current_scan(slot->rung, 1, 0);
++}
++
++static inline void rung_add_new_slots(struct scan_rung *rung,
++ struct vma_slot **slots, unsigned long num)
++{
++ int err;
++ struct vma_slot *slot;
++ unsigned long i;
++ struct sradix_tree_root *root = &rung->vma_root;
++
++ err = sradix_tree_enter(root, (void **)slots, num);
++ BUG_ON(err);
++
++ for (i = 0; i < num; i++) {
++ slot = slots[i];
++ slot->rung = rung;
++ BUG_ON(vma_fully_scanned(slot));
++ }
++
++ if (rung->vma_root.num == num)
++ reset_current_scan(rung, 0, 1);
++}
++
++static inline int rung_add_one_slot(struct scan_rung *rung,
++ struct vma_slot *slot)
++{
++ int err;
++
++ err = sradix_tree_enter(&rung->vma_root, (void **)&slot, 1);
++ if (err)
++ return err;
++
++ slot->rung = rung;
++ if (rung->vma_root.num == 1)
++ reset_current_scan(rung, 0, 1);
++
++ return 0;
++}
++
++/*
++ * Return true if the slot is deleted from its rung.
++ */
++static inline int vma_rung_enter(struct vma_slot *slot, struct scan_rung *rung)
++{
++ struct scan_rung *old_rung = slot->rung;
++ int err;
++
++ if (old_rung == rung)
++ return 0;
++
++ rung_rm_slot(slot);
++ err = rung_add_one_slot(rung, slot);
++ if (err) {
++ err = rung_add_one_slot(old_rung, slot);
++ WARN_ON(err); /* OOPS, badly OOM, we lost this slot */
++ }
++
++ return 1;
++}
++
++static inline int vma_rung_up(struct vma_slot *slot)
++{
++ struct scan_rung *rung;
++
++ rung = slot->rung;
++ if (slot->rung != &uksm_scan_ladder[SCAN_LADDER_SIZE-1])
++ rung++;
++
++ return vma_rung_enter(slot, rung);
++}
++
++static inline int vma_rung_down(struct vma_slot *slot)
++{
++ struct scan_rung *rung;
++
++ rung = slot->rung;
++ if (slot->rung != &uksm_scan_ladder[0])
++ rung--;
++
++ return vma_rung_enter(slot, rung);
++}
++
++/**
++ * cal_dedup_ratio() - Calculate the deduplication ratio for this slot.
++ */
++static unsigned long cal_dedup_ratio(struct vma_slot *slot)
++{
++ unsigned long ret;
++ unsigned long pages;
++
++ pages = slot->this_sampled;
++ if (!pages)
++ return 0;
++
++ BUG_ON(slot->pages_scanned == slot->last_scanned);
++
++ ret = slot->pages_merged;
++
++ /* Thrashing area filtering */
++ if (ret && uksm_thrash_threshold) {
++ if (slot->pages_cowed * 100 / slot->pages_merged
++ > uksm_thrash_threshold) {
++ ret = 0;
++ } else {
++ ret = slot->pages_merged - slot->pages_cowed;
++ }
++ }
++
++ return ret * 100 / pages;
++}
++
++/**
++ * cal_dedup_ratio_old() - Calculate the deduplication ratio for this slot.
++ */
++static unsigned long cal_dedup_ratio_old(struct vma_slot *slot)
++{
++ unsigned long ret;
++ unsigned long pages;
++
++ pages = slot->pages;
++ if (!pages)
++ return 0;
++
++ ret = slot->pages_bemerged;
++
++ /* Thrashing area filtering */
++ if (ret && uksm_thrash_threshold) {
++ if (slot->pages_cowed * 100 / slot->pages_bemerged
++ > uksm_thrash_threshold) {
++ ret = 0;
++ } else {
++ ret = slot->pages_bemerged - slot->pages_cowed;
++ }
++ }
++
++ return ret * 100 / pages;
++}
++
++/**
++ * stable_node_reinsert() - When the hash_strength has been adjusted, the
++ * stable tree need to be restructured, this is the function re-inserting the
++ * stable node.
++ */
++static inline void stable_node_reinsert(struct stable_node *new_node,
++ struct page *page,
++ struct rb_root *root_treep,
++ struct list_head *tree_node_listp,
++ u32 hash)
++{
++ struct rb_node **new = &root_treep->rb_node;
++ struct rb_node *parent = NULL;
++ struct stable_node *stable_node;
++ struct tree_node *tree_node;
++ struct page *tree_page;
++ int cmp;
++
++ while (*new) {
++ int cmp;
++
++ tree_node = rb_entry(*new, struct tree_node, node);
++
++ cmp = hash_cmp(hash, tree_node->hash);
++
++ if (cmp < 0) {
++ parent = *new;
++ new = &parent->rb_left;
++ } else if (cmp > 0) {
++ parent = *new;
++ new = &parent->rb_right;
++ } else
++ break;
++ }
++
++ if (*new) {
++ /* find a stable tree node with same first level hash value */
++ stable_node_hash_max(new_node, page, hash);
++ if (tree_node->count == 1) {
++ stable_node = rb_entry(tree_node->sub_root.rb_node,
++ struct stable_node, node);
++ tree_page = get_uksm_page(stable_node, 1, 0);
++ if (tree_page) {
++ stable_node_hash_max(stable_node,
++ tree_page, hash);
++ put_page(tree_page);
++
++ /* prepare for stable node insertion */
++
++ cmp = hash_cmp(new_node->hash_max,
++ stable_node->hash_max);
++ parent = &stable_node->node;
++ if (cmp < 0)
++ new = &parent->rb_left;
++ else if (cmp > 0)
++ new = &parent->rb_right;
++ else
++ goto failed;
++
++ goto add_node;
++ } else {
++ /* the only stable_node deleted, the tree node
++ * was not deleted.
++ */
++ goto tree_node_reuse;
++ }
++ }
++
++ /* well, search the collision subtree */
++ new = &tree_node->sub_root.rb_node;
++ parent = NULL;
++ BUG_ON(!*new);
++ while (*new) {
++ int cmp;
++
++ stable_node = rb_entry(*new, struct stable_node, node);
++
++ cmp = hash_cmp(new_node->hash_max,
++ stable_node->hash_max);
++
++ if (cmp < 0) {
++ parent = *new;
++ new = &parent->rb_left;
++ } else if (cmp > 0) {
++ parent = *new;
++ new = &parent->rb_right;
++ } else {
++ /* oh, no, still a collision */
++ goto failed;
++ }
++ }
++
++ goto add_node;
++ }
++
++ /* no tree node found */
++ tree_node = alloc_tree_node(tree_node_listp);
++ if (!tree_node) {
++ pr_err("UKSM: memory allocation error!\n");
++ goto failed;
++ } else {
++ tree_node->hash = hash;
++ rb_link_node(&tree_node->node, parent, new);
++ rb_insert_color(&tree_node->node, root_treep);
++
++tree_node_reuse:
++ /* prepare for stable node insertion */
++ parent = NULL;
++ new = &tree_node->sub_root.rb_node;
++ }
++
++add_node:
++ rb_link_node(&new_node->node, parent, new);
++ rb_insert_color(&new_node->node, &tree_node->sub_root);
++ new_node->tree_node = tree_node;
++ tree_node->count++;
++ return;
++
++failed:
++ /* This can only happen when two nodes have collided
++ * in two levels.
++ */
++ new_node->tree_node = NULL;
++ return;
++}
++
++static inline void free_all_tree_nodes(struct list_head *list)
++{
++ struct tree_node *node, *tmp;
++
++ list_for_each_entry_safe(node, tmp, list, all_list) {
++ free_tree_node(node);
++ }
++}
++
++/**
++ * stable_tree_delta_hash() - Delta hash the stable tree from previous hash
++ * strength to the current hash_strength. It re-structures the whole tree.
++ */
++static inline void stable_tree_delta_hash(u32 prev_hash_strength)
++{
++ struct stable_node *node, *tmp;
++ struct rb_root *root_new_treep;
++ struct list_head *new_tree_node_listp;
++
++ stable_tree_index = (stable_tree_index + 1) % 2;
++ root_new_treep = &root_stable_tree[stable_tree_index];
++ new_tree_node_listp = &stable_tree_node_list[stable_tree_index];
++ *root_new_treep = RB_ROOT;
++ BUG_ON(!list_empty(new_tree_node_listp));
++
++ /*
++ * we need to be safe, the node could be removed by get_uksm_page()
++ */
++ list_for_each_entry_safe(node, tmp, &stable_node_list, all_list) {
++ void *addr;
++ struct page *node_page;
++ u32 hash;
++
++ /*
++ * We are completely re-structuring the stable nodes to a new
++ * stable tree. We don't want to touch the old tree unlinks and
++ * old tree_nodes. The old tree_nodes will be freed at once.
++ */
++ node_page = get_uksm_page(node, 0, 0);
++ if (!node_page)
++ continue;
++
++ if (node->tree_node) {
++ hash = node->tree_node->hash;
++
++ addr = kmap_atomic(node_page);
++
++ hash = delta_hash(addr, prev_hash_strength,
++ hash_strength, hash);
++ kunmap_atomic(addr);
++ } else {
++ /*
++ *it was not inserted to rbtree due to collision in last
++ *round scan.
++ */
++ hash = page_hash(node_page, hash_strength, 0);
++ }
++
++ stable_node_reinsert(node, node_page, root_new_treep,
++ new_tree_node_listp, hash);
++ put_page(node_page);
++ }
++
++ root_stable_treep = root_new_treep;
++ free_all_tree_nodes(stable_tree_node_listp);
++ BUG_ON(!list_empty(stable_tree_node_listp));
++ stable_tree_node_listp = new_tree_node_listp;
++}
++
++static inline void inc_hash_strength(unsigned long delta)
++{
++ hash_strength += 1 << delta;
++ if (hash_strength > HASH_STRENGTH_MAX)
++ hash_strength = HASH_STRENGTH_MAX;
++}
++
++static inline void dec_hash_strength(unsigned long delta)
++{
++ unsigned long change = 1 << delta;
++
++ if (hash_strength <= change + 1)
++ hash_strength = 1;
++ else
++ hash_strength -= change;
++}
++
++static inline void inc_hash_strength_delta(void)
++{
++ hash_strength_delta++;
++ if (hash_strength_delta > HASH_STRENGTH_DELTA_MAX)
++ hash_strength_delta = HASH_STRENGTH_DELTA_MAX;
++}
++
++static inline unsigned long get_current_neg_ratio(void)
++{
++ u64 pos = benefit.pos;
++ u64 neg = benefit.neg;
++
++ if (!neg)
++ return 0;
++
++ if (!pos || neg > pos)
++ return 100;
++
++ if (neg > div64_u64(U64_MAX, 100))
++ pos = div64_u64(pos, 100);
++ else
++ neg *= 100;
++
++ return div64_u64(neg, pos);
++}
++
++static inline unsigned long get_current_benefit(void)
++{
++ u64 pos = benefit.pos;
++ u64 neg = benefit.neg;
++ u64 scanned = benefit.scanned;
++
++ if (neg > pos)
++ return 0;
++
++ return div64_u64((pos - neg), scanned);
++}
++
++static inline int judge_rshash_direction(void)
++{
++ u64 current_neg_ratio, stable_benefit;
++ u64 current_benefit, delta = 0;
++ int ret = STILL;
++
++ /*
++ * Try to probe a value after the boot, and in case the system
++ * are still for a long time.
++ */
++ if ((fully_scanned_round & 0xFFULL) == 10) {
++ ret = OBSCURE;
++ goto out;
++ }
++
++ current_neg_ratio = get_current_neg_ratio();
++
++ if (current_neg_ratio == 0) {
++ rshash_neg_cont_zero++;
++ if (rshash_neg_cont_zero > 2)
++ return GO_DOWN;
++ else
++ return STILL;
++ }
++ rshash_neg_cont_zero = 0;
++
++ if (current_neg_ratio > 90) {
++ ret = GO_UP;
++ goto out;
++ }
++
++ current_benefit = get_current_benefit();
++ stable_benefit = rshash_state.stable_benefit;
++
++ if (!stable_benefit) {
++ ret = OBSCURE;
++ goto out;
++ }
++
++ if (current_benefit > stable_benefit)
++ delta = current_benefit - stable_benefit;
++ else if (current_benefit < stable_benefit)
++ delta = stable_benefit - current_benefit;
++
++ delta = div64_u64(100 * delta, stable_benefit);
++
++ if (delta > 50) {
++ rshash_cont_obscure++;
++ if (rshash_cont_obscure > 2)
++ return OBSCURE;
++ else
++ return STILL;
++ }
++
++out:
++ rshash_cont_obscure = 0;
++ return ret;
++}
++
++/**
++ * rshash_adjust() - The main function to control the random sampling state
++ * machine for hash strength adapting.
++ *
++ * return true if hash_strength has changed.
++ */
++static inline int rshash_adjust(void)
++{
++	unsigned long prev_hash_strength = hash_strength;
++
++	/* No fresh benefit samples this round -- nothing to decide on. */
++	if (!encode_benefit())
++		return 0;
++
++	switch (rshash_state.state) {
++	case RSHASH_STILL:
++		/* Settled state: probe which direction improves benefit. */
++		switch (judge_rshash_direction()) {
++		case GO_UP:
++			/* Direction reversed: restart the step size from 0. */
++			if (rshash_state.pre_direct == GO_DOWN)
++				hash_strength_delta = 0;
++
++			inc_hash_strength(hash_strength_delta);
++			inc_hash_strength_delta();
++			rshash_state.stable_benefit = get_current_benefit();
++			rshash_state.pre_direct = GO_UP;
++			break;
++
++		case GO_DOWN:
++			if (rshash_state.pre_direct == GO_UP)
++				hash_strength_delta = 0;
++
++			dec_hash_strength(hash_strength_delta);
++			inc_hash_strength_delta();
++			rshash_state.stable_benefit = get_current_benefit();
++			rshash_state.pre_direct = GO_DOWN;
++			break;
++
++		case OBSCURE:
++			/*
++			 * Ambiguous signal: remember the current point as the
++			 * fallback and start a systematic downward search.
++			 */
++			rshash_state.stable_point = hash_strength;
++			rshash_state.turn_point_down = hash_strength;
++			rshash_state.turn_point_up = hash_strength;
++			rshash_state.turn_benefit_down = get_current_benefit();
++			rshash_state.turn_benefit_up = get_current_benefit();
++			rshash_state.lookup_window_index = 0;
++			rshash_state.state = RSHASH_TRYDOWN;
++			dec_hash_strength(hash_strength_delta);
++			inc_hash_strength_delta();
++			break;
++
++		case STILL:
++			break;
++		default:
++			BUG();
++		}
++		break;
++
++	case RSHASH_TRYDOWN:
++		/* Reset the "worse than stable" counter every 5 samples. */
++		if (rshash_state.lookup_window_index++ % 5 == 0)
++			rshash_state.below_count = 0;
++
++		if (get_current_benefit() < rshash_state.stable_benefit)
++			rshash_state.below_count++;
++		else if (get_current_benefit() >
++				rshash_state.turn_benefit_down) {
++			rshash_state.turn_point_down = hash_strength;
++			rshash_state.turn_benefit_down = get_current_benefit();
++		}
++
++		/*
++		 * Abandon the downward search after 3 bad samples, a clear
++		 * GO_UP verdict, or hitting minimum strength; then search
++		 * upward from the remembered stable point.
++		 */
++		if (rshash_state.below_count >= 3 ||
++		    judge_rshash_direction() == GO_UP ||
++		    hash_strength == 1) {
++			hash_strength = rshash_state.stable_point;
++			hash_strength_delta = 0;
++			inc_hash_strength(hash_strength_delta);
++			inc_hash_strength_delta();
++			rshash_state.lookup_window_index = 0;
++			rshash_state.state = RSHASH_TRYUP;
++			hash_strength_delta = 0;
++		} else {
++			dec_hash_strength(hash_strength_delta);
++			inc_hash_strength_delta();
++		}
++		break;
++
++	case RSHASH_TRYUP:
++		if (rshash_state.lookup_window_index++ % 5 == 0)
++			rshash_state.below_count = 0;
++
++		if (get_current_benefit() < rshash_state.turn_benefit_down)
++			rshash_state.below_count++;
++		else if (get_current_benefit() > rshash_state.turn_benefit_up) {
++			rshash_state.turn_point_up = hash_strength;
++			rshash_state.turn_benefit_up = get_current_benefit();
++		}
++
++		/*
++		 * Upward search exhausted: settle on whichever turning
++		 * point (up or down) yielded the better benefit.
++		 */
++		if (rshash_state.below_count >= 3 ||
++		    judge_rshash_direction() == GO_DOWN ||
++		    hash_strength == HASH_STRENGTH_MAX) {
++			hash_strength = rshash_state.turn_benefit_up >
++				rshash_state.turn_benefit_down ?
++				rshash_state.turn_point_up :
++				rshash_state.turn_point_down;
++
++			rshash_state.state = RSHASH_PRE_STILL;
++		} else {
++			inc_hash_strength(hash_strength_delta);
++			inc_hash_strength_delta();
++		}
++
++		break;
++
++	case RSHASH_NEW:
++	case RSHASH_PRE_STILL:
++		/* (Re)enter the settled state with a fresh baseline. */
++		rshash_state.stable_benefit = get_current_benefit();
++		rshash_state.state = RSHASH_STILL;
++		hash_strength_delta = 0;
++		break;
++	default:
++		BUG();
++	}
++
++	/* rshash_neg = rshash_pos = 0; */
++	reset_benefit();
++
++	/* Re-hash the stable tree whenever the strength actually moved. */
++	if (prev_hash_strength != hash_strength)
++		stable_tree_delta_hash(prev_hash_strength);
++
++	return prev_hash_strength != hash_strength;
++}
++
++/**
++ * round_update_ladder() - The main function to do update of all the
++ * adjustments whenever a scan round is finished.
++ */
++static noinline void round_update_ladder(void)
++{
++	int i;
++	unsigned long dedup;
++	struct vma_slot *slot, *tmp_slot;
++
++	/* Clear the per-rung "round finished" markers for the next round. */
++	for (i = 0; i < SCAN_LADDER_SIZE; i++)
++		uksm_scan_ladder[i].flags &= ~UKSM_RUNG_ROUND_FINISHED;
++
++	list_for_each_entry_safe(slot, tmp_slot, &vma_slot_dedup, dedup_list) {
++
++		/* slot may be rung_rm_slot() when mm exits */
++		if (slot->snode) {
++			/* Promote slots that deduplicated well this round. */
++			dedup = cal_dedup_ratio_old(slot);
++			if (dedup && dedup >= uksm_abundant_threshold)
++				vma_rung_up(slot);
++		}
++
++		slot->pages_bemerged = 0;
++		slot->pages_cowed = 0;
++
++		list_del_init(&slot->dedup_list);
++	}
++}
++
++/*
++ * Tear down a vma_slot: detach it from its rung, release every rmap_item
++ * still referenced from its rmap_list pool pages, free the pools, and
++ * finally adjust the global page accounting before freeing the slot.
++ */
++static void uksm_del_vma_slot(struct vma_slot *slot)
++{
++	int i, j;
++	struct rmap_list_entry *entry;
++
++	if (slot->snode) {
++		/*
++		 * In case it just failed when entering the rung, it's not
++		 * necessary.
++		 */
++		rung_rm_slot(slot);
++	}
++
++	if (!list_empty(&slot->dedup_list))
++		list_del(&slot->dedup_list);
++
++	if (!slot->rmap_list_pool || !slot->pool_counts) {
++		/* In case it OOMed in uksm_vma_enter() */
++		goto out;
++	}
++
++	/* Drain every pool page; each live entry holds one rmap_item. */
++	for (i = 0; i < slot->pool_size; i++) {
++		void *addr;
++
++		if (!slot->rmap_list_pool[i])
++			continue;
++
++		addr = kmap(slot->rmap_list_pool[i]);
++		for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) {
++			entry = (struct rmap_list_entry *)addr + j;
++			if (is_addr(entry->addr))
++				continue;
++			if (!entry->item)
++				continue;
++
++			remove_rmap_item_from_tree(entry->item);
++			free_rmap_item(entry->item);
++			slot->pool_counts[i]--;
++		}
++		/* pool_counts must reach exactly zero or accounting is broken */
++		BUG_ON(slot->pool_counts[i]);
++		kunmap(slot->rmap_list_pool[i]);
++		__free_page(slot->rmap_list_pool[i]);
++	}
++	kfree(slot->rmap_list_pool);
++	kfree(slot->pool_counts);
++
++out:
++	slot->rung = NULL;
++	if (slot->flags & UKSM_SLOT_IN_UKSM) {
++		BUG_ON(uksm_pages_total < slot->pages);
++		uksm_pages_total -= slot->pages;
++	}
++
++	/* Undo this slot's contribution to the scanned-pages statistics. */
++	if (slot->fully_scanned_round == fully_scanned_round)
++		scanned_virtual_pages -= slot->pages;
++	else
++		scanned_virtual_pages -= slot->pages_scanned;
++	free_vma_slot(slot);
++}
++
++
++/* Max slots gathered per spinlock hold, to bound lock hold time. */
++#define SPIN_LOCK_PERIOD 32
++static struct vma_slot *cleanup_slots[SPIN_LOCK_PERIOD];
++/*
++ * Drain vma_slot_del: batches of up to SPIN_LOCK_PERIOD slots are pulled
++ * off the list under vma_slot_list_lock, then destroyed with the lock
++ * dropped (uksm_del_vma_slot() may sleep in kmap/kfree paths).
++ */
++static inline void cleanup_vma_slots(void)
++{
++	struct vma_slot *slot;
++	int i;
++
++	i = 0;
++	spin_lock(&vma_slot_list_lock);
++	while (!list_empty(&vma_slot_del)) {
++		slot = list_entry(vma_slot_del.next,
++				  struct vma_slot, slot_list);
++		list_del(&slot->slot_list);
++		cleanup_slots[i++] = slot;
++		if (i == SPIN_LOCK_PERIOD) {
++			/* batch full: drop the lock before teardown */
++			spin_unlock(&vma_slot_list_lock);
++			while (--i >= 0)
++				uksm_del_vma_slot(cleanup_slots[i]);
++			i = 0;
++			spin_lock(&vma_slot_list_lock);
++		}
++	}
++	spin_unlock(&vma_slot_list_lock);
++
++	/* destroy the final partial batch */
++	while (--i >= 0)
++		uksm_del_vma_slot(cleanup_slots[i]);
++}
++
++/*
++ * Exponential moving average formula
++ */
++static inline unsigned long ema(unsigned long curr, unsigned long last_ema)
++{
++	/*
++	 * For a very high burst, even the ema cannot work well, a false very
++	 * high per-page time estimation can result in feedback in very high
++	 * overhead of context switch and rung update -- this will then lead
++	 * to higher per-page time, this may not converge.
++	 *
++	 * Instead, we try to approach this value in a binary manner.
++	 */
++	if (curr > last_ema * 10)
++		return last_ema * 2;
++
++	return (EMA_ALPHA * curr + (100 - EMA_ALPHA) * last_ema) / 100;
++}
++
++/*
++ * convert cpu ratio in 1/TIME_RATIO_SCALE configured by user to
++ * nanoseconds based on current uksm_sleep_jiffies.
++ */
++static inline unsigned long cpu_ratio_to_nsec(unsigned int ratio)
++{
++	/*
++	 * NOTE: the divide-before-multiply order is deliberate -- it avoids
++	 * overflowing unsigned long for large sleep intervals, at the cost
++	 * of some truncation error.
++	 */
++	return NSEC_PER_USEC * jiffies_to_usecs(uksm_sleep_jiffies) /
++		(TIME_RATIO_SCALE - ratio) * ratio;
++}
++
++
++/*
++ * Resolve a rung's configured cpu_ratio into an absolute ratio in units
++ * of 1/TIME_RATIO_SCALE.  A positive value is used verbatim; a negative
++ * value means "that fraction of uksm_max_cpu_percentage".  The result is
++ * clamped to at least 1 so callers may divide by it.
++ */
++static inline unsigned long rung_real_ratio(int cpu_time_ratio)
++{
++	unsigned long real;
++
++	BUG_ON(!cpu_time_ratio);
++
++	if (cpu_time_ratio < 0)
++		real = (unsigned long)(-cpu_time_ratio) *
++			uksm_max_cpu_percentage / 100UL;
++	else
++		real = cpu_time_ratio;
++
++	return real ? real : 1;
++}
++
++/*
++ * Recompute each rung's per-period page quota from the EMA of per-page
++ * scan cost and the rung's cpu ratio; may also stretch the sleep
++ * interval so the slowest rung can scan at least one page per period.
++ */
++static noinline void uksm_calc_scan_pages(void)
++{
++	struct scan_rung *ladder = uksm_scan_ladder;
++	unsigned long sleep_usecs, nsecs;
++	unsigned long ratio;
++	int i;
++	unsigned long per_page;
++
++	/* Re-seed a wildly off (or periodically, every 256 rounds) estimate. */
++	if (uksm_ema_page_time > 100000 ||
++	    (((unsigned long) uksm_eval_round & (256UL - 1)) == 0UL))
++		uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
++
++	per_page = uksm_ema_page_time;
++	BUG_ON(!per_page);
++
++	/*
++	 * For every 8 eval round, we try to probe a uksm_sleep_jiffies value
++	 * based on saved user input.
++	 */
++	if (((unsigned long) uksm_eval_round & (8UL - 1)) == 0UL)
++		uksm_sleep_jiffies = uksm_sleep_saved;
++
++	/* We require a rung scan at least 1 page in a period. */
++	nsecs = per_page;
++	ratio = rung_real_ratio(ladder[0].cpu_ratio);
++	if (cpu_ratio_to_nsec(ratio) < nsecs) {
++		sleep_usecs = nsecs * (TIME_RATIO_SCALE - ratio) / ratio
++				/ NSEC_PER_USEC;
++		uksm_sleep_jiffies = usecs_to_jiffies(sleep_usecs) + 1;
++	}
++
++	/* quota per rung = cpu budget (ns) / estimated cost per page (ns) */
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		ratio = rung_real_ratio(ladder[i].cpu_ratio);
++		ladder[i].pages_to_scan = cpu_ratio_to_nsec(ratio) /
++					per_page;
++		BUG_ON(!ladder[i].pages_to_scan);
++		uksm_calc_rung_step(&ladder[i], per_page, ratio);
++	}
++}
++
++/*
++ * From the scan time of this round (ns) to next expected min sleep time
++ * (ms), be careful of the possible overflows. ratio is taken from
++ * rung_real_ratio()
++ */
++static inline
++unsigned int scan_time_to_sleep(unsigned long long scan_time, unsigned long ratio)
++{
++	/* >>20 approximates /1e6 (ns -> ms) without a 64-bit division */
++	scan_time >>= 20; /* to msec level now */
++	BUG_ON(scan_time > (ULONG_MAX / TIME_RATIO_SCALE));
++
++	return (unsigned int) ((unsigned long) scan_time *
++		(TIME_RATIO_SCALE - ratio) / ratio);
++}
++
++/*
++ * Round x up to the next multiple of y (y must be a power of two).
++ * NOTE: function-like macros -- both arguments may be evaluated more
++ * than once, so do not pass expressions with side effects.
++ */
++#define __round_mask(x, y) ((__typeof__(x))((y)-1))
++#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
++
++/* Insert a batch of freshly collected slots into the entry (lowest) rung. */
++static void uksm_vma_enter(struct vma_slot **slots, unsigned long num)
++{
++	rung_add_new_slots(&uksm_scan_ladder[0], slots, num);
++}
++
++static struct vma_slot *batch_slots[SLOT_TREE_NODE_STORE_SIZE];
++
++/*
++ * Move eligible slots from vma_slot_new into the scan ladder in batches,
++ * periodically dropping vma_slot_list_lock to bound hold time.  VMAs
++ * without an anon_vma yet are parked on a side list and re-queued for a
++ * later attempt; permanently ineligible ones go to vma_slot_noadd.
++ */
++static void uksm_enter_all_slots(void)
++{
++	struct vma_slot *slot;
++	unsigned long index;
++	struct list_head empty_vma_list;
++	int i;
++
++	i = 0;
++	index = 0;
++	INIT_LIST_HEAD(&empty_vma_list);
++
++	spin_lock(&vma_slot_list_lock);
++	while (!list_empty(&vma_slot_new)) {
++		slot = list_entry(vma_slot_new.next,
++				  struct vma_slot, slot_list);
++
++		if (!slot->vma->anon_vma) {
++			/* no anon pages yet -- retry on a later pass */
++			list_move(&slot->slot_list, &empty_vma_list);
++		} else if (vma_can_enter(slot->vma)) {
++			batch_slots[index++] = slot;
++			list_del_init(&slot->slot_list);
++		} else {
++			list_move(&slot->slot_list, &vma_slot_noadd);
++		}
++
++		/* drop the lock when the batch is full or the period expires */
++		if (++i == SPIN_LOCK_PERIOD ||
++		    (index && !(index % SLOT_TREE_NODE_STORE_SIZE))) {
++			spin_unlock(&vma_slot_list_lock);
++
++			if (index && !(index % SLOT_TREE_NODE_STORE_SIZE)) {
++				uksm_vma_enter(batch_slots, index);
++				index = 0;
++			}
++			i = 0;
++			cond_resched();
++			spin_lock(&vma_slot_list_lock);
++		}
++	}
++
++	/* put anon_vma-less slots back so they are reconsidered next time */
++	list_splice(&empty_vma_list, &vma_slot_new);
++
++	spin_unlock(&vma_slot_list_lock);
++
++	/* flush the final partial batch */
++	if (index)
++		uksm_vma_enter(batch_slots, index);
++
++}
++
++/* Nonzero iff this rung completed a full pass over its slots. */
++static inline int rung_round_finished(struct scan_rung *rung)
++{
++	return rung->flags & UKSM_RUNG_ROUND_FINISHED;
++}
++
++/*
++ * Decide where a fully-visited slot belongs in the ladder: back to the
++ * entry rung when thrash protection triggers, up on good dedup ratio,
++ * otherwise down.  Resets the slot's per-round counters.
++ */
++static inline void judge_slot(struct vma_slot *slot)
++{
++	struct scan_rung *rung = slot->rung;
++	unsigned long dedup;
++	int deleted;
++
++	dedup = cal_dedup_ratio(slot);
++	if (vma_fully_scanned(slot) && uksm_thrash_threshold)
++		deleted = vma_rung_enter(slot, &uksm_scan_ladder[0]);
++	else if (dedup && dedup >= uksm_abundant_threshold)
++		deleted = vma_rung_up(slot);
++	else
++		deleted = vma_rung_down(slot);
++
++	slot->pages_merged = 0;
++	slot->pages_cowed = 0;
++	slot->this_sampled = 0;
++
++	if (vma_fully_scanned(slot))
++		slot->pages_scanned = 0;
++
++	slot->last_scanned = slot->pages_scanned;
++
++	/* If its deleted in above, then rung was already advanced. */
++	if (!deleted)
++		advance_current_scan(rung);
++}
++
++
++/*
++ * A "hash round" completes once the pages scanned this round exceed a
++ * quarter of all tracked pages.  On completion the counter is reset
++ * and, provided any page was ever scanned, fully_scanned_round advances.
++ * Returns 1 when the round just finished, 0 otherwise.
++ */
++static inline int hash_round_finished(void)
++{
++	if (scanned_virtual_pages <= (uksm_pages_total >> 2))
++		return 0;
++
++	scanned_virtual_pages = 0;
++	if (uksm_pages_scanned)
++		fully_scanned_round++;
++
++	return 1;
++}
++
++/* Pages scanned per mmap_sem hold before the lock is released. */
++#define UKSM_MMSEM_BATCH 5
++/* Max consecutive skips over a busy mm before giving up on this rung. */
++#define BUSY_RETRY 100
++
++/**
++ * uksm_do_scan() - the main worker function.
++ */
++static noinline void uksm_do_scan(void)
++{
++	struct vma_slot *slot, *iter;
++	struct mm_struct *busy_mm;
++	unsigned char round_finished, all_rungs_emtpy;
++	int i, err, mmsem_batch;
++	unsigned long pcost;
++	long long delta_exec;
++	unsigned long vpages, max_cpu_ratio;
++	unsigned long long start_time, end_time, scan_time;
++	unsigned int expected_jiffies;
++
++	might_sleep();
++
++	vpages = 0;
++
++	start_time = task_sched_runtime(current);
++	max_cpu_ratio = 0;
++	mmsem_batch = 0;
++
++	/* Walk every rung, consuming each one's pages_to_scan quota. */
++	for (i = 0; i < SCAN_LADDER_SIZE;) {
++		struct scan_rung *rung = &uksm_scan_ladder[i];
++		unsigned long ratio;
++		int busy_retry;
++
++		if (!rung->pages_to_scan) {
++			i++;
++			continue;
++		}
++
++		if (!rung->vma_root.num) {
++			rung->pages_to_scan = 0;
++			i++;
++			continue;
++		}
++
++		ratio = rung_real_ratio(rung->cpu_ratio);
++		if (ratio > max_cpu_ratio)
++			max_cpu_ratio = ratio;
++
++		busy_retry = BUSY_RETRY;
++		/*
++		 * Do not consider rung_round_finished() here, just used up the
++		 * rung->pages_to_scan quota.
++		 */
++		while (rung->pages_to_scan && rung->vma_root.num &&
++		       likely(!freezing(current))) {
++			int reset = 0;
++
++			slot = rung->current_scan;
++
++			BUG_ON(vma_fully_scanned(slot));
++
++			/* mmsem_batch != 0 means we still hold the mmap lock */
++			if (mmsem_batch)
++				err = 0;
++			else
++				err = try_down_read_slot_mmap_sem(slot);
++
++			if (err == -ENOENT) {
++rm_slot:
++				rung_rm_slot(slot);
++				continue;
++			}
++
++			busy_mm = slot->mm;
++
++			if (err == -EBUSY) {
++				/* skip other vmas on the same mm */
++				do {
++					reset = advance_current_scan(rung);
++					iter = rung->current_scan;
++					busy_retry--;
++					if (iter->vma->vm_mm != busy_mm ||
++					    !busy_retry || reset)
++						break;
++				} while (1);
++
++				if (iter->vma->vm_mm != busy_mm) {
++					continue;
++				} else {
++					/* scan round finsished */
++					break;
++				}
++			}
++
++			BUG_ON(!vma_can_enter(slot->vma));
++			if (uksm_test_exit(slot->vma->vm_mm)) {
++				mmsem_batch = 0;
++				mmap_read_unlock(slot->vma->vm_mm);
++				goto rm_slot;
++			}
++
++			if (mmsem_batch)
++				mmsem_batch--;
++			else
++				mmsem_batch = UKSM_MMSEM_BATCH;
++
++			/* Ok, we have take the mmap_sem, ready to scan */
++			scan_vma_one_page(slot);
++			rung->pages_to_scan--;
++			vpages++;
++
++			if (rung->current_offset + rung->step > slot->pages - 1
++					|| vma_fully_scanned(slot)) {
++				/* slot exhausted: unlock, re-rank it, reset batch */
++				mmap_read_unlock(slot->vma->vm_mm);
++				judge_slot(slot);
++				mmsem_batch = 0;
++			} else {
++				rung->current_offset += rung->step;
++				if (!mmsem_batch)
++					mmap_read_unlock(slot->vma->vm_mm);
++			}
++
++			busy_retry = BUSY_RETRY;
++			cond_resched();
++		}
++
++		/* drop any mmap lock still held from an unfinished batch */
++		if (mmsem_batch) {
++			mmap_read_unlock(slot->vma->vm_mm);
++			mmsem_batch = 0;
++		}
++
++		if (freezing(current))
++			break;
++
++		cond_resched();
++	}
++	end_time = task_sched_runtime(current);
++	delta_exec = end_time - start_time;
++
++	if (freezing(current))
++		return;
++
++	cleanup_vma_slots();
++	uksm_enter_all_slots();
++
++	/* the eval round finishes only when every non-empty rung finished */
++	round_finished = 1;
++	all_rungs_emtpy = 1;
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		struct scan_rung *rung = &uksm_scan_ladder[i];
++
++		if (rung->vma_root.num) {
++			all_rungs_emtpy = 0;
++			if (!rung_round_finished(rung))
++				round_finished = 0;
++		}
++	}
++
++	if (all_rungs_emtpy)
++		round_finished = 0;
++
++	if (round_finished) {
++		round_update_ladder();
++		uksm_eval_round++;
++
++		if (hash_round_finished() && rshash_adjust()) {
++			/* Reset the unstable root iff hash strength changed */
++			uksm_hash_round++;
++			root_unstable_tree = RB_ROOT;
++			free_all_tree_nodes(&unstable_tree_node_list);
++		}
++
++		/*
++		 * A number of pages can hang around indefinitely on per-cpu
++		 * pagevecs, raised page count preventing write_protect_page
++		 * from merging them. Though it doesn't really matter much,
++		 * it is puzzling to see some stuck in pages_volatile until
++		 * other activity jostles them out, and they also prevented
++		 * LTP's KSM test from succeeding deterministically; so drain
++		 * them here (here rather than on entry to uksm_do_scan(),
++		 * so we don't IPI too often when pages_to_scan is set low).
++		 */
++		lru_add_drain_all();
++	}
++
++
++	/* fold this round's measured per-page cost into the EMA */
++	if (vpages && delta_exec > 0) {
++		pcost = (unsigned long) delta_exec / vpages;
++		if (likely(uksm_ema_page_time))
++			uksm_ema_page_time = ema(pcost, uksm_ema_page_time);
++		else
++			uksm_ema_page_time = pcost;
++	}
++
++	uksm_calc_scan_pages();
++	uksm_sleep_real = uksm_sleep_jiffies;
++	/* in case of radical cpu bursts, apply the upper bound */
++	end_time = task_sched_runtime(current);
++	if (max_cpu_ratio && end_time > start_time) {
++		scan_time = end_time - start_time;
++		expected_jiffies = msecs_to_jiffies(
++			scan_time_to_sleep(scan_time, max_cpu_ratio));
++
++		if (expected_jiffies > uksm_sleep_real)
++			uksm_sleep_real = expected_jiffies;
++
++		/* We have a 1 second up bound for responsiveness. */
++		if (jiffies_to_msecs(uksm_sleep_real) > MSEC_PER_SEC)
++			uksm_sleep_real = msecs_to_jiffies(1000);
++	}
++
++	return;
++}
++
++/* Nonzero iff merging is currently enabled via the run sysfs knob. */
++static int ksmd_should_run(void)
++{
++	return uksm_run & UKSM_RUN_MERGE;
++}
++
++/*
++ * Kernel-thread main loop for uksmd: scan while enabled, sleep the
++ * computed interval between rounds, and park on the waitqueue whenever
++ * scanning is disabled.  Freezer-aware.
++ */
++static int uksm_scan_thread(void *nothing)
++{
++	set_freezable();
++	/* mildly deprioritized so uksmd yields to interactive work */
++	set_user_nice(current, 5);
++
++	while (!kthread_should_stop()) {
++		mutex_lock(&uksm_thread_mutex);
++		if (ksmd_should_run())
++			uksm_do_scan();
++		mutex_unlock(&uksm_thread_mutex);
++
++		try_to_freeze();
++
++		if (ksmd_should_run()) {
++			schedule_timeout_interruptible(uksm_sleep_real);
++			uksm_sleep_times++;
++		} else {
++			/* disabled: block until re-enabled or stopped */
++			wait_event_freezable(uksm_thread_wait,
++				ksmd_should_run() || kthread_should_stop());
++		}
++	}
++	return 0;
++}
++
++/*
++ * rmap walk over a KSM page: visit every VMA that maps it, via the
++ * stable node's node_vma/rmap_item lists.  Two passes are made -- first
++ * over the original VMAs, then (search_new_forks) over forked copies.
++ */
++void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
++{
++	struct stable_node *stable_node;
++	struct node_vma *node_vma;
++	struct rmap_item *rmap_item;
++	int search_new_forks = 0;
++	unsigned long address;
++
++	VM_BUG_ON_PAGE(!PageKsm(page), page);
++	VM_BUG_ON_PAGE(!PageLocked(page), page);
++
++	stable_node = page_stable_node(page);
++	if (!stable_node)
++		return;
++again:
++	hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) {
++		hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) {
++			struct anon_vma *anon_vma = rmap_item->anon_vma;
++			struct anon_vma_chain *vmac;
++			struct vm_area_struct *vma;
++
++			cond_resched();
++			anon_vma_lock_read(anon_vma);
++			anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
++						       0, ULONG_MAX) {
++				cond_resched();
++				vma = vmac->vma;
++				address = get_rmap_addr(rmap_item);
++
++				if (address < vma->vm_start ||
++				    address >= vma->vm_end)
++					continue;
++
++				/* pass 0: original vma only; pass 1: forks only */
++				if ((rmap_item->slot->vma == vma) ==
++				    search_new_forks)
++					continue;
++
++				if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
++					continue;
++
++				if (!rwc->rmap_one(page, vma, address, rwc->arg)) {
++					anon_vma_unlock_read(anon_vma);
++					return;
++				}
++
++				if (rwc->done && rwc->done(page)) {
++					anon_vma_unlock_read(anon_vma);
++					return;
++				}
++			}
++			anon_vma_unlock_read(anon_vma);
++		}
++	}
++	if (!search_new_forks++)
++		goto again;
++}
++
++#ifdef CONFIG_MIGRATION
++/* Common ksm interface but may be specific to uksm */
++/*
++ * Page-migration hook: repoint the stable node's pfn from the old page
++ * to its migrated replacement.
++ */
++void ksm_migrate_page(struct page *newpage, struct page *oldpage)
++{
++	struct stable_node *stable_node;
++
++	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
++	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
++	VM_BUG_ON(newpage->mapping != oldpage->mapping);
++
++	stable_node = page_stable_node(newpage);
++	if (stable_node) {
++		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
++		stable_node->kpfn = page_to_pfn(newpage);
++		/*
++		 * newpage->mapping was set in advance; now we need smp_wmb()
++		 * to make sure that the new stable_node->kpfn is visible
++		 * to get_ksm_page() before it can see that oldpage->mapping
++		 * has gone stale (or that PageSwapCache has been cleared).
++		 */
++		smp_wmb();
++		set_page_stable_node(oldpage, NULL);
++	}
++}
++#endif /* CONFIG_MIGRATION */
++
++#ifdef CONFIG_MEMORY_HOTREMOVE
++/*
++ * Linear scan of the stable tree for any node whose kpfn falls inside
++ * [start_pfn, end_pfn); returns the first match or NULL.
++ */
++static struct stable_node *uksm_check_stable_tree(unsigned long start_pfn,
++						  unsigned long end_pfn)
++{
++	struct rb_node *node;
++
++	for (node = rb_first(root_stable_treep); node; node = rb_next(node)) {
++		struct stable_node *stable_node;
++
++		stable_node = rb_entry(node, struct stable_node, node);
++		if (stable_node->kpfn >= start_pfn &&
++		    stable_node->kpfn < end_pfn)
++			return stable_node;
++	}
++	return NULL;
++}
++
++/*
++ * Memory-hotremove notifier: freeze ksmd across the offline operation
++ * and prune stable nodes that point into the offlined pfn range.
++ */
++static int uksm_memory_callback(struct notifier_block *self,
++				unsigned long action, void *arg)
++{
++	struct memory_notify *mn = arg;
++	struct stable_node *stable_node;
++
++	switch (action) {
++	case MEM_GOING_OFFLINE:
++		/*
++		 * Keep it very simple for now: just lock out ksmd and
++		 * MADV_UNMERGEABLE while any memory is going offline.
++		 * mutex_lock_nested() is necessary because lockdep was alarmed
++		 * that here we take uksm_thread_mutex inside notifier chain
++		 * mutex, and later take notifier chain mutex inside
++		 * uksm_thread_mutex to unlock it. But that's safe because both
++		 * are inside mem_hotplug_mutex.
++		 */
++		mutex_lock_nested(&uksm_thread_mutex, SINGLE_DEPTH_NESTING);
++		break;
++
++	case MEM_OFFLINE:
++		/*
++		 * Most of the work is done by page migration; but there might
++		 * be a few stable_nodes left over, still pointing to struct
++		 * pages which have been offlined: prune those from the tree.
++		 */
++		while ((stable_node = uksm_check_stable_tree(mn->start_pfn,
++				mn->start_pfn + mn->nr_pages)) != NULL)
++			remove_node_from_stable_tree(stable_node, 1, 1);
++		/* fallthrough */
++
++	case MEM_CANCEL_OFFLINE:
++		/* pairs with the lock taken at MEM_GOING_OFFLINE */
++		mutex_unlock(&uksm_thread_mutex);
++		break;
++	}
++	return NOTIFY_OK;
++}
++#endif /* CONFIG_MEMORY_HOTREMOVE */
++
++#ifdef CONFIG_SYSFS
++/*
++ * This all compiles without CONFIG_SYSFS, but is a waste of space.
++ */
++
++/* Shorthand for read-only and read-write uksm sysfs attributes. */
++#define UKSM_ATTR_RO(_name) \
++	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
++#define UKSM_ATTR(_name) \
++	static struct kobj_attribute _name##_attr = \
++		__ATTR(_name, 0644, _name##_show, _name##_store)
++
++/* sysfs: current cap (in percent) on ksmd cpu consumption. */
++static ssize_t max_cpu_percentage_show(struct kobject *kobj,
++				       struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%u\n", uksm_max_cpu_percentage);
++}
++
++/* sysfs: set the cpu cap; accepted input is clamped into [10, 99]. */
++static ssize_t max_cpu_percentage_store(struct kobject *kobj,
++					struct kobj_attribute *attr,
++					const char *buf, size_t count)
++{
++	unsigned long max_cpu_percentage;
++	int err;
++
++	err = kstrtoul(buf, 10, &max_cpu_percentage);
++	if (err || max_cpu_percentage > 100)
++		return -EINVAL;
++
++	/* clamp to the usable range: 100 would starve everything else */
++	if (max_cpu_percentage == 100)
++		max_cpu_percentage = 99;
++	else if (max_cpu_percentage < 10)
++		max_cpu_percentage = 10;
++
++	uksm_max_cpu_percentage = max_cpu_percentage;
++
++	return count;
++}
++UKSM_ATTR(max_cpu_percentage);
++
++/* sysfs: current inter-round sleep interval in milliseconds. */
++static ssize_t sleep_millisecs_show(struct kobject *kobj,
++				    struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%u\n", jiffies_to_msecs(uksm_sleep_jiffies));
++}
++
++/* sysfs: set the sleep interval (<= 1000 ms); also saved as the value
++ * the scanner periodically re-probes from.
++ */
++static ssize_t sleep_millisecs_store(struct kobject *kobj,
++				     struct kobj_attribute *attr,
++				     const char *buf, size_t count)
++{
++	unsigned long msecs;
++	int err;
++
++	err = kstrtoul(buf, 10, &msecs);
++	if (err || msecs > MSEC_PER_SEC)
++		return -EINVAL;
++
++	uksm_sleep_jiffies = msecs_to_jiffies(msecs);
++	uksm_sleep_saved = uksm_sleep_jiffies;
++
++	return count;
++}
++UKSM_ATTR(sleep_millisecs);
++
++
++/*
++ * sysfs: list all cpu governors, with the active one in [brackets],
++ * e.g. "full medium [low] quiet ".
++ */
++static ssize_t cpu_governor_show(struct kobject *kobj,
++				 struct kobj_attribute *attr, char *buf)
++{
++	int n = sizeof(uksm_cpu_governor_str) / sizeof(char *);
++	int i;
++
++	buf[0] = '\0';
++	for (i = 0; i < n ; i++) {
++		if (uksm_cpu_governor == i)
++			strcat(buf, "[");
++
++		strcat(buf, uksm_cpu_governor_str[i]);
++
++		if (uksm_cpu_governor == i)
++			strcat(buf, "]");
++
++		strcat(buf, " ");
++	}
++	strcat(buf, "\n");
++
++	return strlen(buf);
++}
++
++/*
++ * Load each rung's cpu ratio and evaluation interval, plus the global
++ * cpu cap, from the preset table entry selected by the active governor.
++ */
++static inline void init_performance_values(void)
++{
++	struct uksm_cpu_preset_s *preset = &uksm_cpu_preset[uksm_cpu_governor];
++	struct scan_rung *rung;
++	int i;
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = &uksm_scan_ladder[i];
++		rung->cpu_ratio = preset->cpu_ratio[i];
++		rung->cover_msecs = preset->cover_msecs[i];
++	}
++
++	uksm_max_cpu_percentage = preset->max_cpu;
++}
++
++/*
++ * sysfs: select a cpu governor by name (prefix match against the known
++ * governor strings) and re-apply its preset performance values.
++ */
++static ssize_t cpu_governor_store(struct kobject *kobj,
++				  struct kobj_attribute *attr,
++				  const char *buf, size_t count)
++{
++	int n = sizeof(uksm_cpu_governor_str) / sizeof(char *);
++
++	for (n--; n >= 0 ; n--) {
++		if (!strncmp(buf, uksm_cpu_governor_str[n],
++			     strlen(uksm_cpu_governor_str[n])))
++			break;
++	}
++
++	if (n < 0)
++		return -EINVAL;
++	else
++		uksm_cpu_governor = n;
++
++	init_performance_values();
++
++	return count;
++}
++UKSM_ATTR(cpu_governor);
++
++/* sysfs: current run state (0 = stopped, UKSM_RUN_MERGE = scanning). */
++static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
++			char *buf)
++{
++	return sprintf(buf, "%u\n", uksm_run);
++}
++
++/* sysfs: start/stop the scanner; wakes uksmd when merging is enabled. */
++static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
++			 const char *buf, size_t count)
++{
++	int err;
++	unsigned long flags;
++
++	err = kstrtoul(buf, 10, &flags);
++	if (err || flags > UINT_MAX)
++		return -EINVAL;
++	if (flags > UKSM_RUN_MERGE)
++		return -EINVAL;
++
++	/* serialize with a scan in progress */
++	mutex_lock(&uksm_thread_mutex);
++	if (uksm_run != flags)
++		uksm_run = flags;
++	mutex_unlock(&uksm_thread_mutex);
++
++	if (flags & UKSM_RUN_MERGE)
++		wake_up_interruptible(&uksm_thread_wait);
++
++	return count;
++}
++UKSM_ATTR(run);
++
++/* sysfs: dedup-ratio threshold (percent) above which a slot is promoted. */
++static ssize_t abundant_threshold_show(struct kobject *kobj,
++				       struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%u\n", uksm_abundant_threshold);
++}
++
++static ssize_t abundant_threshold_store(struct kobject *kobj,
++					struct kobj_attribute *attr,
++					const char *buf, size_t count)
++{
++	int err;
++	unsigned long flags;
++
++	err = kstrtoul(buf, 10, &flags);
++	if (err || flags > 99)
++		return -EINVAL;
++
++	uksm_abundant_threshold = flags;
++
++	return count;
++}
++UKSM_ATTR(abundant_threshold);
++
++/* sysfs: thrash-protection threshold (percent); 0 disables it. */
++static ssize_t thrash_threshold_show(struct kobject *kobj,
++				     struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%u\n", uksm_thrash_threshold);
++}
++
++static ssize_t thrash_threshold_store(struct kobject *kobj,
++				      struct kobj_attribute *attr,
++				      const char *buf, size_t count)
++{
++	int err;
++	unsigned long flags;
++
++	err = kstrtoul(buf, 10, &flags);
++	if (err || flags > 99)
++		return -EINVAL;
++
++	uksm_thrash_threshold = flags;
++
++	return count;
++}
++UKSM_ATTR(thrash_threshold);
++
++/*
++ * sysfs: print each rung's cpu ratio, space separated.  Negative ratios
++ * (fractions of the cpu cap) are rendered as "MAX/<divisor>".
++ */
++static ssize_t cpu_ratios_show(struct kobject *kobj,
++			       struct kobj_attribute *attr, char *buf)
++{
++	int i, size;
++	struct scan_rung *rung;
++	char *p = buf;
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = &uksm_scan_ladder[i];
++
++		if (rung->cpu_ratio > 0)
++			size = sprintf(p, "%d ", rung->cpu_ratio);
++		else
++			size = sprintf(p, "MAX/%d ",
++					TIME_RATIO_SCALE / -rung->cpu_ratio);
++
++		p += size;
++	}
++
++	*p++ = '\n';
++	*p = '\0';
++
++	return p - buf;
++}
++
++/*
++ * sysfs: parse SCAN_LADDER_SIZE space-separated cpu ratios ("N" or
++ * "MAX/N") and apply them to the rungs atomically after a full parse.
++ *
++ * Fixes over the previous version: the kernel buffer copy is now freed
++ * on every path (it previously leaked on success AND on -EINVAL), and it
++ * is allocated one byte larger so it is always NUL-terminated before
++ * kstrtoul() walks it.
++ */
++static ssize_t cpu_ratios_store(struct kobject *kobj,
++				struct kobj_attribute *attr,
++				const char *buf, size_t count)
++{
++	int i, cpuratios[SCAN_LADDER_SIZE], err;
++	unsigned long value;
++	struct scan_rung *rung;
++	char *p, *kbuf, *end = NULL;
++	ssize_t ret = count;
++
++	/* +1 guarantees NUL termination; kzalloc pre-zeroes the tail */
++	kbuf = kzalloc(count + 1, GFP_KERNEL);
++	if (!kbuf)
++		return -ENOMEM;
++
++	memcpy(kbuf, buf, count);
++
++	p = kbuf;
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		if (i != SCAN_LADDER_SIZE - 1) {
++			end = strchr(p, ' ');
++			if (!end) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			*end = '\0';
++		}
++
++		if (strstr(p, "MAX/")) {
++			p = strchr(p, '/') + 1;
++			err = kstrtoul(p, 10, &value);
++			if (err || value > TIME_RATIO_SCALE || !value) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			/* negative ratio encodes "1/value of the cpu cap" */
++			cpuratios[i] = -(int) (TIME_RATIO_SCALE / value);
++		} else {
++			err = kstrtoul(p, 10, &value);
++			if (err || value > TIME_RATIO_SCALE || !value) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			cpuratios[i] = value;
++		}
++
++		p = end + 1;
++	}
++
++	/* commit only after the whole line parsed cleanly */
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = &uksm_scan_ladder[i];
++
++		rung->cpu_ratio = cpuratios[i];
++	}
++
++out:
++	/* free the allocation start -- `p` has been advanced past it */
++	kfree(kbuf);
++	return ret;
++}
++UKSM_ATTR(cpu_ratios);
++
++/* sysfs: print each rung's evaluation interval (ms), space separated. */
++static ssize_t eval_intervals_show(struct kobject *kobj,
++				   struct kobj_attribute *attr, char *buf)
++{
++	int i, size;
++	struct scan_rung *rung;
++	char *p = buf;
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = &uksm_scan_ladder[i];
++		size = sprintf(p, "%u ", rung->cover_msecs);
++		p += size;
++	}
++
++	*p++ = '\n';
++	*p = '\0';
++
++	return p - buf;
++}
++
++/*
++ * sysfs: parse SCAN_LADDER_SIZE space-separated evaluation intervals
++ * (ms) and apply them atomically after a full parse.
++ *
++ * Fix over the previous version: `p` was advanced past the start of the
++ * allocation inside the parse loop (`p = end + 1`), so the final
++ * kfree(p) freed a pointer into the middle of (or past) the buffer --
++ * an invalid free.  We now keep the original pointer and free that.
++ */
++static ssize_t eval_intervals_store(struct kobject *kobj,
++				    struct kobj_attribute *attr,
++				    const char *buf, size_t count)
++{
++	int i, err;
++	unsigned long values[SCAN_LADDER_SIZE];
++	struct scan_rung *rung;
++	char *p, *kbuf, *end = NULL;
++	ssize_t ret = count;
++
++	/* zeroed tail guarantees NUL termination for kstrtoul() */
++	kbuf = kzalloc(count + 2, GFP_KERNEL);
++	if (!kbuf)
++		return -ENOMEM;
++
++	memcpy(kbuf, buf, count);
++
++	p = kbuf;
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		if (i != SCAN_LADDER_SIZE - 1) {
++			end = strchr(p, ' ');
++			if (!end) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			*end = '\0';
++		}
++
++		err = kstrtoul(p, 10, &values[i]);
++		if (err) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		p = end + 1;
++	}
++
++	/* commit only after the whole line parsed cleanly */
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = &uksm_scan_ladder[i];
++
++		rung->cover_msecs = values[i];
++	}
++
++out:
++	/* free the allocation start, never the advanced cursor `p` */
++	kfree(kbuf);
++	return ret;
++}
++UKSM_ATTR(eval_intervals);
++
++/* sysfs (RO): EMA of the per-page scan cost, in nanoseconds. */
++static ssize_t ema_per_page_time_show(struct kobject *kobj,
++				      struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", uksm_ema_page_time);
++}
++UKSM_ATTR_RO(ema_per_page_time);
++
++/* sysfs (RO): number of shared (KSM) pages currently in use. */
++static ssize_t pages_shared_show(struct kobject *kobj,
++				 struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", uksm_pages_shared);
++}
++UKSM_ATTR_RO(pages_shared);
++
++/* sysfs (RO): number of page mappings deduplicated onto shared pages. */
++static ssize_t pages_sharing_show(struct kobject *kobj,
++				  struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", uksm_pages_sharing);
++}
++UKSM_ATTR_RO(pages_sharing);
++
++/* sysfs (RO): number of unique pages checked but not merged. */
++static ssize_t pages_unshared_show(struct kobject *kobj,
++				   struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", uksm_pages_unshared);
++}
++UKSM_ATTR_RO(pages_unshared);
++
++/* sysfs (RO): how many complete scan rounds have finished. */
++static ssize_t full_scans_show(struct kobject *kobj,
++			       struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%llu\n", fully_scanned_round);
++}
++UKSM_ATTR_RO(full_scans);
++
++/*
++ * sysfs (RO): total pages scanned, reported as "value * 2^base" once the
++ * raw 64-bit counter no longer fits an unsigned long.
++ */
++static ssize_t pages_scanned_show(struct kobject *kobj,
++				  struct kobj_attribute *attr, char *buf)
++{
++	unsigned long base = 0;
++	u64 delta, ret;
++
++	if (pages_scanned_stored) {
++		base = pages_scanned_base;
++		ret = pages_scanned_stored;
++		delta = uksm_pages_scanned >> base;
++		if (CAN_OVERFLOW_U64(ret, delta)) {
++			/* halve both and bump the exponent before adding */
++			ret >>= 1;
++			delta >>= 1;
++			base++;
++			ret += delta;
++		}
++	} else {
++		ret = uksm_pages_scanned;
++	}
++
++	/* scale down until the value fits an unsigned long for printing */
++	while (ret > ULONG_MAX) {
++		ret >>= 1;
++		base++;
++	}
++
++	if (base)
++		return sprintf(buf, "%lu * 2^%lu\n", (unsigned long)ret, base);
++	else
++		return sprintf(buf, "%lu\n", (unsigned long)ret);
++}
++UKSM_ATTR_RO(pages_scanned);
++
++/* sysfs (RO): current adaptive hash strength. */
++static ssize_t hash_strength_show(struct kobject *kobj,
++				  struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", hash_strength);
++}
++UKSM_ATTR_RO(hash_strength);
++
++/* sysfs (RO): how many times uksmd has slept between rounds. */
++static ssize_t sleep_times_show(struct kobject *kobj,
++				struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%llu\n", uksm_sleep_times);
++}
++UKSM_ATTR_RO(sleep_times);
++
++
++/* All uksm sysfs attributes, exposed under /sys/kernel/mm/uksm/. */
++static struct attribute *uksm_attrs[] = {
++	&max_cpu_percentage_attr.attr,
++	&sleep_millisecs_attr.attr,
++	&cpu_governor_attr.attr,
++	&run_attr.attr,
++	&ema_per_page_time_attr.attr,
++	&pages_shared_attr.attr,
++	&pages_sharing_attr.attr,
++	&pages_unshared_attr.attr,
++	&full_scans_attr.attr,
++	&pages_scanned_attr.attr,
++	&hash_strength_attr.attr,
++	&sleep_times_attr.attr,
++	&thrash_threshold_attr.attr,
++	&abundant_threshold_attr.attr,
++	&cpu_ratios_attr.attr,
++	&eval_intervals_attr.attr,
++	NULL,
++};
++
++static struct attribute_group uksm_attr_group = {
++	.attrs = uksm_attrs,
++	.name = "uksm",
++};
++#endif /* CONFIG_SYSFS */
++
++/*
++ * One-time ladder setup: initialise every rung's slot tree root, then
++ * load the governor presets and derive the initial scan quotas.
++ */
++static inline void init_scan_ladder(void)
++{
++	int i;
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++)
++		slot_tree_init_root(&uksm_scan_ladder[i].vma_root);
++
++	init_performance_values();
++	uksm_calc_scan_pages();
++}
++
++/*
++ * Benchmark hashing vs. memcmp on two almost-identical pages to derive
++ * memcmp_cost, the relative cost used by the benefit estimator.
++ * Returns 0 on success or -ENOMEM.
++ *
++ * Fix over the previous version: p1 was leaked when the allocation of
++ * p2 failed.
++ */
++static inline int cal_positive_negative_costs(void)
++{
++	struct page *p1, *p2;
++	unsigned char *addr1, *addr2;
++	unsigned long i, time_start, hash_cost;
++	unsigned long loopnum = 0;
++
++	/*IMPORTANT: volatile is needed to prevent over-optimization by gcc. */
++	volatile u32 hash;
++	volatile int ret;
++
++	p1 = alloc_page(GFP_KERNEL);
++	if (!p1)
++		return -ENOMEM;
++
++	p2 = alloc_page(GFP_KERNEL);
++	if (!p2) {
++		/* don't leak the first page on partial failure */
++		__free_page(p1);
++		return -ENOMEM;
++	}
++
++	addr1 = kmap_atomic(p1);
++	addr2 = kmap_atomic(p2);
++	memset(addr1, prandom_u32(), PAGE_SIZE);
++	memcpy(addr2, addr1, PAGE_SIZE);
++
++	/* make sure that the two pages differ in last byte */
++	addr2[PAGE_SIZE-1] = ~addr2[PAGE_SIZE-1];
++	kunmap_atomic(addr2);
++	kunmap_atomic(addr1);
++
++	/* time full-strength hashing for ~100 jiffies */
++	time_start = jiffies;
++	while (jiffies - time_start < 100) {
++		for (i = 0; i < 100; i++)
++			hash = page_hash(p1, HASH_STRENGTH_FULL, 0);
++		loopnum += 100;
++	}
++	hash_cost = (jiffies - time_start);
++
++	/* time the same number of worst-case (differ-in-last-byte) memcmps */
++	time_start = jiffies;
++	for (i = 0; i < loopnum; i++)
++		ret = pages_identical_with_cost(p1, p2);
++	memcmp_cost = HASH_STRENGTH_FULL * (jiffies - time_start);
++	memcmp_cost /= hash_cost;
++	pr_info("UKSM: relative memcmp_cost = %lu "
++		"hash=%u cmp_ret=%d.\n",
++		memcmp_cost, hash, ret);
++
++	__free_page(p1);
++	__free_page(p2);
++	return 0;
++}
++
++/*
++ * Precompute the hash of the all-zero page at every possible hash
++ * strength, so zero pages can be recognized cheaply during scanning.
++ * Returns 0 on success or -ENOMEM.
++ *
++ * Fix over the previous version: the scratch page was leaked when the
++ * table allocation failed.
++ */
++static int init_zeropage_hash_table(void)
++{
++	struct page *page;
++	char *addr;
++	int i;
++
++	page = alloc_page(GFP_KERNEL);
++	if (!page)
++		return -ENOMEM;
++
++	addr = kmap_atomic(page);
++	memset(addr, 0, PAGE_SIZE);
++	kunmap_atomic(addr);
++
++	zero_hash_table = kmalloc_array(HASH_STRENGTH_MAX, sizeof(u32),
++					GFP_KERNEL);
++	if (!zero_hash_table) {
++		/* don't leak the scratch page on allocation failure */
++		__free_page(page);
++		return -ENOMEM;
++	}
++
++	for (i = 0; i < HASH_STRENGTH_MAX; i++)
++		zero_hash_table[i] = page_hash(page, i, 0);
++
++	__free_page(page);
++
++	return 0;
++}
++
++/*
++ * Build random_nums, a Fisher-Yates shuffled permutation of
++ * [0, HASH_STRENGTH_FULL), used to pick which words of a page the
++ * partial hash samples; then initialise the rshash state machine and
++ * run the hashing-cost calibration.
++ */
++static inline int init_random_sampling(void)
++{
++	unsigned long i;
++
++	random_nums = kmalloc(PAGE_SIZE, GFP_KERNEL);
++	if (!random_nums)
++		return -ENOMEM;
++
++	for (i = 0; i < HASH_STRENGTH_FULL; i++)
++		random_nums[i] = i;
++
++	/* Fisher-Yates shuffle */
++	for (i = 0; i < HASH_STRENGTH_FULL; i++) {
++		unsigned long rand_range, swap_index, tmp;
++
++		rand_range = HASH_STRENGTH_FULL - i;
++		swap_index = i + prandom_u32() % rand_range;
++		tmp = random_nums[i];
++		random_nums[i] = random_nums[swap_index];
++		random_nums[swap_index] = tmp;
++	}
++
++	rshash_state.state = RSHASH_NEW;
++	rshash_state.below_count = 0;
++	rshash_state.lookup_window_index = 0;
++
++	/*
++	 * NOTE(review): if this calibration fails, random_nums stays
++	 * allocated -- presumably harmless since init then aborts, but
++	 * worth confirming against the caller.
++	 */
++	return cal_positive_negative_costs();
++}
++
++static int __init uksm_slab_init(void)
++{
++ rmap_item_cache = UKSM_KMEM_CACHE(rmap_item, 0);
++ if (!rmap_item_cache)
++ goto out;
++
++ stable_node_cache = UKSM_KMEM_CACHE(stable_node, 0);
++ if (!stable_node_cache)
++ goto out_free1;
++
++ node_vma_cache = UKSM_KMEM_CACHE(node_vma, 0);
++ if (!node_vma_cache)
++ goto out_free2;
++
++ vma_slot_cache = UKSM_KMEM_CACHE(vma_slot, 0);
++ if (!vma_slot_cache)
++ goto out_free3;
++
++ tree_node_cache = UKSM_KMEM_CACHE(tree_node, 0);
++ if (!tree_node_cache)
++ goto out_free4;
++
++ return 0;
++
++out_free4:
++ kmem_cache_destroy(vma_slot_cache);
++out_free3:
++ kmem_cache_destroy(node_vma_cache);
++out_free2:
++ kmem_cache_destroy(stable_node_cache);
++out_free1:
++ kmem_cache_destroy(rmap_item_cache);
++out:
++ return -ENOMEM;
++}
++
++static void __init uksm_slab_free(void)
++{
++ kmem_cache_destroy(stable_node_cache);
++ kmem_cache_destroy(rmap_item_cache);
++ kmem_cache_destroy(node_vma_cache);
++ kmem_cache_destroy(vma_slot_cache);
++ kmem_cache_destroy(tree_node_cache);
++}
++
++/* Common interface to ksm, different to it. */
++int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
++ unsigned long end, int advice, unsigned long *vm_flags)
++{
++ int err;
++
++ switch (advice) {
++ case MADV_MERGEABLE:
++ return 0; /* just ignore the advice */
++
++ case MADV_UNMERGEABLE:
++ if (!(*vm_flags & VM_MERGEABLE) || !uksm_flags_can_scan(*vm_flags))
++ return 0; /* just ignore the advice */
++
++ if (vma->anon_vma) {
++ err = unmerge_uksm_pages(vma, start, end);
++ if (err)
++ return err;
++ }
++
++ uksm_remove_vma(vma);
++ *vm_flags &= ~VM_MERGEABLE;
++ break;
++ }
++
++ return 0;
++}
++
++/* Common interface to ksm, actually the same. */
++struct page *ksm_might_need_to_copy(struct page *page,
++ struct vm_area_struct *vma, unsigned long address)
++{
++ struct anon_vma *anon_vma = page_anon_vma(page);
++ struct page *new_page;
++
++ if (PageKsm(page)) {
++ if (page_stable_node(page))
++ return page; /* no need to copy it */
++ } else if (!anon_vma) {
++ return page; /* no need to copy it */
++ } else if (anon_vma->root == vma->anon_vma->root &&
++ page->index == linear_page_index(vma, address)) {
++ return page; /* still no need to copy it */
++ }
++ if (!PageUptodate(page))
++ return page; /* let do_swap_page report the error */
++
++ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
++ if (new_page) {
++ copy_user_highpage(new_page, page, address, vma);
++
++ SetPageDirty(new_page);
++ __SetPageUptodate(new_page);
++ __SetPageLocked(new_page);
++ }
++
++ return new_page;
++}
++
++/* Copied from mm/ksm.c and required from 5.1 */
++bool reuse_ksm_page(struct page *page,
++ struct vm_area_struct *vma,
++ unsigned long address)
++{
++#ifdef CONFIG_DEBUG_VM
++ if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
++ WARN_ON(!page_mapped(page)) ||
++ WARN_ON(!PageLocked(page))) {
++ dump_page(page, "reuse_ksm_page");
++ return false;
++ }
++#endif
++
++ if (PageSwapCache(page) || !page_stable_node(page))
++ return false;
++ /* Prohibit parallel get_ksm_page() */
++ if (!page_ref_freeze(page, 1))
++ return false;
++
++ page_move_anon_rmap(page, vma);
++ page->index = linear_page_index(vma, address);
++ page_ref_unfreeze(page, 1);
++
++ return true;
++}
++
++static int __init uksm_init(void)
++{
++ struct task_struct *uksm_thread;
++ int err;
++
++ uksm_sleep_jiffies = msecs_to_jiffies(100);
++ uksm_sleep_saved = uksm_sleep_jiffies;
++
++ slot_tree_init();
++ init_scan_ladder();
++
++
++ err = init_random_sampling();
++ if (err)
++ goto out_free2;
++
++ err = uksm_slab_init();
++ if (err)
++ goto out_free1;
++
++ err = init_zeropage_hash_table();
++ if (err)
++ goto out_free0;
++
++ uksm_thread = kthread_run(uksm_scan_thread, NULL, "uksmd");
++ if (IS_ERR(uksm_thread)) {
++ pr_err("uksm: creating kthread failed\n");
++ err = PTR_ERR(uksm_thread);
++ goto out_free;
++ }
++
++#ifdef CONFIG_SYSFS
++ err = sysfs_create_group(mm_kobj, &uksm_attr_group);
++ if (err) {
++ pr_err("uksm: register sysfs failed\n");
++ kthread_stop(uksm_thread);
++ goto out_free;
++ }
++#else
++ uksm_run = UKSM_RUN_MERGE; /* no way for user to start it */
++
++#endif /* CONFIG_SYSFS */
++
++#ifdef CONFIG_MEMORY_HOTREMOVE
++ /*
++ * Choose a high priority since the callback takes uksm_thread_mutex:
++ * later callbacks could only be taking locks which nest within that.
++ */
++ hotplug_memory_notifier(uksm_memory_callback, 100);
++#endif
++ return 0;
++
++out_free:
++ kfree(zero_hash_table);
++out_free0:
++ uksm_slab_free();
++out_free1:
++ kfree(random_nums);
++out_free2:
++ kfree(uksm_scan_ladder);
++ return err;
++}
++
++#ifdef MODULE
++subsys_initcall(uksm_init);
++#else
++late_initcall(uksm_init);
++#endif
++
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 698bc0bc18d1..b7590f4944ca 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1216,6 +1216,9 @@ const char * const vmstat_text[] = {
+ "nr_shadow_call_stack",
+ #endif
+
++#ifdef CONFIG_UKSM
++ "nr_uksm_zero_pages",
++#endif
+ /* enum writeback_stat_item counters */
+ "nr_dirty_threshold",
+ "nr_dirty_background_threshold",