Diffstat (limited to 'core/modules/vmware15/patches/vmmon__4.14-4.14.99__12.0-14.0.patch')
-rw-r--r--  core/modules/vmware15/patches/vmmon__4.14-4.14.99__12.0-14.0.patch | 61
1 file changed, 61 insertions, 0 deletions
diff --git a/core/modules/vmware15/patches/vmmon__4.14-4.14.99__12.0-14.0.patch b/core/modules/vmware15/patches/vmmon__4.14-4.14.99__12.0-14.0.patch
new file mode 100644
index 00000000..5278d645
--- /dev/null
+++ b/core/modules/vmware15/patches/vmmon__4.14-4.14.99__12.0-14.0.patch
@@ -0,0 +1,61 @@
+--- a/linux/hostif.c 2017-09-18 15:22:18.000000000 +0200
++++ b/linux/hostif.c 2017-11-17 13:35:49.600578115 +0100
+@@ -79,6 +79,37 @@
+ #error CONFIG_HIGH_RES_TIMERS required for acceptable performance
+ #endif
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
++# define global_zone_page_state global_page_state
++#endif
++
++static unsigned long get_nr_slab_unreclaimable(void)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
++ return global_node_page_state(NR_SLAB_UNRECLAIMABLE);
++#else
++ return global_page_state(NR_SLAB_UNRECLAIMABLE);
++#endif
++}
++
++static unsigned long get_nr_unevictable(void)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
++ return global_node_page_state(NR_UNEVICTABLE);
++#else
++ return global_page_state(NR_UNEVICTABLE);
++#endif
++}
++
++static unsigned long get_nr_anon_mapped(void)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
++ return global_node_page_state(NR_ANON_MAPPED);
++#else
++ return global_page_state(NR_ANON_PAGES);
++#endif
++}
++
+ /*
+ * Although this is not really related to kernel-compatibility, I put this
+ * helper macro here for now for a lack of better place --hpreg
+@@ -1516,16 +1547,11 @@
+ unsigned int reservedPages = MEMDEFAULTS_MIN_HOST_PAGES;
+ unsigned int hugePages = (vm == NULL) ? 0 :
+ BYTES_2_PAGES(vm->memInfo.hugePageBytes);
+- unsigned int lockedPages = global_page_state(NR_PAGETABLE) +
+- global_page_state(NR_SLAB_UNRECLAIMABLE) +
+- global_page_state(NR_UNEVICTABLE) +
++ unsigned int lockedPages = global_zone_page_state(NR_PAGETABLE) +
++ get_nr_slab_unreclaimable() +
++ get_nr_unevictable() +
+ hugePages + reservedPages;
+- unsigned int anonPages =
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+- global_page_state(NR_ANON_MAPPED);
+-#else
+- global_page_state(NR_ANON_PAGES);
+-#endif
++ unsigned int anonPages = get_nr_anon_mapped();
+ unsigned int swapPages = BYTES_2_PAGES(linuxState.swapSize);
+
+ if (anonPages > swapPages) {
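
For reference, the added code follows a common out-of-tree module pattern: alias the renamed accessor (kernels before 4.14 only have global_page_state(), which 4.14 renamed to global_zone_page_state()), and hide the per-zone vs. per-node counter split behind small static helpers so the call sites stay version-agnostic. Below is a minimal sketch of the same idea, assuming kernel headers are available at build time; it is illustrative only, and the helper name compat_locked_page_estimate() is hypothetical, not part of the patch:

#include <linux/version.h>
#include <linux/mm.h>
#include <linux/vmstat.h>

/* Pre-4.14 kernels only provide the old spelling of the per-zone accessor;
 * alias it so the newer name can be used unconditionally. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
# define global_zone_page_state global_page_state
#endif

/* Hypothetical call site: NR_PAGETABLE is still a per-zone counter in the
 * 4.x series, while NR_UNEVICTABLE moved to the per-node counters in 4.8. */
static unsigned long compat_locked_page_estimate(void)
{
        unsigned long pages = global_zone_page_state(NR_PAGETABLE);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
        pages += global_node_page_state(NR_UNEVICTABLE);
#else
        pages += global_page_state(NR_UNEVICTABLE);
#endif
        return pages;
}

Keeping the version checks inside one helper, rather than repeating the #if block at every call site as the removed anonPages code did, is what lets the second hunk collapse to a single assignment.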