--- a/vmnet/Makefile
+++ b/vmnet/Makefile
@@ -43,7 +43,11 @@ INCLUDE      += -I$(SRCROOT)/shared
 endif
 
 
+ifdef KVERSION
+VM_UNAME = $(KVERSION)
+else
 VM_UNAME = $(shell uname -r)
+endif
 
 # Header directory for the running kernel
 ifdef LINUXINCLUDE
@@ -100,6 +104,13 @@ auto-build: $(DRIVER_KO)
 $(DRIVER): $(DRIVER_KO)
 	if [ $< -nt $@ ] || [ ! -e $@ ] ; then cp -f $< $@; fi
 
+# Use SUBDIRS on 2.x, 3.x, 4.x.  Use M on newer kernels.
+ifeq ($(filter-out 2 3 4,$(firstword $(subst ., ,$(VM_UNAME)))),)
+DIRVAR := SUBDIRS
+else
+DIRVAR := M
+endif
+
 # Pass gcc version down the chain, so we can detect if kernel attempts to use unapproved compiler
 VM_CCVER := $(VMCCVER)
 export VM_CCVER
@@ -117,7 +128,7 @@ prebuild:: ;
 postbuild:: ;
 
 $(DRIVER_KO): prebuild
-	$(MAKE) -C $(BUILD_DIR) SUBDIRS=$$PWD SRCROOT=$$PWD/$(SRCROOT) \
+	$(MAKE) -C $(BUILD_DIR) $(DIRVAR)=$$PWD SRCROOT=$$PWD/$(SRCROOT) \
 	  MODULEBUILDDIR=$(MODULEBUILDDIR) modules
 	$(MAKE) -C $$PWD SRCROOT=$$PWD/$(SRCROOT) \
 	  MODULEBUILDDIR=$(MODULEBUILDDIR) postbuild
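
The two Makefile hunks above are independent fixes.  KVERSION lets a packaging
hook (DKMS, a PKGBUILD, and so on) build the module against an arbitrary
installed kernel, for example "make KVERSION=6.6.8-arch1-1" (any installed
kernel's version string); without it the build always targets the running
kernel reported by "uname -r".  The DIRVAR switch is needed because kbuild
deprecated SUBDIRS= in 5.0 and removed it after 5.3, so M= is passed whenever
the major version is anything other than 2, 3 or 4.
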
--- a/vmnet/bridge.c
+++ b/vmnet/bridge.c
@@ -636,7 +636,7 @@
 	 unsigned long flags;
 	 int i;
 
-	 atomic_inc(&clone->users);
+	 clone = skb_get(clone);
 
 	 clone->dev = dev;
 	 clone->protocol = eth_type_trans(clone, dev);
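
In bridge.c the open-coded reference grab no longer builds: sk_buff.users
became a refcount_t in kernel 4.13, so atomic_inc() on it is a type error.
skb_get() is the helper the kernel provides for this; in current kernels it is
roughly the following (from <linux/skbuff.h>, shown only for context, not
added by the patch):

   static inline struct sk_buff *skb_get(struct sk_buff *skb)
   {
           refcount_inc(&skb->users);  /* take an extra reference */
           return skb;                 /* returned so the call can be chained */
   }

Assigning the return value back to clone keeps the surrounding code unchanged
while using the supported API.
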
--- a/vmnet/userif.c
+++ b/vmnet/userif.c
@@ -115,16 +115,7 @@
    struct page *page = NULL;
    int retval;
 
-   down_read(&current->mm->mmap_sem);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
-   retval = get_user_pages(addr, 1, FOLL_WRITE, &page, NULL);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
-   retval = get_user_pages(addr, 1, 1, 0, &page, NULL);
-#else
-   retval = get_user_pages(current, current->mm, addr,
-                           1, 1, 0, &page, NULL);
-#endif
-   up_read(&current->mm->mmap_sem);
+   retval = get_user_pages_fast(addr, 1, FOLL_WRITE, &page);
 
    if (retval != 1) {
       return NULL;
@@ -152,16 +143,21 @@
  */
 
 static INLINE int
-VNetUserIfMapPtr(VA uAddr,        // IN: pointer to user memory
+VNetUserIfMapPtr(VA64 uAddr,      // IN: pointer to user memory
                  size_t size,     // IN: size of data
                  struct page **p, // OUT: locked page
                  void **ptr)      // OUT: kernel mapped pointer
 {
-   if (!access_ok(VERIFY_WRITE, (void *)uAddr, size) ||
-       (((uAddr + size - 1) & ~(PAGE_SIZE - 1)) !=
-        (uAddr & ~(PAGE_SIZE - 1)))) {
+   uint8 v;
+
+   /* Check area does not straddle two pages. */
+   if ((uAddr & (PAGE_SIZE - 1)) + size > PAGE_SIZE) {
       return -EINVAL;
    }
+   /* Check if it is user's area.  UserifLockPage() checks writability. */
+   if (copy_from_user(&v, (void *)(unsigned long)uAddr, sizeof v) != 0) {
+      return -EFAULT;
+   }
 
    *p = UserifLockPage(uAddr);
    if (*p == NULL) {
@@ -173,7 +169,7 @@
 }
 
 static INLINE int
-VNetUserIfMapUint32Ptr(VA uAddr,        // IN: pointer to user memory
+VNetUserIfMapUint32Ptr(VA64 uAddr,      // IN: pointer to user memory
                        struct page **p, // OUT: locked page
                        uint32 **ptr)    // OUT: kernel mapped pointer
 {
@@ -218,7 +214,7 @@
       return -EBUSY;
    }
 
-   if ((retval = VNetUserIfMapUint32Ptr((VA)vn->pollPtr, &pollPage,
+   if ((retval = VNetUserIfMapUint32Ptr(vn->pollPtr, &pollPage,
                                         &pollPtr)) < 0) {
       return retval;
    }
@@ -236,7 +232,7 @@
       goto error_free;
    }
 
-   if ((retval = VNetUserIfMapUint32Ptr((VA)vn->recvClusterPtr,
+   if ((retval = VNetUserIfMapUint32Ptr(vn->recvClusterPtr,
                                         &recvClusterPage,
                                         &recvClusterCount)) < 0) {
       goto error_free;
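
The userif.c changes deal with several interface breaks at once:
get_user_pages() changed its signature in 4.6 and 4.9 (hence the old #if
ladder), access_ok() lost its VERIFY_WRITE argument in 5.0, and
current->mm->mmap_sem was renamed to mmap_lock in 5.8.  Switching to
get_user_pages_fast() sidesteps all of them: it pins the page and takes its
own reference without the caller holding the mmap lock, so the
down_read()/up_read() pair disappears, and the old access_ok()-based test is
replaced by an explicit "does the range straddle a page boundary?" check plus
a copy_from_user() probe that confirms the address is readable user memory
(UserifLockPage() then enforces writability via FOLL_WRITE).  The VA -> VA64
widening simply matches the 64-bit pollPtr/recvClusterPtr fields, whose casts
are dropped at the call sites.  A minimal sketch of the pinning step, assuming
the gup_flags variant of get_user_pages_fast() (kernel 5.2 and newer); the
helper name is illustrative, not part of the driver:

   #include <linux/mm.h>

   /* Pin one writable user page and return it with an extra reference,
    * or NULL on failure.  The caller does not take the mmap lock. */
   static struct page *pin_one_user_page(unsigned long uaddr)
   {
           struct page *page;

           if (get_user_pages_fast(uaddr, 1, FOLL_WRITE, &page) != 1)
                   return NULL;
           return page;
   }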