Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

log the access to SUID #33

Open
wants to merge 104 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 103 commits
Commits
Show all changes
104 commits
Select commit Hold shift + click to select a range
8bc7636
make DEFAULT_MMAP_MIN_ADDR match LSM_MMAP_MIN_ADDR
thestinger May 27, 2017
a8488f5
enable HARDENED_USERCOPY by default
thestinger May 29, 2017
74df025
disable HARDENED_USERCOPY_FALLBACK by default
thestinger Apr 26, 2018
4eb0d58
enable SECURITY_DMESG_RESTRICT by default
thestinger May 3, 2017
c51ecd0
set kptr_restrict=2 by default
thestinger May 3, 2017
d4adb1e
enable DEBUG_LIST by default
thestinger May 3, 2017
1ccc60c
enable BUG_ON_DATA_CORRUPTION by default
thestinger May 29, 2017
1d4df4f
enable ARM64_SW_TTBR0_PAN by default
thestinger Feb 25, 2018
8c8221b
arm64: enable RANDOMIZE_BASE by default
thestinger Feb 25, 2018
522a7a8
enable SLAB_FREELIST_RANDOM by default
thestinger May 3, 2017
e66a9db
enable SLAB_FREELIST_HARDENED by default
thestinger Aug 20, 2017
a54f690
disable SLAB_MERGE_DEFAULT by default
thestinger Jul 8, 2017
6dc663b
enable FORTIFY_SOURCE by default
thestinger May 8, 2017
1b317c4
enable PANIC_ON_OOPS by default
thestinger May 3, 2017
d6c6493
stop hiding SLUB_DEBUG behind EXPERT
thestinger May 15, 2017
3970d13
stop hiding X86_16BIT behind EXPERT
thestinger May 4, 2017
3945314
disable X86_16BIT by default
thestinger May 4, 2017
8ffec63
stop hiding MODIFY_LDT_SYSCALL behind EXPERT
thestinger May 4, 2017
9d44baa
disable MODIFY_LDT_SYSCALL by default
thestinger May 4, 2017
f108bf6
set LEGACY_VSYSCALL_NONE by default
thestinger May 29, 2017
fd40a80
stop hiding AIO behind EXPERT
Bernhard40 Oct 6, 2017
7b271c4
disable AIO by default
Bernhard40 Oct 6, 2017
08bb496
remove SYSVIPC from arm64/x86_64 defconfigs
thestinger Feb 25, 2018
c01594f
disable DEVPORT by default
thestinger May 27, 2017
1373658
disable PROC_VMCORE by default
thestinger May 27, 2017
b74b828
disable NFS_DEBUG by default
thestinger May 28, 2017
3014c2f
enable DEBUG_WX by default
thestinger May 29, 2017
d2c3ec4
disable LEGACY_PTYS by default
thestinger Jan 5, 2018
a865724
disable DEVMEM by default
thestinger Jan 5, 2018
aaa4557
enable IO_STRICT_DEVMEM by default
thestinger Jan 5, 2018
cf7b465
disable COMPAT_BRK by default
thestinger May 7, 2017
250a7a8
use maximum supported mmap rnd entropy by default
thestinger May 7, 2017
63ee9bb
enable protected_{symlinks,hardlinks} by default
thestinger May 30, 2017
e400d98
enable SECURITY by default
thestinger Feb 25, 2018
b9c3132
enable SECURITY_YAMA by default
thestinger May 29, 2017
e915334
enable SECURITY_NETWORK by default
thestinger Feb 25, 2018
128adb0
enable AUDIT by default
thestinger Feb 25, 2018
9a82a14
enable SECURITY_SELINUX by default
thestinger Feb 25, 2018
982fabd
enable SYN_COOKIES by default
thestinger Jan 6, 2018
a32a012
add __read_only for non-init related usage
thestinger May 7, 2017
3c64c49
make sysctl constants read-only
thestinger May 7, 2017
6394f49
mark kernel_set_to_readonly as __ro_after_init
thestinger May 12, 2017
9682c78
mark slub runtime configuration as __ro_after_init
thestinger May 14, 2017
659c806
add __ro_after_init to slab_nomerge and slab_state
thestinger May 3, 2017
c2668ce
mark kmem_cache as __ro_after_init
thestinger May 28, 2017
080a508
mark __supported_pte_mask as __ro_after_init
thestinger May 12, 2017
12b1e2a
mark kobj_ns_type_register as only used for init
thestinger Jul 4, 2017
9e3cfd8
mark open_softirq as only used for init
thestinger Jul 4, 2017
114a072
remove unused softirq_action callback parameter
thestinger Jul 4, 2017
983d7af
mark softirq_vec as __ro_after_init
thestinger Jul 4, 2017
dd112cf
mm: slab: trigger BUG if requested object is not a slab page
thestinger Sep 17, 2019
1d286a3
bug on kmem_cache_free with the wrong cache
thestinger May 3, 2017
da2673e
bug on !PageSlab && !PageCompound in ksize
thestinger May 3, 2017
bf7150e
mm: add support for verifying page sanitization
thestinger May 4, 2017
2c095f3
slub: Extend init_on_free to slab caches with constructors
tsautereau-anssi Sep 20, 2019
9458c89
slub: Add support for verifying slab sanitization
thestinger May 4, 2017
2ce1956
slub: add multi-purpose random canaries
thestinger May 3, 2017
468095d
security,perf: Allow further restriction of perf_event_open
bwhacks Jan 11, 2016
99a3330
enable SECURITY_PERF_EVENTS_RESTRICT by default
thestinger May 4, 2017
f5058d9
add sysctl to disallow unprivileged CLONE_NEWUSER by default
hallyn May 31, 2013
ec73ebd
add kmalloc/krealloc alloc_size attributes
thestinger May 3, 2017
533c43f
add vmalloc alloc_size attributes
thestinger May 3, 2017
fe1765f
add kvmalloc alloc_size attribute
thestinger Jul 4, 2017
eee3fa3
add percpu alloc_size attributes
thestinger May 14, 2017
694cbc9
add alloc_pages_exact alloc_size attributes
thestinger May 14, 2017
0ca98c2
Add the extra_latent_entropy kernel parameter
ephox-gcc-plugins May 30, 2016
ecbd814
ata: avoid null pointer dereference on bug
thestinger May 16, 2017
14c10d0
sanity check for negative length in nla_memcpy
thestinger May 16, 2017
5cc8adb
add page destructor sanity check
thestinger May 16, 2017
6973f4b
PaX shadow cr4 sanity check (essentially a revert)
thestinger May 16, 2017
eaa87f6
add writable function pointer detection
thestinger Jul 9, 2017
5639af8
support overriding early audit kernel cmdline
thestinger Jul 9, 2017
8cc6f2c
FORTIFY_SOURCE intra-object overflow checking
thestinger Jun 3, 2017
224b56e
Revert "mm: revert x86_64 and arm64 ELF_ET_DYN_BASE base changes"
thestinger Aug 27, 2017
5bcddc9
x86_64: move vdso to mmap region from stack region
thestinger May 11, 2017
8b31fc5
x86: determine stack entropy based on mmap entropy
thestinger May 22, 2017
01ece9e
arm64: determine stack entropy based on mmap entropy
thestinger May 22, 2017
b596f54
randomize lower bits of the argument block
thestinger May 11, 2017
0f012fe
x86_64: match arm64 brk randomization entropy
thestinger May 30, 2017
4f3d196
support randomizing the lower bits of brk
thestinger May 30, 2017
0e35de5
mm: randomize lower bits of brk
thestinger Jun 1, 2017
e0da9d8
x86: randomize lower bits of brk
thestinger Jun 1, 2017
b55113f
mm: guarantee brk gap is at least one page
thestinger Jun 1, 2017
3a4c898
x86: guarantee brk gap is at least one page
thestinger Jun 1, 2017
293f984
x86_64: bound mmap between legacy/modern bases
thestinger Jul 4, 2017
030e84d
restrict device timing side channels
thestinger May 16, 2017
b36b517
add toggle for disabling newly added USB devices
thestinger May 16, 2017
cc19037
hard-wire legacy checkreqprot option to 0
thestinger Feb 25, 2018
941ac11
security: tty: Add owner user namespace to tty_struct
nmatt0 May 29, 2017
3134273
security: tty: make TIOCSTI ioctl require CAP_SYS_ADMIN
nmatt0 May 29, 2017
b3029bd
enable SECURITY_TIOCSTI_RESTRICT by default
thestinger May 4, 2017
69b5671
disable unprivileged eBPF access by default
anthraxx May 7, 2018
453f635
enable BPF JIT hardening by default (if available)
anthraxx May 7, 2018
95d0499
enable protected_{fifos,regular} by default
anthraxx Nov 4, 2018
51bb625
Revert "mark kernel_set_to_readonly as __ro_after_init"
anthraxx Jan 13, 2019
1336a9e
modpost: Add CONFIG_DEBUG_WRITABLE_FUNCTION_POINTERS_VERBOSE
tsautereau-anssi May 6, 2019
d803650
mm: Fix extra_latent_entropy
tsautereau-anssi May 7, 2019
86bcf07
add CONFIG for unprivileged_userns_clone
anthraxx Jul 31, 2019
b9b5b84
enable INIT_ON_ALLOC_DEFAULT_ON by default
anthraxx Sep 19, 2019
a746e05
enable INIT_ON_FREE_DEFAULT_ON by default
anthraxx Sep 19, 2019
922fadc
add CONFIG for unprivileged_userfaultfd
anthraxx Oct 1, 2019
932258a
slub: Extend init_on_alloc to slab caches with constructors
tsautereau-anssi Nov 29, 2019
7e22e8c
log the access to SUID
theLOICofFRANCE Mar 31, 2020
13ef903
Use KERN_WARNING to log the access to SUID
theLOICofFRANCE Mar 31, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
15 changes: 5 additions & 10 deletions Documentation/admin-guide/kernel-parameters.txt
Expand Up @@ -503,16 +503,6 @@
nosocket -- Disable socket memory accounting.
nokmem -- Disable kernel memory accounting.

checkreqprot [SELINUX] Set initial checkreqprot flag value.
Format: { "0" | "1" }
See security/selinux/Kconfig help text.
0 -- check protection applied by kernel (includes
any implied execute protection).
1 -- check protection requested by application.
Default value is set via a kernel config option.
Value can be changed at runtime via
/sys/fs/selinux/checkreqprot.

cio_ignore= [S390]
See Documentation/s390/common_io.rst for details.
clk_ignore_unused
Expand Down Expand Up @@ -3399,6 +3389,11 @@
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.

extra_latent_entropy
Enable a very simple form of latent entropy extraction
from the first 4GB of memory as the bootmem allocator
passes the memory pages to the buddy allocator.

pcbit= [HW,ISDN]

pcd. [PARIDE]
Expand Down
20 changes: 20 additions & 0 deletions Documentation/admin-guide/sysctl/kernel.rst
Expand Up @@ -102,6 +102,7 @@ show up in /proc/sys/kernel:
- sysctl_writes_strict
- tainted ==> Documentation/admin-guide/tainted-kernels.rst
- threads-max
- tiocsti_restrict
- unknown_nmi_panic
- watchdog
- watchdog_thresh
Expand Down Expand Up @@ -1112,6 +1113,25 @@ If a value outside of this range is written to threads-max an error
EINVAL occurs.


tiocsti_restrict:
=================

This toggle indicates whether unprivileged users are prevented from using the
TIOCSTI ioctl to inject commands into other processes which share a tty
session.

When tiocsti_restrict is set to (0) there are no restrictions (except the
default restriction of only being able to inject commands into one's own
tty). When tiocsti_restrict is set to (1), users must have CAP_SYS_ADMIN to
use the TIOCSTI ioctl.

When user namespaces are in use, the check for the capability CAP_SYS_ADMIN is
done against the user namespace that originally opened the tty.

The kernel config option CONFIG_SECURITY_TIOCSTI_RESTRICT sets the default
value of tiocsti_restrict.


unknown_nmi_panic:
==================

Expand Down
4 changes: 2 additions & 2 deletions arch/Kconfig
Expand Up @@ -658,7 +658,7 @@ config ARCH_MMAP_RND_BITS
int "Number of bits to use for ASLR of mmap base address" if EXPERT
range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
default ARCH_MMAP_RND_BITS_MIN
default ARCH_MMAP_RND_BITS_MAX
depends on HAVE_ARCH_MMAP_RND_BITS
help
This value can be used to select the number of bits to use to
Expand Down Expand Up @@ -692,7 +692,7 @@ config ARCH_MMAP_RND_COMPAT_BITS
int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
default ARCH_MMAP_RND_COMPAT_BITS_MIN
default ARCH_MMAP_RND_COMPAT_BITS_MAX
depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
help
This value can be used to select the number of bits to use to
Expand Down
2 changes: 2 additions & 0 deletions arch/arm64/Kconfig
Expand Up @@ -1212,6 +1212,7 @@ config RODATA_FULL_DEFAULT_ENABLED

config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
default y
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
Expand Down Expand Up @@ -1640,6 +1641,7 @@ config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
default y
help
Randomizes the virtual address at which the kernel image is
loaded, as a security feature that deters exploit attempts
Expand Down
1 change: 1 addition & 0 deletions arch/arm64/Kconfig.debug
Expand Up @@ -26,6 +26,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
config DEBUG_WX
bool "Warn on W+X mappings at boot"
select PTDUMP_CORE
default y
---help---
Generate a warning if any W+X mappings are found at boot.

Expand Down
1 change: 0 additions & 1 deletion arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
Expand Down
14 changes: 5 additions & 9 deletions arch/arm64/include/asm/elf.h
Expand Up @@ -103,14 +103,10 @@

/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is above 4GB to leave the entire 32-bit address
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#ifdef CONFIG_ARM64_FORCE_52BIT
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
#else
#define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW_64 / 3)
#endif /* CONFIG_ARM64_FORCE_52BIT */
#define ELF_ET_DYN_BASE 0x100000000UL

#ifndef __ASSEMBLY__

Expand Down Expand Up @@ -164,10 +160,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
/* 1GB of VA */
#ifdef CONFIG_COMPAT
#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12))
((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \
((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
#else
#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
#endif

#ifdef __AARCH64EB__
Expand Down
8 changes: 3 additions & 5 deletions arch/x86/Kconfig
Expand Up @@ -1193,8 +1193,7 @@ config VM86
default X86_LEGACY_VM86

config X86_16BIT
bool "Enable support for 16-bit segments" if EXPERT
default y
bool "Enable support for 16-bit segments"
depends on MODIFY_LDT_SYSCALL
---help---
This option is required by programs like Wine to run 16-bit
Expand Down Expand Up @@ -2331,7 +2330,7 @@ config COMPAT_VDSO
choice
prompt "vsyscall table for legacy applications"
depends on X86_64
default LEGACY_VSYSCALL_XONLY
default LEGACY_VSYSCALL_NONE
help
Legacy user code that does not know how to find the vDSO expects
to be able to issue three syscalls by calling fixed addresses in
Expand Down Expand Up @@ -2427,8 +2426,7 @@ config CMDLINE_OVERRIDE
be set to 'N' under normal conditions.

config MODIFY_LDT_SYSCALL
bool "Enable the LDT (local descriptor table)" if EXPERT
default y
bool "Enable the LDT (local descriptor table)"
---help---
Linux can allow user programs to install a per-process x86
Local Descriptor Table (LDT) using the modify_ldt(2) system
Expand Down
1 change: 1 addition & 0 deletions arch/x86/Kconfig.debug
Expand Up @@ -75,6 +75,7 @@ config EFI_PGT_DUMP
config DEBUG_WX
bool "Warn on W+X mappings at boot"
select PTDUMP_CORE
default y
---help---
Generate a warning if any W+X mappings are found at boot.

Expand Down
1 change: 0 additions & 1 deletion arch/x86/configs/x86_64_defconfig
@@ -1,5 +1,4 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
Expand Down
48 changes: 1 addition & 47 deletions arch/x86/entry/vdso/vma.c
Expand Up @@ -314,55 +314,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}

#ifdef CONFIG_X86_64
/*
* Put the vdso above the (randomized) stack with another randomized
* offset. This way there is no hole in the middle of address space.
* To save memory make sure it is still in the same PTE as the stack
* top. This doesn't give that many random bits.
*
* Note that this algorithm is imperfect: the distribution of the vdso
* start address within a PMD is biased toward the end.
*
* Only used for the 64-bit and x32 vdsos.
*/
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
unsigned long addr, end;
unsigned offset;

/*
* Round up the start address. It can start out unaligned as a result
* of stack start randomization.
*/
start = PAGE_ALIGN(start);

/* Round the lowest possible end address up to a PMD boundary. */
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= TASK_SIZE_MAX)
end = TASK_SIZE_MAX;
end -= len;

if (end > start) {
offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
addr = start + (offset << PAGE_SHIFT);
} else {
addr = start;
}

/*
* Forcibly align the final address in case we have a hardware
* issue that requires alignment for performance reasons.
*/
addr = align_vdso_addr(addr);

return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);

return map_vdso(image, addr);
return map_vdso(image, 0);
}
#endif

Expand Down
15 changes: 9 additions & 6 deletions arch/x86/include/asm/elf.h
Expand Up @@ -248,11 +248,11 @@ extern int force_personality32;

/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
* 64-bit, this is above 4GB to leave the entire 32-bit address
* 64-bit, this is raised to 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
(DEFAULT_MAP_WINDOW / 3 * 2))
0x100000000UL)

/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
Expand Down Expand Up @@ -312,8 +312,8 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);

#ifdef CONFIG_X86_32

#define __STACK_RND_MASK(is32bit) (0x7ff)
#define STACK_RND_MASK (0x7ff)
#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)

#define ARCH_DLINFO ARCH_DLINFO_IA32

Expand All @@ -322,7 +322,11 @@ extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
#else /* CONFIG_X86_32 */

/* 1GB for 64bit, 8MB for 32bit */
#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
#ifdef CONFIG_COMPAT
#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
#else
#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
#endif
#define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())

#define ARCH_DLINFO \
Expand Down Expand Up @@ -380,5 +384,4 @@ struct va_alignment {
} ____cacheline_aligned;

extern struct va_alignment va_align;
extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */
4 changes: 4 additions & 0 deletions arch/x86/include/asm/tlbflush.h
Expand Up @@ -295,6 +295,7 @@ static inline void cr4_set_bits_irqsoff(unsigned long mask)
unsigned long cr4;

cr4 = this_cpu_read(cpu_tlbstate.cr4);
BUG_ON(cr4 != __read_cr4());
if ((cr4 | mask) != cr4)
__cr4_set(cr4 | mask);
}
Expand All @@ -305,6 +306,7 @@ static inline void cr4_clear_bits_irqsoff(unsigned long mask)
unsigned long cr4;

cr4 = this_cpu_read(cpu_tlbstate.cr4);
BUG_ON(cr4 != __read_cr4());
if ((cr4 & ~mask) != cr4)
__cr4_set(cr4 & ~mask);
}
Expand Down Expand Up @@ -334,6 +336,7 @@ static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
unsigned long cr4;

cr4 = this_cpu_read(cpu_tlbstate.cr4);
BUG_ON(cr4 != __read_cr4());
__cr4_set(cr4 ^ mask);
}

Expand Down Expand Up @@ -440,6 +443,7 @@ static inline void __native_flush_tlb_global(void)
raw_local_irq_save(flags);

cr4 = this_cpu_read(cpu_tlbstate.cr4);
BUG_ON(cr4 != __read_cr4());
/* toggle PGE */
native_write_cr4(cr4 ^ X86_CR4_PGE);
/* write old PGE again and flush TLBs */
Expand Down
7 changes: 6 additions & 1 deletion arch/x86/kernel/process.c
Expand Up @@ -43,6 +43,8 @@
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/elf.h>
#include <linux/sizes.h>

#include "process.h"

Expand Down Expand Up @@ -913,7 +915,10 @@ unsigned long arch_align_stack(unsigned long sp)

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
return randomize_page(mm->brk, 0x02000000);
if (mmap_is_ia32())
return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
else
return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
}

/*
Expand Down
14 changes: 2 additions & 12 deletions arch/x86/kernel/sys_x86_64.c
Expand Up @@ -53,13 +53,6 @@ static unsigned long get_align_bits(void)
return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
unsigned long align_mask = get_align_mask();
addr = (addr + align_mask) & ~align_mask;
return addr | get_align_bits();
}

static int __init control_va_addr_alignment(char *str)
{
/* guard against enabling this on other CPU families */
Expand Down Expand Up @@ -121,10 +114,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
}

*begin = get_mmap_base(1);
if (in_32bit_syscall())
*end = task_size_32bit();
else
*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
*end = get_mmap_base(0);
}

unsigned long
Expand Down Expand Up @@ -201,7 +191,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,

info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.low_limit = get_mmap_base(1);
info.high_limit = get_mmap_base(0);

/*
Expand Down
4 changes: 2 additions & 2 deletions arch/x86/mm/init_32.c
Expand Up @@ -561,9 +561,9 @@ static void __init pagetable_init(void)

#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
pteval_t __supported_pte_mask __ro_after_init = DEFAULT_PTE_MASK;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
pteval_t __default_kernel_pte_mask __ro_after_init = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
Expand Down
4 changes: 2 additions & 2 deletions arch/x86/mm/init_64.c
Expand Up @@ -97,9 +97,9 @@ DEFINE_ENTRY(pte, pte, init)
*/

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
pteval_t __supported_pte_mask __ro_after_init = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
pteval_t __default_kernel_pte_mask __ro_after_init = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
Expand Down