2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 19
SUBLEVEL = 18
EXTRAVERSION =
NAME = "People's Front"

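
The only difference in the Makefile is the SUBLEVEL field, i.e. the third component of the kernel release string (4.19.18 versus 4.19.19). The assembled VERSION.PATCHLEVEL.SUBLEVEL string is what uname(2) reports at runtime; a minimal userspace sketch for checking which kernel is actually booted:

/* Minimal sketch: print the release string that VERSION.PATCHLEVEL.SUBLEVEL ends up in. */
#include <stdio.h>
#include <sys/utsname.h>

int main(void)
{
    struct utsname u;

    if (uname(&u) != 0) {
        perror("uname");
        return 1;
    }
    /* e.g. "4.19.18" plus any EXTRAVERSION/localversion suffix */
    printf("release: %s\n", u.release);
    return 0;
}
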
3 changes: 1 addition & 2 deletions arch/arc/include/asm/perf_event.h
@@ -103,8 +103,7 @@ static const char * const arc_pmu_ev_hw_map[] = {

/* counts condition */
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
/* All jump instructions that are taken */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
[PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
#ifdef CONFIG_ISA_ARCV2
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
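
The perf_event.h hunk swaps which ARC hardware condition backs the generic PERF_COUNT_HW_BRANCH_INSTRUCTIONS event: "ijmp" (which, per its comment, excludes zero-overhead-loop jumps) versus "ijmptak" (all taken jump instructions). Userspace requests the event by its generic id and never sees the hardware name; a hedged sketch using perf_event_open(2), with an arbitrary busy loop as the measured workload:

/* Sketch: count PERF_COUNT_HW_BRANCH_INSTRUCTIONS with perf_event_open(2).
 * The generic event id is what the arc_pmu_ev_hw_map[] table above maps
 * onto a concrete hardware condition. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
    struct perf_event_attr attr;
    long long count;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.type = PERF_TYPE_HARDWARE;
    attr.size = sizeof(attr);
    attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
    attr.disabled = 1;
    attr.exclude_kernel = 1;

    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
    for (volatile int i = 0; i < 100000; i++)
        ;                               /* some branches to count */
    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("branch instructions: %lld\n", count);
    close(fd);
    return 0;
}
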
40 changes: 8 additions & 32 deletions arch/arc/lib/memset-archs.S
@@ -7,39 +7,11 @@
*/

#include <linux/linkage.h>
#include <asm/cache.h>

/*
* The memset implementation below is optimized to use prefetchw and prealloc
* instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
* If you want to implement optimized memset for other possible L1 data cache
* line lengths (32B and 128B) you should rewrite code carefully checking
* we don't call any prefetchw/prealloc instruction for L1 cache lines which
* don't belongs to memset area.
*/

#if L1_CACHE_SHIFT == 6

.macro PREALLOC_INSTR reg, off
prealloc [\reg, \off]
.endm

.macro PREFETCHW_INSTR reg, off
prefetchw [\reg, \off]
.endm

#else

.macro PREALLOC_INSTR
.endm

.macro PREFETCHW_INSTR
.endm

#endif
#undef PREALLOC_NOT_AVAIL

ENTRY_CFI(memset)
PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
@@ -76,8 +48,11 @@ ENTRY_CFI(memset)

lpnz @.Lset64bytes
;; LOOP START
PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching

#ifdef PREALLOC_NOT_AVAIL
prefetchw [r3, 64] ;Prefetch the next write location
#else
prealloc [r3, 64]
#endif
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
@@ -110,6 +85,7 @@ ENTRY_CFI(memset)
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
;; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
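
Both shapes of memset prefetch the cache line that is about to be written; the version with the comment block additionally insists that prefetchw/prealloc only ever target lines lying entirely inside the memset area, because prealloc establishes a line in the cache without fetching it from memory and is therefore destructive for data outside the buffer. A rough C illustration of that guard, assuming a 64-byte L1 line and using the portable __builtin_prefetch write hint (which, unlike prealloc, may be ignored by the hardware):

/* Sketch of "prefetch only cache lines that belong to the buffer".
 * Assumes a 64-byte L1 line; __builtin_prefetch(p, 1) is a write-intent hint. */
#include <stddef.h>
#include <string.h>

#define L1_LINE 64

static void memset_64b(void *dst, int c, size_t n)
{
    char *p = dst;
    size_t done;

    for (done = 0; done + L1_LINE <= n; done += L1_LINE) {
        /* Hint the next line only if it is fully inside the buffer. */
        if (done + 2 * L1_LINE <= n)
            __builtin_prefetch(p + done + L1_LINE, 1);
        memset(p + done, c, L1_LINE);
    }
    memset(p + done, c, n - done);      /* tail shorter than one line */
}

int main(void)
{
    char buf[300];

    memset_64b(buf, 0xaa, sizeof(buf));
    return buf[299] == (char)0xaa ? 0 : 1;
}
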
3 changes: 1 addition & 2 deletions arch/arc/mm/init.c
@@ -138,8 +138,7 @@ void __init setup_arch_memory(void)
*/

memblock_add_node(low_mem_start, low_mem_sz, 0);
memblock_reserve(CONFIG_LINUX_LINK_BASE,
__pa(_end) - CONFIG_LINUX_LINK_BASE);
memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);

#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start)
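
The two memblock_reserve() calls both reserve memory up to __pa(_end) but start from different bases: the kernel link address (CONFIG_LINUX_LINK_BASE) versus the start of low memory (low_mem_start). When those bases differ, so does the reserved span. A toy calculation with invented addresses, just to make the difference concrete:

/* Toy arithmetic only; the addresses below are invented for illustration. */
#include <stdio.h>

int main(void)
{
    unsigned long low_mem_start   = 0x80000000UL;   /* assumed start of RAM      */
    unsigned long linux_link_base = 0x80100000UL;   /* assumed kernel link base  */
    unsigned long pa_end          = 0x80a00000UL;   /* assumed __pa(_end)        */

    printf("reserve from link base: base=%#lx size=%#lx\n",
           linux_link_base, pa_end - linux_link_base);
    printf("reserve from RAM start: base=%#lx size=%#lx\n",
           low_mem_start, pa_end - low_mem_start);
    return 0;
}
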
5 changes: 3 additions & 2 deletions arch/s390/include/asm/mmu_context.h
@@ -89,6 +89,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();

if (prev == next)
return;
S390_lowcore.user_asce = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR1 and CR7 */
@@ -100,8 +102,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
__ctl_load(S390_lowcore.vdso_asce, 7, 7);
clear_cpu_flag(CIF_ASCE_SECONDARY);
}
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}

#define finish_arch_post_lock_switch finish_arch_post_lock_switch
4 changes: 2 additions & 2 deletions arch/s390/kernel/early.c
@@ -64,10 +64,10 @@ static noinline __init void detect_machine_type(void)
if (stsi(vmms, 3, 2, 2) || !vmms->count)
return;

/* Detect known hypervisors */
/* Running under KVM? If not we assume z/VM */
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
else
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}

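
detect_machine_type() classifies the hypervisor by comparing the control-program identifier returned by stsi(), which is EBCDIC text: "\xd2\xe5\xd4" is "KVM" and "\xa9\x61\xe5\xd4" is "z/VM". A small host-side sketch that decodes exactly those bytes (the translation table is deliberately partial, not a full code page):

/* Sketch: the STSI control-program identifier is EBCDIC text. */
#include <stddef.h>
#include <stdio.h>

static char ebcdic_char(unsigned char c)
{
    switch (c) {
    case 0xd2: return 'K';
    case 0xd4: return 'M';
    case 0xe5: return 'V';
    case 0xa9: return 'z';
    case 0x61: return '/';
    default:   return '?';
    }
}

int main(void)
{
    const unsigned char kvm_cpi[3] = { 0xd2, 0xe5, 0xd4 };
    const unsigned char zvm_cpi[4] = { 0xa9, 0x61, 0xe5, 0xd4 };

    for (size_t i = 0; i < sizeof(kvm_cpi); i++)
        putchar(ebcdic_char(kvm_cpi[i]));
    putchar('\n');
    for (size_t i = 0; i < sizeof(zvm_cpi); i++)
        putchar(ebcdic_char(zvm_cpi[i]));
    putchar('\n');
    return 0;
}
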
2 changes: 0 additions & 2 deletions arch/s390/kernel/setup.c
@@ -882,8 +882,6 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n");
else
pr_info("Linux is running as a guest in 64-bit mode\n");

/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
12 changes: 2 additions & 10 deletions arch/s390/kernel/smp.c
@@ -371,13 +371,9 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
*/
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
struct lowcore *lc = pcpu_devices->lowcore;

if (pcpu_devices[0].address == stap())
lc = &S390_lowcore;

pcpu_delegate(&pcpu_devices[0], func, data,
lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
pcpu_devices->lowcore->panic_stack -
PANIC_FRAME_OFFSET + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
@@ -1156,11 +1152,7 @@ static ssize_t __ref rescan_store(struct device *dev,
{
int rc;

rc = lock_device_hotplug_sysfs();
if (rc)
return rc;
rc = smp_rescan_cpus();
unlock_device_hotplug();
return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
6 changes: 2 additions & 4 deletions arch/x86/entry/entry_64_compat.S
@@ -356,8 +356,7 @@ ENTRY(entry_INT80_compat)

/* Need to switch before accessing the thread stack. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
/* In the Xen PV case we already run on the thread stack. */
ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
movq %rsp, %rdi
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

pushq 6*8(%rdi) /* regs->ss */
@@ -366,9 +365,8 @@
pushq 3*8(%rdi) /* regs->cs */
pushq 2*8(%rdi) /* regs->ip */
pushq 1*8(%rdi) /* regs->orig_ax */
pushq (%rdi) /* pt_regs->di */
.Lint80_keep_stack:

pushq (%rdi) /* pt_regs->di */
pushq %rsi /* pt_regs->si */
xorl %esi, %esi /* nospec si */
pushq %rdx /* pt_regs->dx */
18 changes: 0 additions & 18 deletions arch/x86/include/asm/mmu_context.h
@@ -178,10 +178,6 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
* Init a new mm. Used on mm copies, like at fork()
* and on mm's that are brand-new, like at execve().
*/
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -232,22 +228,8 @@ do { \
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
return;

/* Duplicate the oldmm pkey state in mm: */
mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
arch_dup_pkeys(oldmm, mm);
paravirt_arch_dup_mmap(oldmm, mm);
return ldt_dup_context(oldmm, mm);
}
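
The larger mmu_context.h hunk concerns arch_dup_pkeys(): copying the protection-key allocation map and the execute-only pkey from the old mm into the new one at arch_dup_mmap() time, so that keys allocated before fork() stay allocated in the child. For context, the userspace side of protection keys looks roughly as follows; this sketch assumes pkey-capable hardware (OSPKE) and the glibc 2.27+ wrappers, and skips most error handling:

/* Sketch of userspace protection-key usage (x86 pkeys).
 * The keys allocated here are the state that arch_dup_pkeys()-style code
 * must carry across fork(). */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    int pkey = pkey_alloc(0, 0);        /* no access restrictions yet */

    if (buf == MAP_FAILED || pkey < 0) {
        perror("mmap/pkey_alloc");
        return 1;
    }

    /* Tag the mapping with the key, then revoke write access via the key. */
    pkey_mprotect(buf, len, PROT_READ | PROT_WRITE, pkey);
    pkey_set(pkey, PKEY_DISABLE_WRITE);

    volatile char c = buf[0];           /* reads still allowed */
    (void)c;
    /* buf[0] = 1;  would now fault with SIGSEGV */

    pkey_free(pkey);
    return 0;
}
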
7 changes: 2 additions & 5 deletions arch/x86/kernel/kvm.c
@@ -457,7 +457,6 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
#else
u64 ipi_bitmap = 0;
#endif
long ret;

if (cpumask_empty(mask))
return;
@@ -483,19 +482,17 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
max = apic_id < max ? max : apic_id;
} else {
ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
min = max = apic_id;
ipi_bitmap = 0;
}
__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
}

if (ipi_bitmap) {
ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
}

local_irq_restore(flags);
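
Aside from whether the hypercall's return value is captured and warned about, both versions of __send_ipi_mask() build the PV-IPI arguments the same way: destination APIC IDs are packed into a bitmap relative to a sliding window of 2*BITS_PER_LONG IDs starting at min, and KVM_HC_SEND_IPI is issued whenever an ID falls outside the current window. A standalone model of that packing loop, with a printf standing in for the hypercall and the simplifying assumption that IDs arrive in increasing order:

/* Standalone model of the PV-IPI bitmap packing in __send_ipi_mask(). */
#include <stdio.h>

#define BITS_PER_LONG   (8 * (int)sizeof(unsigned long))
#define CLUSTER_SIZE    (2 * BITS_PER_LONG)

static void flush_window(unsigned long bitmap[2], unsigned int min)
{
    /* stands in for the KVM_HC_SEND_IPI hypercall */
    printf("IPI: min=%u bitmap=%#lx,%#lx\n", min, bitmap[0], bitmap[1]);
    bitmap[0] = bitmap[1] = 0;
}

int main(void)
{
    /* Pretend these are the destination APIC IDs, in increasing order. */
    unsigned int apic_ids[] = { 0, 3, 64, 129, 300, 301 };
    unsigned long bitmap[2] = { 0, 0 };
    unsigned int min = 0, max = 0;
    int started = 0;

    for (unsigned int i = 0; i < sizeof(apic_ids) / sizeof(apic_ids[0]); i++) {
        unsigned int id = apic_ids[i];

        if (!started) {
            min = max = id;
            started = 1;
        } else if (id < min + CLUSTER_SIZE) {
            max = id > max ? id : max;
        } else {
            /* ID does not fit the current window: send and restart. */
            flush_window(bitmap, min);
            min = max = id;
        }
        bitmap[(id - min) / BITS_PER_LONG] |=
            1UL << ((id - min) % BITS_PER_LONG);
    }
    if (started)
        flush_window(bitmap, min);
    return 0;
}
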
14 changes: 5 additions & 9 deletions arch/x86/kvm/vmx.c
@@ -8290,11 +8290,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (r < 0)
goto out_vmcs02;

vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
if (!vmx->nested.cached_vmcs12)
goto out_cached_vmcs12;

vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
if (!vmx->nested.cached_shadow_vmcs12)
goto out_cached_shadow_vmcs12;

@@ -11733,7 +11733,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
!nested_exit_intr_ack_set(vcpu) ||
(vmcs12->posted_intr_nv & 0xff00) ||
(vmcs12->posted_intr_desc_addr & 0x3f) ||
(vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
(!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
return -EINVAL;

/* tpr shadow is needed by all apicv features. */
@@ -13984,17 +13984,13 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);

/*
* Copy over the full allocated size of vmcs12 rather than just the size
* of the struct.
*/
if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
return -EFAULT;

if (nested_cpu_has_shadow_vmcs(vmcs12) &&
vmcs12->vmcs_link_pointer != -1ull) {
if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
get_shadow_vmcs12(vcpu), VMCS12_SIZE))
get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
return -EFAULT;
}

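
The vmx.c hunks tie an allocation policy to a copy size: if the full VMCS12_SIZE allocation is what gets copied out to userspace, the buffer must be zero-initialized (kzalloc), otherwise the bytes past sizeof(struct vmcs12) are stale kernel memory; copying only sizeof(*vmcs12) is the other way to keep the pair consistent. The same invariant in plain userspace terms, with malloc/calloc standing in for kmalloc/kzalloc:

/* Userspace analogy for the kzalloc-vs-copy-size pairing in the vmx.c hunks:
 * if the whole allocation is handed out, the allocation must be zeroed;
 * otherwise only the explicitly initialized struct may be handed out. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct payload {                /* stands in for struct vmcs12 */
    int used[4];
};

#define ALLOC_SIZE 4096         /* stands in for VMCS12_SIZE */

int main(void)
{
    /* Variant A: zeroed allocation, safe to copy ALLOC_SIZE bytes. */
    unsigned char *a = calloc(1, ALLOC_SIZE);
    /* Variant B: unzeroed allocation, only the struct bytes are defined. */
    unsigned char *b = malloc(ALLOC_SIZE);

    if (!a || !b)
        return 1;
    memset(b, 0, sizeof(struct payload));   /* initialize just the struct */

    printf("copy %d bytes from a, or %zu bytes from b\n",
           ALLOC_SIZE, sizeof(struct payload));
    free(a);
    free(b);
    return 0;
}
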
5 changes: 3 additions & 2 deletions arch/x86/kvm/x86.c
@@ -6277,7 +6277,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE && ctxt->tf)
if (r == EMULATE_DONE &&
(ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -6867,10 +6868,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
case KVM_HC_CLOCK_PAIRING:
ret = kvm_pv_clock_pairing(vcpu, a0, a1);
break;
#endif
case KVM_HC_SEND_IPI:
ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
break;
#endif
default:
ret = -KVM_ENOSYS;
break;
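
The emulation hunk differs in when a single-step trap is raised after emulating an instruction: only for the guest's own TF flag, or also when userspace asked for single-stepping via KVM_GUESTDBG_SINGLESTEP. From a VMM's point of view that request is made with the KVM_SET_GUEST_DEBUG ioctl; a compressed sketch, assuming vcpu_fd is an already-created vCPU descriptor:

/* Sketch: how a VMM requests guest single-stepping from KVM.
 * Assumes vcpu_fd was obtained via KVM_CREATE_VCPU. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_singlestep(int vcpu_fd)
{
    struct kvm_guest_debug dbg;

    memset(&dbg, 0, sizeof(dbg));
    dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    /* After this, each KVM_RUN exits with KVM_EXIT_DEBUG per guest step. */
    return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
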
4 changes: 2 additions & 2 deletions arch/x86/lib/kaslr.c
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
u16 status, timer;

do {
outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
I8254_PORT_CONTROL);
outb(I8254_PORT_CONTROL,
I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
status = inb(I8254_PORT_COUNTER0);
timer = inb(I8254_PORT_COUNTER0);
timer |= inb(I8254_PORT_COUNTER0) << 8;
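
The kaslr.c hunk is purely about outb() argument order: in the kernel (and in glibc's <sys/io.h>) the convention is outb(value, port), so the two variants differ in whether the 8254 read-back command byte or the control-port number is sent as the data byte. A root-only userspace sketch that latches and reads PIT counter 0 the same way; 0x43 and 0x40 are the standard 8254 control and counter-0 ports:

/* Sketch: read the 8254 PIT counter 0 via the read-back command.
 * outb() here, as in the kernel, takes (value, port).
 * Needs root (ioperm) on an x86 machine that still exposes the PIT. */
#include <stdio.h>
#include <sys/io.h>

#define PIT_PORT_CONTROL    0x43
#define PIT_PORT_COUNTER0   0x40
#define PIT_CMD_READBACK    0xc0
#define PIT_SELECT_COUNTER0 0x02

int main(void)
{
    unsigned char status;
    unsigned short timer;

    if (ioperm(PIT_PORT_COUNTER0, 4, 1) != 0) {
        perror("ioperm");
        return 1;
    }

    /* value first, port second */
    outb(PIT_CMD_READBACK | PIT_SELECT_COUNTER0, PIT_PORT_CONTROL);
    status = inb(PIT_PORT_COUNTER0);        /* status byte */
    timer  = inb(PIT_PORT_COUNTER0);        /* count low   */
    timer |= inb(PIT_PORT_COUNTER0) << 8;   /* count high  */

    printf("status=%#x counter=%u\n", status, timer);
    return 0;
}
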