Merge tag 'v6.1.50' into 6.1-main
This is the 6.1.50 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmTvTg0ACgkQONu9yGCS
# aT7XrQ/+NXoPhqMm2HteD9lWbji5b/c45igLPyE4RKH3qn8NVc3AkLZfb+IbEPYs
# hQwDE85ZubeRkP0aJKnbCbNANtH3UKtIa54DphPlnN1H68lDLEqlPDY5wwydY1Oc
# eD3UsgDvzc1zcVzAsJb9a1L18v3Ne+wbuQrH7MgAxMcElBKLDn5gnP4swVMIau04
# oo/j9FVhq990MX/x+YrFRtXSHP3ER7KW/BOAZieRwVdRcm34awTHh+ZlHJg4tLAP
# 5cDOnS8YPgJmg8NKQ6VOJfCg5G8Pw+cLT10fRQEjDvndV29VLyiNuiRMQZhpO6DE
# fW3nR0CtBQVmKZpj7cWXJ4XNaLvT1KscMmUjc6aRVx7DVdeOG6muFGRWjlSw0w8M
# kDwO3Zh+2khxG5JZnn0nTidUGiy3gZMMvsrjdbZRiAh3+URr1k49ysku7scpB/G9
# TXcXL8ZappNWv65W45dS9bg7cQKxBm2hDHGw1y77cdaOYyCPti9loqpeuR4alVg6
# aZ8fH97jfCrKsSWiqDUWU4WDhc9vrOYR77P7/1TGxyf7qwsMRtsBBde0Y+z0wIfW
# ggsflQUiCigYf7m9Q+7+0udZCnckvFNRlOEeHNg0q47T5khkN+emot+EBlvDUPDc
# KaYuHEMuaJl9zFugMuMtSzgy95S/Dk2j2fz60r3zeAS7C/tJ49Q=
# =Z9mR
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed Aug 30 16:11:25 2023 CEST
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
frank-w committed Aug 31, 2023
2 parents 67cdc75 + a2943d2 commit 36d6b77
Showing 157 changed files with 1,356 additions and 1,011 deletions.
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -6027,7 +6027,7 @@ S:	Supported
F:	Documentation/networking/devlink
F:	include/net/devlink.h
F:	include/uapi/linux/devlink.h
-F:	net/core/devlink.c
+F:	net/devlink/

DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
M: Christoph Niedermaier <cniedermaier@dh-electronics.com>
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
-SUBLEVEL = 49
+SUBLEVEL = 50
EXTRAVERSION =
NAME = Curry Ramen

21 changes: 19 additions & 2 deletions arch/mips/include/asm/cpu-features.h
@@ -121,7 +121,24 @@
#define cpu_has_4k_cache __isa_ge_or_opt(1, MIPS_CPU_4K_CACHE)
#endif
#ifndef cpu_has_octeon_cache
-#define cpu_has_octeon_cache	0
+#define cpu_has_octeon_cache					\
+({								\
+	int __res;						\
+								\
+	switch (boot_cpu_type()) {				\
+	case CPU_CAVIUM_OCTEON:					\
+	case CPU_CAVIUM_OCTEON_PLUS:				\
+	case CPU_CAVIUM_OCTEON2:				\
+	case CPU_CAVIUM_OCTEON3:				\
+		__res = 1;					\
+		break;						\
+								\
+	default:						\
+		__res = 0;					\
+	}							\
+								\
+	__res;							\
+})
#endif
/* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
#ifndef cpu_has_fpu
@@ -351,7 +368,7 @@
({ \
int __res; \
\
-	switch (current_cpu_type()) {				\
+	switch (boot_cpu_type()) {				\
case CPU_M14KC: \
case CPU_74K: \
case CPU_1074K: \
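Note on the two MIPS hunks: both decide the feature from boot_cpu_type() rather than a hardwired 0 or current_cpu_type(), so the answer is tied to the boot CPU and stays stable regardless of which CPU happens to evaluate the macro. Feature macros like this are consumed as plain boolean expressions; a minimal consumer sketch, loosely modeled on cpu_cache_init() in arch/mips/mm/cache.c (helper names are real, the body is simplified and illustrative):

void cpu_cache_init(void)
{
	if (cpu_has_octeon_cache)	/* with this change, true on OCTEON CPUs */
		octeon_cache_init();	/* OCTEON-specific cache setup */
	else if (cpu_has_4k_cache)
		r4k_cache_init();	/* classic R4000-style cache setup */
}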
28 changes: 17 additions & 11 deletions arch/riscv/Kconfig
@@ -447,24 +447,30 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE
config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
def_bool y
# https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
-	depends on AS_IS_GNU && AS_VERSION >= 23800
+	# https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=98416dbb0a62579d4a7a4a76bab51b5b52fec2cd
+	depends on AS_IS_GNU && AS_VERSION >= 23600
help
-	  Newer binutils versions default to ISA spec version 20191213 which
-	  moves some instructions from the I extension to the Zicsr and Zifencei
-	  extensions.
+	  Binutils-2.38 and GCC-12.1.0 bumped the default ISA spec to the newer
+	  20191213 version, which moves some instructions from the I extension to
+	  the Zicsr and Zifencei extensions. This requires explicitly specifying
+	  Zicsr and Zifencei when binutils >= 2.38 or GCC >= 12.1.0. Zicsr
+	  and Zifencei are supported in binutils from version 2.36 onwards.
+	  To make life easier, and avoid forcing toolchains that default to a
+	  newer ISA spec to version 2.2, relax the check to binutils >= 2.36.
+	  For clang < 17 or GCC < 11.3.0, for which this is not possible or need
+	  special treatment, this is dealt with in TOOLCHAIN_NEEDS_OLD_ISA_SPEC.

config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
def_bool y
depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
# https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
-	depends on CC_IS_CLANG && CLANG_VERSION < 170000
+	# https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=d29f5d6ab513c52fd872f532c492e35ae9fd6671
+	depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110300)
help
-	  Certain versions of clang do not support zicsr and zifencei via -march
-	  but newer versions of binutils require it for the reasons noted in the
-	  help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
-	  option causes an older ISA spec compatible with these older versions
-	  of clang to be passed to GAS, which has the same result as passing zicsr
-	  and zifencei to -march.
+	  Certain versions of clang and GCC do not support zicsr and zifencei via
+	  -march. This option causes an older ISA spec compatible with these older
+	  versions of clang and GCC to be passed to GAS, which has the same result
+	  as passing zicsr and zifencei to -march.

config FPU
bool "FPU support"
8 changes: 7 additions & 1 deletion arch/riscv/kernel/compat_vdso/Makefile
@@ -11,7 +11,13 @@ compat_vdso-syms += flush_icache
COMPAT_CC := $(CC)
COMPAT_LD := $(LD)

-COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+# binutils 2.35 does not support the zifencei extension, but in the ISA
+# spec 20191213, G stands for IMAFD_ZICSR_ZIFENCEI.
+ifdef CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+else
+COMPAT_CC_FLAGS := -march=rv32imafd -mabi=ilp32
+endif
COMPAT_LD_FLAGS := -melf32lriscv

# Disable attributes, as they're useless and break the build.
3 changes: 1 addition & 2 deletions arch/x86/kernel/fpu/context.h
@@ -19,8 +19,7 @@
* FPU state for a task MUST let the rest of the kernel know that the
* FPU registers are no longer valid for this task.
*
- * Either one of these invalidation functions is enough. Invalidate
- * a resource you control: CPU if using the CPU for something else
+ * Invalidate a resource you control: CPU if using the CPU for something else
* (with preemption disabled), FPU for the current task, or a task that
* is prevented from running by the current task.
*/
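For reference, the invalidation helper the updated comment describes lives in this same header; a simplified sketch of its shape (details beyond last_cpu are elided):

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;	/* no CPU's live registers match this task's state */
}

Setting last_cpu to -1 makes the fpregs_state_valid() check fail, forcing the next FPU activation to reload registers from the task's memory image.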
2 changes: 1 addition & 1 deletion arch/x86/kernel/fpu/core.c
@@ -679,7 +679,7 @@ static void fpu_reset_fpregs(void)
struct fpu *fpu = &current->thread.fpu;

fpregs_lock();
-	fpu__drop(fpu);
+	__fpu_invalidate_fpregs_state(fpu);
/*
* This does not change the actual hardware registers. It just
* resets the memory image and sets TIF_NEED_FPU_LOAD so a
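Why the swap matters, as a reasoning sketch in comment form (the failure mode is paraphrased from the fix, not quoted):

/*
 * fpregs are lazily reloaded only when the cached task/CPU association
 * is broken, roughly:
 *
 *     fpregs_state_valid(fpu, cpu) ~= (cpu == fpu->last_cpu && ...)
 *
 * fpu__drop() did not break that association, so after the memory image
 * was reset the registers could still be considered valid and never
 * reloaded, leaking the previous program's FPU contents.
 * __fpu_invalidate_fpregs_state() sets fpu->last_cpu = -1, guaranteeing
 * a reload from the freshly reset image.
 */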
7 changes: 7 additions & 0 deletions arch/x86/kernel/fpu/xstate.c
@@ -882,6 +882,13 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
goto out_disable;
}

+	/*
+	 * CPU capabilities initialization runs before FPU init. So
+	 * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
+	 * functional, set the feature bit so depending code works.
+	 */
+	setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
+
print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
fpu_kernel_cfg.max_features,
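The effect of the new setup_force_cpu_cap() call is visible to any code gated on the capability bit; a hedged sketch of a typical consumer (the helper in the branch is hypothetical):

	/* Before this change, boot_cpu_has(X86_FEATURE_OSXSAVE) was false even
	 * with XSAVE fully enabled, because the capability bits were
	 * snapshotted from CPUID before fpu__init_system_xstate() ran.
	 */
	if (boot_cpu_has(X86_FEATURE_OSXSAVE))
		enable_xsave_fast_path();	/* hypothetical consumer */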
3 changes: 2 additions & 1 deletion arch/x86/kvm/mmu/mmu.c
@@ -4212,7 +4212,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
* root was invalidated by a memslot update or a relevant mmu_notifier fired.
*/
static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
-				struct kvm_page_fault *fault, int mmu_seq)
+				struct kvm_page_fault *fault,
+				unsigned long mmu_seq)
{
struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);

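Why int-to-unsigned-long matters: mmu_seq is a snapshot of kvm->mmu_invalidate_seq, which is an unsigned long. A self-contained sketch of the truncation/sign-extension hazard the wider type avoids (values are hypothetical):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long mmu_invalidate_seq = 0x80000000UL; /* current count */
	int mmu_seq = (int)mmu_invalidate_seq;           /* truncated snapshot */

	/* The int is sign-extended to 0xffffffff80000000 on comparison, so
	 * the staleness check misfires even though nothing changed, and the
	 * page fault would be retried forever.
	 */
	bool stale = (mmu_invalidate_seq != (unsigned long)mmu_seq);
	printf("stale = %d\n", stale);	/* prints 1 */
	return 0;
}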
121 changes: 56 additions & 65 deletions arch/x86/kvm/mmu/tdp_mmu.c
@@ -51,7 +51,17 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
if (!kvm->arch.tdp_mmu_enabled)
return;

-	/* Also waits for any queued work items. */
+	/*
+	 * Invalidate all roots, which besides the obvious, schedules all roots
+	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
+	 * ultimately frees all roots.
+	 */
+	kvm_tdp_mmu_invalidate_all_roots(kvm);
+
+	/*
+	 * Destroying a workqueue also first flushes the workqueue, i.e. no
+	 * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
+	 */
destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);

WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
@@ -127,16 +137,6 @@ static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root
queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
}

-static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
-{
-	union kvm_mmu_page_role role = page->role;
-	role.invalid = true;
-
-	/* No need to use cmpxchg, only the invalid bit can change. */
-	role.word = xchg(&page->role.word, role.word);
-	return role.invalid;
-}
-
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared)
{
@@ -145,45 +145,12 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
return;

-	WARN_ON(!root->tdp_mmu_page);
-
	/*
-	 * The root now has refcount=0. It is valid, but readers already
-	 * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
-	 * rejects it. This remains true for the rest of the execution
-	 * of this function, because readers visit valid roots only
-	 * (except for tdp_mmu_zap_root_work(), which however
-	 * does not acquire any reference itself).
-	 *
-	 * Even though there are flows that need to visit all roots for
-	 * correctness, they all take mmu_lock for write, so they cannot yet
-	 * run concurrently. The same is true after kvm_tdp_root_mark_invalid,
-	 * since the root still has refcount=0.
-	 *
-	 * However, tdp_mmu_zap_root can yield, and writers do not expect to
-	 * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
-	 * So the root temporarily gets an extra reference, going to refcount=1
-	 * while staying invalid. Readers still cannot acquire any reference;
-	 * but writers are now allowed to run if tdp_mmu_zap_root yields and
-	 * they might take an extra reference if they themselves yield.
-	 * Therefore, when the reference is given back by the worker,
-	 * there is no guarantee that the refcount is still 1. If not, whoever
-	 * puts the last reference will free the page, but they will not have to
-	 * zap the root because a root cannot go from invalid to valid.
+	 * The TDP MMU itself holds a reference to each root until the root is
+	 * explicitly invalidated, i.e. the final reference should be never be
+	 * put for a valid root.
	 */
-	if (!kvm_tdp_root_mark_invalid(root)) {
-		refcount_set(&root->tdp_mmu_root_count, 1);
-
-		/*
-		 * Zapping the root in a worker is not just "nice to have";
-		 * it is required because kvm_tdp_mmu_invalidate_all_roots()
-		 * skips already-invalid roots. If kvm_tdp_mmu_put_root() did
-		 * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
-		 * might return with some roots not zapped yet.
-		 */
-		tdp_mmu_schedule_zap_root(kvm, root);
-		return;
-	}
+	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);

spin_lock(&kvm->arch.tdp_mmu_pages_lock);
list_del_rcu(&root->link);
@@ -329,7 +296,14 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
root = tdp_mmu_alloc_sp(vcpu);
tdp_mmu_init_sp(root, NULL, 0, role);

-	refcount_set(&root->tdp_mmu_root_count, 1);
+	/*
+	 * TDP MMU roots are kept until they are explicitly invalidated, either
+	 * by a memslot update or by the destruction of the VM. Initialize the
+	 * refcount to two; one reference for the vCPU, and one reference for
+	 * the TDP MMU itself, which is held until the root is invalidated and
+	 * is ultimately put by tdp_mmu_zap_root_work().
+	 */
+	refcount_set(&root->tdp_mmu_root_count, 2);

spin_lock(&kvm->arch.tdp_mmu_pages_lock);
list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
@@ -1027,32 +1001,49 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
/*
* Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
* is about to be zapped, e.g. in response to a memslots update. The actual
- * zapping is performed asynchronously, so a reference is taken on all roots.
- * Using a separate workqueue makes it easy to ensure that the destruction is
- * performed before the "fast zap" completes, without keeping a separate list
- * of invalidated roots; the list is effectively the list of work items in
- * the workqueue.
- *
- * Get a reference even if the root is already invalid, the asynchronous worker
- * assumes it was gifted a reference to the root it processes. Because mmu_lock
- * is held for write, it should be impossible to observe a root with zero refcount,
- * i.e. the list of roots cannot be stale.
+ * zapping is performed asynchronously. Using a separate workqueue makes it
+ * easy to ensure that the destruction is performed before the "fast zap"
+ * completes, without keeping a separate list of invalidated roots; the list is
+ * effectively the list of work items in the workqueue.
 *
- * This has essentially the same effect for the TDP MMU
- * as updating mmu_valid_gen does for the shadow MMU.
+ * Note, the asynchronous worker is gifted the TDP MMU's reference.
+ * See kvm_tdp_mmu_get_vcpu_root_hpa().
*/
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
struct kvm_mmu_page *root;

-	lockdep_assert_held_write(&kvm->mmu_lock);
-	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
-		if (!root->role.invalid &&
-		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
+	/*
+	 * mmu_lock must be held for write to ensure that a root doesn't become
+	 * invalid while there are active readers (invalidating a root while
+	 * there are active readers may or may not be problematic in practice,
+	 * but it's uncharted territory and not supported).
+	 *
+	 * Waive the assertion if there are no users of @kvm, i.e. the VM is
+	 * being destroyed after all references have been put, or if no vCPUs
+	 * have been created (which means there are no roots), i.e. the VM is
+	 * being destroyed in an error path of KVM_CREATE_VM.
+	 */
+	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
+	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
+		lockdep_assert_held_write(&kvm->mmu_lock);
+
+	/*
+	 * As above, mmu_lock isn't held when destroying the VM! There can't
+	 * be other references to @kvm, i.e. nothing else can invalidate roots
+	 * or be consuming roots, but walking the list of roots does need to be
+	 * guarded against roots being deleted by the asynchronous zap worker.
+	 */
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
+		if (!root->role.invalid) {
			root->role.invalid = true;
			tdp_mmu_schedule_zap_root(kvm, root);
		}
	}
+
+	rcu_read_unlock();
}

/*
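Taken together, these tdp_mmu.c hunks replace the mark-invalid-on-last-put scheme with an explicit reference owned by the TDP MMU itself. A simplified lifecycle sketch (comment only; refcount values assume a single vCPU user):

/*
 * kvm_tdp_mmu_get_vcpu_root_hpa()      refcount = 2 (vCPU + TDP MMU)
 * vCPU drops its reference             refcount = 1 (root stays alive)
 * kvm_tdp_mmu_invalidate_all_roots()   role.invalid = true, zap work queued
 * tdp_mmu_zap_root_work() completes    puts the TDP MMU's reference
 * kvm_tdp_mmu_put_root() hits zero     root unlinked (RCU) and freed
 */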
3 changes: 0 additions & 3 deletions drivers/block/ublk_drv.c
@@ -1223,9 +1223,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
ub_cmd->result);

-	if (!(issue_flags & IO_URING_F_SQE128))
-		goto out;
-
if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
goto out;

13 changes: 7 additions & 6 deletions drivers/clk/clk-devres.c
@@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put);
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
-	struct clk **ptr, *clk;
+	struct devm_clk_state *state;
+	struct clk *clk;

-	ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
+	state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
+	if (!state)
return ERR_PTR(-ENOMEM);

clk = of_clk_get_by_name(np, con_id);
if (!IS_ERR(clk)) {
-		*ptr = clk;
-		devres_add(dev, ptr);
+		state->clk = clk;
+		devres_add(dev, state);
} else {
-		devres_free(ptr);
+		devres_free(state);
}

return clk;
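Context for this fix: devm_clk_release() interprets the devres payload as a struct devm_clk_state, so the old code, which allocated room for only a bare struct clk *, made the release callback read past the end of the allocation. A sketch of the assumed surrounding code in clk-devres.c (shape may differ in detail from this release):

struct devm_clk_state {
	struct clk *clk;
	void (*exit)(struct clk *clk);
};

static void devm_clk_release(struct device *dev, void *res)
{
	struct devm_clk_state *state = res;

	if (state->exit)
		state->exit(state->clk);	/* out of bounds for a bare clk pointer */

	clk_put(state->clk);
}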
18 changes: 9 additions & 9 deletions drivers/dma-buf/sw_sync.c
@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
*/
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
+	LIST_HEAD(signalled);
struct sync_pt *pt, *next;

trace_sync_timeline(obj);
@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
if (!timeline_fence_signaled(&pt->base))
break;

-		list_del_init(&pt->link);
+		dma_fence_get(&pt->base);
+
+		list_move_tail(&pt->link, &signalled);
		rb_erase(&pt->node, &obj->pt_tree);

-		/*
-		 * A signal callback may release the last reference to this
-		 * fence, causing it to be freed. That operation has to be
-		 * last to avoid a use after free inside this loop, and must
-		 * be after we remove the fence from the timeline in order to
-		 * prevent deadlocking on timeline->lock inside
-		 * timeline_fence_release().
-		 */
		dma_fence_signal_locked(&pt->base);
	}

	spin_unlock_irq(&obj->lock);
+
+	list_for_each_entry_safe(pt, next, &signalled, link) {
+		list_del_init(&pt->link);
+		dma_fence_put(&pt->base);
+	}
}

/**
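The sw_sync change is the classic defer-the-final-put pattern: timeline_fence_release() takes obj->lock, so dropping what may be the last fence reference while holding obj->lock can self-deadlock. Condensed shape of the fix, mirroring the loop above (illustrative):

	spin_lock_irq(&obj->lock);
	/* under the lock: keep each signalled fence alive and detach it */
	dma_fence_get(&pt->base);
	list_move_tail(&pt->link, &signalled);
	dma_fence_signal_locked(&pt->base);
	spin_unlock_irq(&obj->lock);

	/* outside the lock: the final put may invoke timeline_fence_release(),
	 * which can now take obj->lock without deadlocking */
	dma_fence_put(&pt->base);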
