diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-03-18 08:06:30 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-03-18 08:06:30 -0700 |
| commit | efa0adb5041591a3bf63e30b36239e4537211222 (patch) | |
| tree | 6661e768d6f450ce0d3a42593206847136367f9a | |
| parent | a989fde763f4f24209e4702f50a45be572340e68 (diff) | |
| parent | c252c12d1f55bd5737e3b8e7839914ccdc7a701c (diff) | |
Merge tag 'loongarch-fixes-7.0-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
Pull LoongArch fixes from Huacai Chen:
- only use SC.Q when supported by the assembler to fix a build failure
- fix calling smp_processor_id() in preemptible code
- make a BPF helper arch_protect_bpf_trampoline() return 0 to fix a
kernel memory access failure
- fix a typo issue in kvm_vm_init_features()
* tag 'loongarch-fixes-7.0-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
LoongArch: KVM: Fix typo issue in kvm_vm_init_features()
LoongArch: BPF: Make arch_protect_bpf_trampoline() return 0
LoongArch: No need to flush icache if text copy failed
LoongArch: Check return values for set_memory_{rw,rox}
LoongArch: Give more information if kmem access failed
LoongArch: Fix calling smp_processor_id() in preemptible code
LoongArch: Only use SC.Q when supported by the assembler
| -rw-r--r-- | arch/loongarch/Kconfig | 3 | ||||
| -rw-r--r-- | arch/loongarch/include/asm/cmpxchg.h | 5 | ||||
| -rw-r--r-- | arch/loongarch/include/asm/uaccess.h | 14 | ||||
| -rw-r--r-- | arch/loongarch/kernel/inst.c | 31 | ||||
| -rw-r--r-- | arch/loongarch/kvm/vm.c | 4 | ||||
| -rw-r--r-- | arch/loongarch/net/bpf_jit.c | 11 |
6 files changed, 58 insertions, 10 deletions
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index d211c6572b0a..92068ff38685 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -304,6 +304,9 @@ config AS_HAS_LBT_EXTENSION config AS_HAS_LVZ_EXTENSION def_bool $(as-instr,hvcl 0) +config AS_HAS_SCQ_EXTENSION + def_bool $(as-instr,sc.q \$t0$(comma)\$t1$(comma)\$t2) + config CC_HAS_ANNOTATE_TABLEJUMP def_bool $(cc-option,-mannotate-tablejump) diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h index 58cabab6d90d..909f9274fe71 100644 --- a/arch/loongarch/include/asm/cmpxchg.h +++ b/arch/loongarch/include/asm/cmpxchg.h @@ -238,6 +238,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int arch_cmpxchg((ptr), (o), (n)); \ }) +#ifdef CONFIG_AS_HAS_SCQ_EXTENSION + union __u128_halves { u128 full; struct { @@ -290,6 +292,9 @@ union __u128_halves { BUILD_BUG_ON(sizeof(*(ptr)) != 16); \ __arch_cmpxchg128(ptr, o, n, ""); \ }) + +#endif /* CONFIG_AS_HAS_SCQ_EXTENSION */ + #else #include <asm-generic/cmpxchg-local.h> #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n)) diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h index 4e259d490e45..438269313e78 100644 --- a/arch/loongarch/include/asm/uaccess.h +++ b/arch/loongarch/include/asm/uaccess.h @@ -253,8 +253,13 @@ do { \ \ __get_kernel_common(*((type *)(dst)), sizeof(type), \ (__force type *)(src)); \ - if (unlikely(__gu_err)) \ + if (unlikely(__gu_err)) { \ + pr_info("%s: memory access failed, ecode 0x%x\n", \ + __func__, read_csr_excode()); \ + pr_info("%s: the caller is %pS\n", \ + __func__, __builtin_return_address(0)); \ goto err_label; \ + } \ } while (0) #define __put_kernel_nofault(dst, src, type, err_label) \ @@ -264,8 +269,13 @@ do { \ \ __pu_val = *(__force type *)(src); \ __put_kernel_common(((type *)(dst)), sizeof(type)); \ - if (unlikely(__pu_err)) \ + if (unlikely(__pu_err)) { \ + pr_info("%s: memory access failed, ecode 0x%x\n", \ + __func__, read_csr_excode()); \ + pr_info("%s: the caller is %pS\n", \ + __func__, __builtin_return_address(0)); \ goto err_label; \ + } \ } while (0) extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n); diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c index bf037f0c6b26..1a728082944c 100644 --- a/arch/loongarch/kernel/inst.c +++ b/arch/loongarch/kernel/inst.c @@ -246,32 +246,51 @@ static int text_copy_cb(void *data) if (smp_processor_id() == copy->cpu) { ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len); - if (ret) + if (ret) { pr_err("%s: operation failed\n", __func__); + return ret; + } } flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len); - return ret; + return 0; } int larch_insn_text_copy(void *dst, void *src, size_t len) { int ret = 0; + int err = 0; size_t start, end; struct insn_copy copy = { .dst = dst, .src = src, .len = len, - .cpu = smp_processor_id(), + .cpu = raw_smp_processor_id(), }; + /* + * Ensure copy.cpu won't be hot removed before stop_machine. + * If it is removed nobody will really update the text.
+ */ + lockdep_assert_cpus_held(); + start = round_down((size_t)dst, PAGE_SIZE); end = round_up((size_t)dst + len, PAGE_SIZE); - set_memory_rw(start, (end - start) / PAGE_SIZE); - ret = stop_machine(text_copy_cb, &copy, cpu_online_mask); - set_memory_rox(start, (end - start) / PAGE_SIZE); + err = set_memory_rw(start, (end - start) / PAGE_SIZE); + if (err) { + pr_info("%s: set_memory_rw() failed\n", __func__); + return err; + } + + ret = stop_machine_cpuslocked(text_copy_cb, &copy, cpu_online_mask); + + err = set_memory_rox(start, (end - start) / PAGE_SIZE); + if (err) { + pr_info("%s: set_memory_rox() failed\n", __func__); + return err; + } return ret; } diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 17b3d5b36cfc..8cc5ee1c53ef 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -49,8 +49,8 @@ static void kvm_vm_init_features(struct kvm *kvm) kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU); /* Enable all PV features by default */ - kvm->arch.pv_features = BIT(KVM_FEATURE_IPI); - kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI); + kvm->arch.pv_features |= BIT(KVM_FEATURE_IPI); + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI); if (kvm_pvtime_supported()) { kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT); kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME); diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index 3bd89f55960d..9cb796e16379 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -1379,9 +1379,11 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len) { int ret; + cpus_read_lock(); mutex_lock(&text_mutex); ret = larch_insn_text_copy(dst, src, len); mutex_unlock(&text_mutex); + cpus_read_unlock(); return ret ? ERR_PTR(-EINVAL) : dst; } @@ -1429,10 +1431,12 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t, if (ret) return ret; + cpus_read_lock(); mutex_lock(&text_mutex); if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES)) ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES); mutex_unlock(&text_mutex); + cpus_read_unlock(); return ret; } @@ -1450,10 +1454,12 @@ int bpf_arch_text_invalidate(void *dst, size_t len) for (i = 0; i < (len / sizeof(u32)); i++) inst[i] = INSN_BREAK; + cpus_read_lock(); mutex_lock(&text_mutex); if (larch_insn_text_copy(dst, inst, len)) ret = -EINVAL; mutex_unlock(&text_mutex); + cpus_read_unlock(); kvfree(inst); @@ -1568,6 +1574,11 @@ void arch_free_bpf_trampoline(void *image, unsigned int size) bpf_prog_pack_free(image, size); } +int arch_protect_bpf_trampoline(void *image, unsigned int size) +{ + return 0; +} + /* * Sign-extend the register if necessary */ |
