From 0089fde60dcd5d7c7fb2f64766519c3f929d7656 Mon Sep 17 00:00:00 2001 From: cheng2000160 Date: Tue, 28 May 2024 18:39:17 +0800 Subject: [PATCH 1/2] lib/tnsynchronize: 1. Use gcc atomic operation APIs to implement spinlock(ticket/binary), instead of assembly code 2. Maintain a new tnsynchronize lib for managing synchronization mechanisms Signed-off-by: cheng2000160 --- arch/arm/arm64/include/uk/asm/spinlock.h | 94 --------- arch/x86/x86_64/include/uk/asm/spinlock.h | 92 --------- drivers/ukintctlr/gic/gic-v2.c | 2 +- drivers/ukintctlr/gic/gic-v3.c | 2 +- .../ukintctlr/gic/include/uk/intctlr/gic.h | 6 +- drivers/virtio/9p/virtio_9p.c | 20 +- include/uk/plat/spinlock.h | 10 +- lib/Makefile.uk | 1 + lib/tnsynchronize/Config.uk | 26 +++ lib/tnsynchronize/Makefile.uk | 14 ++ lib/tnsynchronize/exportsyms.uk | 8 + .../tnsynchronize/include/tn}/spinlock.h | 107 ++++++---- lib/tnsynchronize/spinlock.c | 191 ++++++++++++++++++ lib/tnsynchronize/tests/test_spinlock.c | 126 ++++++++++++ lib/uk9p/9pdev.c | 6 +- lib/uklock/include/uk/arch/arm64/ticketlock.h | 66 ++---- lib/uklock/include/uk/mutex.h | 22 +- lib/uklock/include/uk/spinlock.h | 37 +--- lib/uklock/mutex.c | 10 +- lib/uknetdev/include/uk/netdev.h | 24 +-- lib/uknetdev/include/uk/netdev_core.h | 2 +- lib/uknetdev/stats.c | 4 +- lib/uksched/include/uk/wait.h | 2 +- lib/uksched/include/uk/wait_types.h | 2 +- lib/uksched/sched.c | 1 + lib/vfscore/include/vfscore/eventpoll.h | 2 +- plat/xen/drivers/9p/9pfront.c | 22 +- plat/xen/include/xenbus/xenbus.h | 2 +- plat/xen/xenbus/client.c | 10 +- plat/xen/xenbus/xs_comms.c | 26 +-- plat/xen/xenbus/xs_watch.c | 2 +- 31 files changed, 546 insertions(+), 393 deletions(-) delete mode 100644 arch/arm/arm64/include/uk/asm/spinlock.h delete mode 100644 arch/x86/x86_64/include/uk/asm/spinlock.h create mode 100644 lib/tnsynchronize/Config.uk create mode 100644 lib/tnsynchronize/Makefile.uk create mode 100644 lib/tnsynchronize/exportsyms.uk rename {include/uk/arch => 
lib/tnsynchronize/include/tn}/spinlock.h (37%) create mode 100644 lib/tnsynchronize/spinlock.c create mode 100644 lib/tnsynchronize/tests/test_spinlock.c diff --git a/arch/arm/arm64/include/uk/asm/spinlock.h b/arch/arm/arm64/include/uk/asm/spinlock.h deleted file mode 100644 index 757d66ff..00000000 --- a/arch/arm/arm64/include/uk/asm/spinlock.h +++ /dev/null @@ -1,94 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause */ -/* - * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved. - * Copyright (c) 2021 OpenSynergy GmbH. All rights reserved. - * Copyright (c) 2021 Karlsruhe Institute of Technology (KIT). - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __UKARCH_SPINLOCK_H__ -#error Do not include this header directly -#endif - -#include -#include - -struct __align(8) __spinlock { - volatile int lock; -}; - -/* Initialize a spinlock to unlocked state */ -#define UKARCH_SPINLOCK_INITIALIZER() { 0 } - -static inline void ukarch_spin_init(struct __spinlock *lock) -{ - lock->lock = 0; -} - -static inline void ukarch_spin_lock(struct __spinlock *lock) -{ - register int r, locked = 1; - - __asm__ __volatile__( - " sevl\n" /* set event locally */ - "1: wfe\n" /* wait for event */ - "2: ldaxr %w0, [%1]\n" /* exclusive load lock value */ - " cbnz %w0, 1b\n" /* check if already locked */ - " stxr %w0, %w2, [%1]\n"/* try to lock it */ - " cbnz %w0, 2b\n" /* jump to l2 if we failed */ - : "=&r" (r) - : "r" (&lock->lock), "r" (locked)); -} - -static inline void ukarch_spin_unlock(struct __spinlock *lock) -{ - __asm__ __volatile__( - "stlr wzr, [%0]\n" /* unlock lock */ - "sev\n" /* wake up any waiters */ - : - : "r" (&lock->lock)); -} - -static inline int ukarch_spin_trylock(struct __spinlock *lock) -{ - register int r, locked = 1; - - __asm__ __volatile__( - " ldaxr %w0, [%1]\n" /* exclusive load lock value */ - " cbnz %w0, 1f\n" /* bail out if locked */ - " stxr %w0, %w2, [%1]\n"/* try to lock it */ - "1:\n" - : "=&r" (r) - : "r" (&lock->lock), "r" (locked)); - - return !r; -} - -static inline int ukarch_spin_is_locked(struct __spinlock *lock) -{ - return 
UK_READ_ONCE(lock->lock); -} diff --git a/arch/x86/x86_64/include/uk/asm/spinlock.h b/arch/x86/x86_64/include/uk/asm/spinlock.h deleted file mode 100644 index 6d97be9b..00000000 --- a/arch/x86/x86_64/include/uk/asm/spinlock.h +++ /dev/null @@ -1,92 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause */ -/* - * Copyright (c) 2021 Karlsruhe Institute of Technology (KIT). - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ -#ifndef __UKARCH_SPINLOCK_H__ -#error Do not include this header directly -#endif - -#include - -struct __spinlock { - volatile int lock; -}; - -/* Initialize a spinlock to unlocked state */ -#define UKARCH_SPINLOCK_INITIALIZER() { 0 } - -static inline void ukarch_spin_init(struct __spinlock *lock) -{ - lock->lock = 0; -} - -static inline void ukarch_spin_lock(struct __spinlock *lock) -{ - register int locked = 1; - - __asm__ __volatile__( - "1: mov (%0), %%eax\n" /* read current value */ - " test %%eax, %%eax\n" /* check if locked */ - " jz 3f\n" /* if not locked, try get it */ - "2: pause\n" /* is locked, hint spinning */ - " jmp 1b\n" /* retry */ - "3: lock; cmpxchg %1, (%0)\n" /* try to acquire spinlock */ - " jnz 2b\n" /* if unsuccessful, retry */ - : - : "r" (&lock->lock), "r" (locked) - : "eax"); -} - -static inline void ukarch_spin_unlock(struct __spinlock *lock) -{ - UK_WRITE_ONCE(lock->lock, 0); -} - -static inline int ukarch_spin_trylock(struct __spinlock *lock) -{ - register int r = 0, locked = 1; - - __asm__ __volatile__( - " mov (%1), %%eax\n" /* read current value */ - " test %%eax, %%eax\n" /* bail out if locked */ - " jnz 1f\n" - " lock; cmpxchg %2, (%1)\n" /* try to acquire spinlock */ - " cmove %2, %0\n" /* store if successful */ - "1:\n" - : "+&r" (r) - : "r" (&lock->lock), "r" (locked) - : "eax"); - - return r; -} - -static inline int ukarch_spin_is_locked(struct __spinlock *lock) -{ - return UK_READ_ONCE(lock->lock); -} diff --git a/drivers/ukintctlr/gic/gic-v2.c b/drivers/ukintctlr/gic/gic-v2.c index c3102f0d..bca64271 100644 --- a/drivers/ukintctlr/gic/gic-v2.c +++ b/drivers/ukintctlr/gic/gic-v2.c @@ -43,7 +43,7 @@ #include #endif /* CONFIG_UKPLAT_ACPI */ #include -#include +#include #include #include #include diff --git a/drivers/ukintctlr/gic/gic-v3.c b/drivers/ukintctlr/gic/gic-v3.c index 1667a04c..0ec95562 100644 --- a/drivers/ukintctlr/gic/gic-v3.c +++ b/drivers/ukintctlr/gic/gic-v3.c @@ -48,7 +48,7 @@ #include #endif /* 
CONFIG_UKPLAT_ACPI */ #include -#include +#include #include #include #include diff --git a/drivers/ukintctlr/gic/include/uk/intctlr/gic.h b/drivers/ukintctlr/gic/include/uk/intctlr/gic.h index ba2335ba..976d7b6f 100644 --- a/drivers/ukintctlr/gic/include/uk/intctlr/gic.h +++ b/drivers/ukintctlr/gic/include/uk/intctlr/gic.h @@ -33,7 +33,7 @@ #include #include #include -#include +#include /* Shared Peripheral Interrupt (SPI) definitions */ #define GIC_SPI_TYPE 0 @@ -51,8 +51,8 @@ /* Distributor lock functions */ #ifdef CONFIG_HAVE_SMP -#define dist_lock(gdev) ukarch_spin_lock(gdev.dist_lock) -#define dist_unlock(gdev) ukarch_spin_unlock(gdev.dist_lock) +#define dist_lock(gdev) tn_spin_lock(gdev.dist_lock) +#define dist_unlock(gdev) tn_spin_unlock(gdev.dist_lock) #else /* CONFIG_HAVE_SMP */ #define dist_lock(gdev) {} #define dist_unlock(gdev) {} diff --git a/drivers/virtio/9p/virtio_9p.c b/drivers/virtio/9p/virtio_9p.c index 376f3593..a2c44497 100644 --- a/drivers/virtio/9p/virtio_9p.c +++ b/drivers/virtio/9p/virtio_9p.c @@ -40,7 +40,7 @@ #include #include #include -#include +#include #define DRIVER_NAME "virtio-9p" #define NUM_SEGMENTS 128 /** The number of virtqueue descriptors. 
*/ @@ -78,7 +78,7 @@ static int virtio_9p_connect(struct uk_9pdev *p9dev, int rc = 0; int found = 0; - ukarch_spin_lock(&virtio_9p_device_list_lock); + tn_spin_lock(&virtio_9p_device_list_lock); uk_list_for_each_entry(dev, &virtio_9p_device_list, _list) { if (!strcmp(dev->tag, device_identifier)) { if (dev->p9dev != NULL) { @@ -109,7 +109,7 @@ static int virtio_9p_connect(struct uk_9pdev *p9dev, p9dev->priv = dev; out: - ukarch_spin_unlock(&virtio_9p_device_list_lock); + tn_spin_unlock(&virtio_9p_device_list_lock); return rc; } @@ -120,9 +120,9 @@ static int virtio_9p_disconnect(struct uk_9pdev *p9dev) UK_ASSERT(p9dev); dev = p9dev->priv; - ukarch_spin_lock(&virtio_9p_device_list_lock); + tn_spin_lock(&virtio_9p_device_list_lock); dev->p9dev = NULL; - ukarch_spin_unlock(&virtio_9p_device_list_lock); + tn_spin_unlock(&virtio_9p_device_list_lock); return 0; } @@ -250,9 +250,9 @@ static int virtio_9p_recv(struct virtqueue *vq, void *priv) * Protect against data races with virtio_9p_request() calls * which are trying to enqueue to the same vq. 
*/ - ukarch_spin_lock(&dev->spinlock); + tn_spin_lock(&dev->spinlock); rc = virtqueue_buffer_dequeue(vq, (void **)&req, &len); - ukarch_spin_unlock(&dev->spinlock); + tn_spin_unlock(&dev->spinlock); if (rc < 0) break; @@ -436,7 +436,7 @@ static int virtio_9p_add_dev(struct virtio_dev *vdev) rc = -ENOMEM; goto out; } - ukarch_spin_init(&d->spinlock); + tn_spin_init(&d->spinlock); d->vdev = vdev; virtio_9p_feature_set(d); rc = virtio_9p_configure(d); @@ -446,9 +446,9 @@ static int virtio_9p_add_dev(struct virtio_dev *vdev) if (rc) goto out_free; - ukarch_spin_lock(&virtio_9p_device_list_lock); + tn_spin_lock(&virtio_9p_device_list_lock); uk_list_add(&d->_list, &virtio_9p_device_list); - ukarch_spin_unlock(&virtio_9p_device_list_lock); + tn_spin_unlock(&virtio_9p_device_list_lock); out: return rc; out_free: diff --git a/include/uk/plat/spinlock.h b/include/uk/plat/spinlock.h index 54801afe..dddda7e7 100644 --- a/include/uk/plat/spinlock.h +++ b/include/uk/plat/spinlock.h @@ -29,30 +29,30 @@ #ifndef __UKPLAT_SPINLOCK_H__ #define __UKPLAT_SPINLOCK_H__ -#include +#include #include #define ukplat_spin_lock_irq(lock) \ do { \ ukplat_lcpu_disable_irq(); \ - ukarch_spin_lock(lock); \ + tn_spin_lock(lock); \ } while (0) #define ukplat_spin_unlock_irq(lock) \ do { \ - ukarch_spin_unlock(lock); \ + tn_spin_unlock(lock); \ ukplat_lcpu_enable_irq(); \ } while (0) #define ukplat_spin_lock_irqsave(lock, flags) \ do { \ flags = ukplat_lcpu_save_irqf(); \ - ukarch_spin_lock(lock); \ + tn_spin_lock(lock); \ } while (0) #define ukplat_spin_unlock_irqrestore(lock, flags) \ do { \ - ukarch_spin_unlock(lock); \ + tn_spin_unlock(lock); \ ukplat_lcpu_restore_irqf(flags); \ } while (0) diff --git a/lib/Makefile.uk b/lib/Makefile.uk index cfdb3262..e0fd36a3 100644 --- a/lib/Makefile.uk +++ b/lib/Makefile.uk @@ -73,3 +73,4 @@ $(eval $(call import_lib,$(CONFIG_UK_BASE)/lib/ukofw)) $(eval $(call import_lib,$(CONFIG_UK_BASE)/lib/tnsystick)) $(eval $(call import_lib,$(CONFIG_UK_BASE)/lib/tntimer)) 
$(eval $(call import_lib,$(CONFIG_UK_BASE)/lib/tntrace)) +$(eval $(call import_lib,$(CONFIG_UK_BASE)/lib/tnsynchronize)) diff --git a/lib/tnsynchronize/Config.uk b/lib/tnsynchronize/Config.uk new file mode 100644 index 00000000..e01e6d23 --- /dev/null +++ b/lib/tnsynchronize/Config.uk @@ -0,0 +1,26 @@ +menuconfig LIBTNSYNCHRONIZE + bool "tnsynchronize: System synchronization mechanism" + default n + +if LIBTNSYNCHRONIZE + choice + prompt "Spinlock algorithm" + default LIBTNSYNCHRONIZE_SPINLOCK + depends on ARCH_ARM_64 + + config LIBTNSYNCHRONIZE_SPINLOCK + bool "Simple binary Spinlocks" + + config LIBTNSYNCHRONIZE_TICKETLOCK + bool "Ticketlocks" + endchoice + + config LIBTNSYNCHRONIZE_SPINLOCK_PROTECT + bool "Prevents deadlock" + default n + + config LIBTNSYNCHRONIZE_TEST + bool "Enable unit tests" + default n + select LIBUKTEST +endif diff --git a/lib/tnsynchronize/Makefile.uk b/lib/tnsynchronize/Makefile.uk new file mode 100644 index 00000000..f6f6a076 --- /dev/null +++ b/lib/tnsynchronize/Makefile.uk @@ -0,0 +1,14 @@ +$(eval $(call addlib_s,libtnsynchronize,$(CONFIG_LIBTNSYNCHRONIZE))) + +ASINCLUDES-$(CONFIG_LIBTNSYNCHRONIZE) += -I$(LIBTNSYNCHRONIZE_BASE)/include +CINCLUDES-$(CONFIG_LIBTNSYNCHRONIZE) += -I$(LIBTNSYNCHRONIZE_BASE)/include +CXXINCLUDES-$(CONFIG_LIBTNSYNCHRONIZE) += -I$(LIBTNSYNCHRONIZE_BASE)/include +LIBTNSYNCHRONIZE_ASINCLUDES-y += -I$(UK_PLAT_COMMON_BASE)/include +LIBTNSYNCHRONIZE_CINCLUDES-y += -I$(UK_PLAT_COMMON_BASE)/include + + +LIBTNSYNCHRONIZE_SRCS-$(CONFIG_LIBTNSYNCHRONIZE) += $(LIBTNSYNCHRONIZE_BASE)/spinlock.c + +ifneq ($(filter y,$(CONFIG_LIBTNSYNCHRONIZE_TEST) $(CONFIG_LIBUKTEST_ALL)),) +LIBTNSYNCHRONIZE_SRCS-y += $(LIBTNSYNCHRONIZE_BASE)/tests/test_spinlock.c +endif diff --git a/lib/tnsynchronize/exportsyms.uk b/lib/tnsynchronize/exportsyms.uk new file mode 100644 index 00000000..3008077f --- /dev/null +++ b/lib/tnsynchronize/exportsyms.uk @@ -0,0 +1,8 @@ +tn_spin_init +tn_spin_lock +tn_spin_unlock +tn_spin_trylock +tn_spin_is_locked 
+tn_spin_lock_recursion_check +tn_spin_unlock_recursion_check +tn_spin_lock_until diff --git a/include/uk/arch/spinlock.h b/lib/tnsynchronize/include/tn/spinlock.h similarity index 37% rename from include/uk/arch/spinlock.h rename to lib/tnsynchronize/include/tn/spinlock.h index e561b23c..a54b88d6 100644 --- a/include/uk/arch/spinlock.h +++ b/lib/tnsynchronize/include/tn/spinlock.h @@ -1,29 +1,20 @@ -/* SPDX-License-Identifier: BSD-2-Clause */ -/* - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. +/* Copyright 2023 Hangzhou Yingyi Technology Co., Ltd * - * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ -#ifndef __UKARCH_SPINLOCK_H__ -#define __UKARCH_SPINLOCK_H__ +#ifndef __TN_SPINLOCK_H__ +#define __TN_SPINLOCK_H__ #ifdef __cplusplus extern "C" { @@ -31,28 +22,46 @@ extern "C" { #include #include +#include +#ifdef CONFIG_LIBTNSYSTICK +#include +#endif #ifdef CONFIG_HAVE_SMP -#include -/* Unless you know what you are doing, use struct uk_spinlock instead. */ +struct __align(8) __spinlock { +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT + struct lcpu *current_lcpu; + uint32_t lock_time; +#endif +#ifdef CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK + __u16 current; /* currently served */ + __u16 next; /* next available ticket */ +#else + + volatile int lock; +#endif /* CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK */ +}; + typedef struct __spinlock __spinlock; +#define TN_SPINLOCK_INITIALIZER() { 0 } + /** - * UKARCH_SPINLOCK_INITIALIZER() macro + * TN_SPINLOCK_INITIALIZER() macro * * Statically initialize a spinlock to unlocked state. */ -#ifndef UKARCH_SPINLOCK_INITIALIZER -#error The spinlock implementation must define UKARCH_SPINLOCK_INITIALIZER -#endif /* UKARCH_SPINLOCK_INITIALIZER */ +#ifndef TN_SPINLOCK_INITIALIZER +#error The spinlock implementation must define TN_SPINLOCK_INITIALIZER +#endif /* TN_SPINLOCK_INITIALIZER */ /** * Initialize a spinlock to unlocked state. * * @param [in,out] lock Pointer to spinlock. */ -void ukarch_spin_init(__spinlock *lock); +void tn_spin_init(struct __spinlock *lock); /** * Acquire spinlock. 
It is guaranteed that the spinlock will be held @@ -60,14 +69,22 @@ void ukarch_spin_init(__spinlock *lock); * * @param [in,out] lock Pointer to spinlock. */ -void ukarch_spin_lock(__spinlock *lock); +void tn_spin_lock(struct __spinlock *lock); + +/** + * Acquire spinlock. It is guaranteed that the spinlock will be held + * exclusively. + * + * @param [in,out] lock Pointer to spinlock. + */ +void tn_spin_lock(struct __spinlock *lock); /** * Release previously acquired spinlock. * * @param [in,out] lock Pointer to spinlock. */ -void ukarch_spin_unlock(__spinlock *lock); +void tn_spin_unlock(struct __spinlock *lock); /** * Try to acquire spinlock. If the lock is already acquired (busy), this @@ -75,18 +92,24 @@ void ukarch_spin_unlock(__spinlock *lock); * * @param [in,out] lock Pointer to spinlock. * - * @return A non-zero value if spinlock was acquired, 0 otherwise. + * @return 0 if spinlock was acquired, -EBUSY otherwise. */ -int ukarch_spin_trylock(__spinlock *lock); +int tn_spin_trylock(struct __spinlock *lock); /** * Read spinlock state. No lock/unlock operations are performed on the lock. * * @param [in,out] lock Pointer to spinlock. * - * @return A non-zero value if spinlock is acquired, 0 otherwise. + * @return 0 if spinlock is acquired, A non-zero value otherwise. 
*/ -int ukarch_spin_is_locked(__spinlock *lock); +int tn_spin_is_locked(struct __spinlock *lock); + +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT +int tn_spin_lock_recursion_check(struct __spinlock *lock); +int tn_spin_unlock_recursion_check(struct __spinlock *lock); +int tn_spin_lock_until(struct __spinlock *lock, systick_t until); +#endif /* CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT */ #else /* CONFIG_HAVE_SMP */ /* N.B.: For single-core systems we remove spinlocks by mapping functions to @@ -99,13 +122,13 @@ typedef struct __spinlock { } __spinlock; #define UKARCH_SPINLOCK_INITIALIZER() {} -#define ukarch_spin_init(lock) (void)(lock) -#define ukarch_spin_lock(lock) \ +#define tn_spin_init(lock) (void)(lock) +#define tn_spin_lock(lock) \ do { barrier(); (void)(lock); } while (0) -#define ukarch_spin_unlock(lock) \ +#define tn_spin_unlock(lock) \ do { barrier(); (void)(lock); } while (0) -#define ukarch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) -#define ukarch_spin_is_locked(lock) ({ barrier(); (void)(lock); 0; }) +#define tn_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) +#define tn_spin_is_locked(lock) ({ barrier(); (void)(lock); 0; }) #endif /* CONFIG_HAVE_SMP */ @@ -113,4 +136,4 @@ typedef struct __spinlock { } #endif -#endif /* __UKARCH_SPINLOCK_H__ */ +#endif /* __TN_SPINLOCK_H__ */ diff --git a/lib/tnsynchronize/spinlock.c b/lib/tnsynchronize/spinlock.c new file mode 100644 index 00000000..e61ab069 --- /dev/null +++ b/lib/tnsynchronize/spinlock.c @@ -0,0 +1,191 @@ +/* Copyright 2023 Hangzhou Yingyi Technology Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#ifdef CONFIG_HAVE_SMP +#include +#include + +static inline void wait_for_event(void) +{ + __asm__ __volatile__("wfe" ::: "memory"); +} + +static inline void send_event_local(void) +{ + __asm__ __volatile__("sevl" ::: "memory"); +} + +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT +static inline bool tn_spin_lock_recursion_protect(struct __spinlock *lock) +{ + if (lock->current_lcpu != NULL) { + if (lock->current_lcpu == lcpu_get_current()) + return false; + } + return true; +} + +static inline bool tn_spin_unlock_recursion_protect(struct __spinlock *lock) +{ + + if (lock->current_lcpu != lcpu_get_current()) + return false; + + uk_store_n(&lock->current_lcpu, NULL); + return true; +} + +static inline tn_spin_lock_timeout_protect(struct __spinlock *lock) +{ + ; +} +#endif /* CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT */ + +void tn_spin_init(struct __spinlock *lock) +{ +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT + uk_store_n(&lock->current_lcpu, NULL); + uk_store_n(&lock->lock_time, 0); +#endif +#ifdef CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK + uk_store_n(&lock->next, 0); + uk_store_n(&lock->current, 0); +#else + uk_store_n(&lock->lock, 0); +#endif /* CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK */ +} + +void tn_spin_lock(struct __spinlock *lock) +{ +#ifdef CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK + __u16 ticket = uk_load_n(&lock->current); + + while (!uk_compare_exchange_n(&lock->next, &ticket, ticket)) + wait_for_event(); +#else + int old_value = 0; + int new_value = 1; + + send_event_local(); + while 
(!uk_compare_exchange_n(&lock->lock, &old_value, new_value)) + wait_for_event(); +#endif /* CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK */ +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT + uk_store_n(&lock->current_lcpu, lcpu_get_current()); +#endif /* CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT */ +} + +void tn_spin_unlock(struct __spinlock *lock) +{ +#ifdef CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK + uk_fetch_add(&lock->current, 1); +#else + uk_store_n(&lock->lock, 0); +#endif /* CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK */ +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT + uk_store_n(&lock->current_lcpu, NULL); +#endif /* CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT */ +} + +int tn_spin_trylock(struct __spinlock *lock) +{ +#ifdef CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK + + __u16 ticket = uk_load_n(&lock->current); + + if (!uk_compare_exchange_n(&lock->next, &ticket, ticket + 1)) + return -EBUSY; +#else + int old_value = 0; + int new_value = 1; + + if (!uk_compare_exchange_n(&lock->lock, &old_value, new_value)) + return -EBUSY; +#endif /* CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK */ +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT + uk_store_n(&lock->current_lcpu, lcpu_get_current()); +#endif /* CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT */ + return 0; +} + +int tn_spin_is_locked(struct __spinlock *lock) +{ +#ifdef CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK + __u16 ticket = uk_load_n(&lock->current); + + return !uk_compare_exchange_n(&lock->next, &ticket, ticket); +#else + return UK_READ_ONCE(lock->lock); +#endif /* CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK */ +} + +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT +int tn_spin_lock_recursion_check(struct __spinlock *lock) +{ + if (!tn_spin_lock_recursion_protect(lock)) { + uk_pr_debug("Spin lock recursion!\n"); + return -1; + } + + tn_spin_lock(lock); + return 0; +} + +int tn_spin_unlock_recursion_check(struct __spinlock *lock) +{ + if (!tn_spin_unlock_recursion_protect(lock)) { + uk_pr_debug("Spin lock recursion!\n"); + return -1; + } + + tn_spin_unlock(lock); + return 
0; +} + +#ifdef CONFIG_LIBTNSYSTICK +int tn_spin_lock_until(struct __spinlock *lock, systick_t until) +{ + systick_t curr_time = tn_systick_get_tick(); + systick_t time_out = curr_time + until; + + int rc; + + do { + rc = tn_spin_trylock(lock); + } while ((tn_systick_get_tick() < time_out) && (rc == -EBUSY)); + + if (tn_systick_get_tick() > time_out) + return -1; + + return 0; +} +#endif /* CONFIG_LIBTNSYSTICK */ +#endif /* CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT */ + +#else /* CONFIG_HAVE_SMP */ +#define tn_spin_init(lock) (void)(lock) +#define tn_spin_lock(lock) \ + do { barrier(); (void)(lock); } while (0) +#define tn_spin_unlock(lock) \ + do { barrier(); (void)(lock); } while (0) +#define tn_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) +#define tn_spin_is_locked(lock) ({ barrier(); (void)(lock); 0; }) + +#endif diff --git a/lib/tnsynchronize/tests/test_spinlock.c b/lib/tnsynchronize/tests/test_spinlock.c new file mode 100644 index 00000000..dbf9ab8b --- /dev/null +++ b/lib/tnsynchronize/tests/test_spinlock.c @@ -0,0 +1,126 @@ +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_LIBUKSCHEDCOOP +#include +#elif CONFIG_LIBTNSCHEDPRIO +#include +#endif +#include +#ifdef CONFIG_HAVE_SMP +#define TEST_STACK_SIZE 512 +#define HIGHEST_PRIORITY 255 + +uint64_t counter; +struct __spinlock test_multithread_lock; + +void *thread_B_entry(void *args) +{ + int rc; + + tn_spin_lock(&test_multithread_lock); + counter++; + tn_spin_unlock(&test_multithread_lock); +} + +void *thread_A_entry(void *args) +{ + struct uk_thread *test_threadB = NULL; + struct uk_alloc *a = uk_alloc_get_default(); + struct uk_sched *s = uk_sched_get_default(); + + tn_spin_lock(&test_multithread_lock); + test_threadB = uk_thread_prio_create_fn1(a, thread_B_entry, NULL, + a, TEST_STACK_SIZE, a, 0, + a, false, "test_thread_B", + HIGHEST_PRIORITY, NULL, NULL); + + uk_sched_thread_add(s, test_threadB); + + s->reschedule(s); + tn_spin_unlock(&test_multithread_lock); +} + 
+UK_TESTCASE_DESC(tnsynchronize, test_spinlock_basic_functions, + "Verify spinlock basic functions, excluding tn_spin_lock") +{ + struct __spinlock test_lock; + int rc; + + tn_spin_init(&test_lock); +#ifndef CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK + UK_TEST_EXPECT_ZERO(test_lock.lock); +#else + UK_TEST_EXPECT_ZERO(test_lock.current); + UK_TEST_EXPECT_ZERO(test_lock.next); +#endif /* CONFIG_LIBTNSYNCHRONIZE_TICKETLOCK */ + rc = tn_spin_trylock(&test_lock); + UK_TEST_EXPECT_ZERO(rc); + rc = tn_spin_is_locked(&test_lock); + UK_TEST_EXPECT_NOT_ZERO(rc); +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT + UK_TEST_EXPECT_SNUM_EQ(test_lock.current_lcpu, lcpu_get_current()); +#endif /* CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT */ + rc = tn_spin_trylock(&test_lock); + UK_TEST_EXPECT_SNUM_EQ(rc, -16); + tn_spin_unlock(&test_lock); + rc = tn_spin_is_locked(&test_lock); + UK_TEST_EXPECT_ZERO(rc); +} + +#ifdef CONFIG_LIBUKSCHED_THREAD_PRIORITY +UK_TESTCASE_DESC(tnsynchronize, Verify_multithreads_race_spinlock, + "Verify tn_spin_lock/unlock via multithreads racing one spinlock") +{ + struct uk_thread *test_threadA = NULL; + struct uk_alloc *a = uk_alloc_get_default(); + struct uk_sched *s = uk_sched_get_default(); + + tn_spin_init(&test_multithread_lock); + + test_threadA = uk_thread_prio_create_fn1(a, thread_A_entry, NULL, + a, TEST_STACK_SIZE, a, 0, + a, false, "test_thread_A", + HIGHEST_PRIORITY - 1, NULL, NULL); + + uk_sched_thread_add(s, test_threadA); + + UK_TEST_EXPECT_SNUM_EQ(counter, 1); +} +#endif /* CONFIG_LIBUKSCHED_THREAD_PRIORITY */ + +#ifdef CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT +UK_TESTCASE_DESC(tnsynchronize, Verify_spinlock_recursion_protect, + "Verify basic recursion_check function") +{ + struct __spinlock test_lock; + int rc; + + tn_spin_init(&test_lock); + rc = tn_spin_trylock(&test_lock); + rc = tn_spin_lock_recursion_check(&test_lock); + UK_TEST_EXPECT_SNUM_EQ(rc, -1); + tn_spin_unlock(&test_lock); + rc = tn_spin_unlock_recursion_check(&test_lock); + 
UK_TEST_EXPECT_SNUM_EQ(rc, -1); +} + +UK_TESTCASE_DESC(tnsynchronize, Verify_spinlock_timeout_protect, + "Verify timeout api that prohibit deadlock") +{ + struct __spinlock test_lock; + int rc; + + tn_spin_init(&test_lock); + tn_spin_lock(&test_lock); + rc = tn_spin_lock_until(&test_lock, 1000); + UK_TEST_EXPECT_SNUM_EQ(rc, -1); + tn_spin_unlock(&test_lock); +} +#endif /* CONFIG_LIBTNSYNCHRONIZE_SPINLOCK_PROTECT */ + +uk_testsuite_register(tnsynchronize, NULL); +#endif /* CONFIG_HAVE_SMP */ diff --git a/lib/uk9p/9pdev.c b/lib/uk9p/9pdev.c index 4fac6d32..5fb6dddb 100644 --- a/lib/uk9p/9pdev.c +++ b/lib/uk9p/9pdev.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include @@ -51,7 +51,7 @@ static void _fid_mgmt_init(struct uk_9pdev_fid_mgmt *fid_mgmt) { - ukarch_spin_init(&fid_mgmt->spinlock); + tn_spin_init(&fid_mgmt->spinlock); fid_mgmt->next_fid = 0; UK_INIT_LIST_HEAD(&fid_mgmt->fid_free_list); UK_INIT_LIST_HEAD(&fid_mgmt->fid_active_list); @@ -126,7 +126,7 @@ static void _fid_mgmt_cleanup(struct uk_9pdev_fid_mgmt *fid_mgmt) static void _req_mgmt_init(struct uk_9pdev_req_mgmt *req_mgmt) { - ukarch_spin_init(&req_mgmt->spinlock); + tn_spin_init(&req_mgmt->spinlock); uk_bitmap_zero(req_mgmt->tag_bm, UK_9P_NUMTAGS); UK_INIT_LIST_HEAD(&req_mgmt->req_list); UK_INIT_LIST_HEAD(&req_mgmt->req_free_list); diff --git a/lib/uklock/include/uk/arch/arm64/ticketlock.h b/lib/uklock/include/uk/arch/arm64/ticketlock.h index dfc50365..1b96b69c 100644 --- a/lib/uklock/include/uk/arch/arm64/ticketlock.h +++ b/lib/uklock/include/uk/arch/arm64/ticketlock.h @@ -52,69 +52,41 @@ struct __align(8) __ticketlock { static inline void ukarch_ticket_init(struct __ticketlock *lock) { - lock->next = 0; - lock->current = 0; + uk_store_n(&lock->next, 0); + uk_store_n(&lock->current, 0); } static inline void ukarch_ticket_lock(struct __ticketlock *lock) { - unsigned int r, r1, r2; - - __asm__ __volatile__( - " prfm pstl1keep, [%3]\n" /* preload lock */ - "1: ldaxr 
%w0, [%3]\n" /* read current/next */ - " add %w1, %w0, #0x10000\n"/* increment next */ - " stxr %w2, %w1, [%3]\n" /* try store-exclusive */ - " cbnz %w2, 1b\n" /* retry if excl failed */ - " and %w2, %w0, #0xffff\n" /* get current */ - " cmp %w2, %w0, lsr #16\n" /* is it our ticket? */ - " b.eq 3f\n" /* if yes, lock acquired */ - " sevl\n" /* invalidate next wfe */ - "2: wfe\n" /* wait for unlock event */ - " ldaxrh %w2, [%3]\n" /* load current */ - " cmp %w2, %w0, lsr #16\n" /* is it our ticket? */ - " b.ne 2b\n" /* if not, try again */ - "3:" /* critical section */ - : "=&r" (r), "=&r" (r1), "=&r" (r2) - : "r" (lock) - : "memory"); + __u16 ticket = uk_fetch_add(&lock->next, 1); + + __asm__ __volatile__("sevl" ::: "memory"); + do { + __asm__ __volatile__("wfe" ::: "memory"); + } while (uk_load_n(&lock->current)!= ticket); + } static inline void ukarch_ticket_unlock(struct __ticketlock *lock) { - unsigned int r; - - __asm__ __volatile__( - " ldrh %w0, %1\n" /* read current */ - " add %w0, %w0, #1\n"/* increment current */ - " stlrh %w0, %1\n" /* update lock */ - : "=&r" (r), "=Q" (lock->current) - : - : "memory"); + uk_fetch_add(&lock->current, 1); } static inline int ukarch_ticket_trylock(struct __ticketlock *lock) { - unsigned int r, r1; - - __asm__ __volatile__( - " prfm pstl1keep, [%2]\n" /* preload lock */ - "1: ldaxr %w0, [%2]\n" /* read current/next */ - " eor %w1, %w0, %w0, ror #16\n"/* current == next ? 
*/ - " cbnz %w1, 2f\n" /* bail if locked */ - " add %w1, %w0, #0x10000\n" /* increment next */ - " stxr %w0, %w1, [%2]\n" /* try update next */ - " cbnz %w0, 1b\n" /* retry if failed */ - "2:" - : "=&r" (r), "=&r" (r1) - : "r" (lock) - : "memory"); - return !r; + __u16 ticket = uk_load_n(&lock->current); + + if (!uk_compare_exchange_n(&lock->next, &ticket, (__u16)(ticket + 1))) + return 0; + + return 1; } static inline int ukarch_ticket_is_locked(struct __ticketlock *lock) { - return !(lock->next == lock->current); + __u16 ticket = uk_load_n(&lock->current); + + return uk_load_n(&lock->next) != ticket; } #else /* CONFIG_HAVE_SMP */ diff --git a/lib/uklock/include/uk/mutex.h b/lib/uklock/include/uk/mutex.h index 2b62caf2..01b1c12b 100644 --- a/lib/uklock/include/uk/mutex.h +++ b/lib/uklock/include/uk/mutex.h @@ -45,7 +45,7 @@ #include #ifdef CONFIG_LIBUKLOCK_MUTEX_METRICS -#include +#include #include #endif /* CONFIG_LIBUKLOCK_MUTEX_METRICS */ @@ -143,11 +143,11 @@ static inline void uk_mutex_lock(struct uk_mutex *m) } #ifdef CONFIG_LIBUKLOCK_MUTEX_METRICS - ukarch_spin_lock(&_uk_mutex_metrics_lock); + tn_spin_lock(&_uk_mutex_metrics_lock); _uk_mutex_metrics.active_locked += (m->lock_count == 1); _uk_mutex_metrics.active_unlocked -= (m->lock_count == 1); _uk_mutex_metrics.total_locks++; - ukarch_spin_unlock(&_uk_mutex_metrics_lock); + tn_spin_unlock(&_uk_mutex_metrics_lock); #endif /* CONFIG_LIBUKLOCK_MUTEX_METRICS */ TN_TRACE_OBJ_FUNC_EXIT(uk_mutex, lock, m); @@ -169,9 +169,9 @@ static inline int uk_mutex_trylock(struct uk_mutex *m) m->lock_count++; #ifdef CONFIG_LIBUKLOCK_MUTEX_METRICS - ukarch_spin_lock(&_uk_mutex_metrics_lock); + tn_spin_lock(&_uk_mutex_metrics_lock); _uk_mutex_metrics.total_ok_trylocks++; - ukarch_spin_unlock(&_uk_mutex_metrics_lock); + tn_spin_unlock(&_uk_mutex_metrics_lock); #endif /* CONFIG_LIBUKLOCK_MUTEX_METRICS */ return 1; @@ -185,11 +185,11 @@ static inline int uk_mutex_trylock(struct uk_mutex *m) m->lock_count = 1; #ifdef 
CONFIG_LIBUKLOCK_MUTEX_METRICS - ukarch_spin_lock(&_uk_mutex_metrics_lock); + tn_spin_lock(&_uk_mutex_metrics_lock); _uk_mutex_metrics.active_locked++; _uk_mutex_metrics.active_unlocked--; _uk_mutex_metrics.total_ok_trylocks++; - ukarch_spin_unlock(&_uk_mutex_metrics_lock); + tn_spin_unlock(&_uk_mutex_metrics_lock); #endif /* CONFIG_LIBUKLOCK_MUTEX_METRICS */ TN_TRACE_OBJ_FUNC_EXIT(uk_mutex, trylock, m); @@ -198,9 +198,9 @@ static inline int uk_mutex_trylock(struct uk_mutex *m) } #ifdef CONFIG_LIBUKLOCK_MUTEX_METRICS - ukarch_spin_lock(&_uk_mutex_metrics_lock); + tn_spin_lock(&_uk_mutex_metrics_lock); _uk_mutex_metrics.total_failed_trylocks++; - ukarch_spin_unlock(&_uk_mutex_metrics_lock); + tn_spin_unlock(&_uk_mutex_metrics_lock); #endif /* CONFIG_LIBUKLOCK_MUTEX_METRICS */ TN_TRACE_OBJ_FUNC_EXIT(uk_mutex, trylock, m); @@ -231,11 +231,11 @@ static inline void uk_mutex_unlock(struct uk_mutex *m) } #ifdef CONFIG_LIBUKLOCK_MUTEX_METRICS - ukarch_spin_lock(&_uk_mutex_metrics_lock); + tn_spin_lock(&_uk_mutex_metrics_lock); _uk_mutex_metrics.active_locked -= (m->lock_count == 0); _uk_mutex_metrics.active_unlocked += (m->lock_count == 0); _uk_mutex_metrics.total_unlocks++; - ukarch_spin_unlock(&_uk_mutex_metrics_lock); + tn_spin_unlock(&_uk_mutex_metrics_lock); #endif /* CONFIG_LIBUKLOCK_MUTEX_METRICS */ TN_TRACE_OBJ_FUNC_EXIT(uk_mutex, unlock, m); diff --git a/lib/uklock/include/uk/spinlock.h b/lib/uklock/include/uk/spinlock.h index df270e83..9a3c7d5b 100644 --- a/lib/uklock/include/uk/spinlock.h +++ b/lib/uklock/include/uk/spinlock.h @@ -35,6 +35,7 @@ #include #include +#include #ifdef __cplusplus extern "C" { @@ -42,40 +43,16 @@ extern "C" { /* See uk/arch/spinlock.h for the interface documentation */ -#ifndef CONFIG_LIBUKLOCK_TICKETLOCK - #ifndef uk_spinlock -#include - #define uk_spinlock __spinlock - -#define UK_SPINLOCK_INITIALIZER() UKARCH_SPINLOCK_INITIALIZER() -#define uk_spin_init(lock) ukarch_spin_init(lock) -#define uk_spin_lock(lock) ukarch_spin_lock(lock) 
-#define uk_spin_unlock(lock) ukarch_spin_unlock(lock) -#define uk_spin_trylock(lock) ukarch_spin_trylock(lock) -#define uk_spin_is_locked(lock) ukarch_spin_is_locked(lock) -#endif /* uk_spinlock */ - -#else /* CONFIG_LIBUKLOCK_TICKETLOCK */ - -#ifndef uk_spinlock - -#ifdef CONFIG_ARCH_ARM_64 -#include -#endif - -#define uk_spinlock __ticketlock - -#define UK_SPINLOCK_INITIALIZER() UKARCH_TICKETLOCK_INITIALIZER() -#define uk_spin_init(lock) ukarch_ticket_init(lock) -#define uk_spin_lock(lock) ukarch_ticket_lock(lock) -#define uk_spin_unlock(lock) ukarch_ticket_unlock(lock) -#define uk_spin_trylock(lock) ukarch_ticket_trylock(lock) -#define uk_spin_is_locked(lock) ukarch_ticket_is_locked(lock) #endif /* uk_spinlock */ -#endif /* CONFIG_LIBUKLOCK_TICKETLOCK */ +#define UK_SPINLOCK_INITIALIZER() TN_SPINLOCK_INITIALIZER() +#define uk_spin_init(lock) tn_spin_init(lock) +#define uk_spin_lock(lock) tn_spin_lock(lock) +#define uk_spin_unlock(lock) tn_spin_unlock(lock) +#define uk_spin_trylock(lock) tn_spin_trylock(lock) +#define uk_spin_is_locked(lock) tn_spin_is_locked(lock) #define uk_spin_lock_irq(lock) \ do { \ diff --git a/lib/uklock/mutex.c b/lib/uklock/mutex.c index 94b21322..297775e9 100644 --- a/lib/uklock/mutex.c +++ b/lib/uklock/mutex.c @@ -16,9 +16,9 @@ void uk_mutex_init_config(struct uk_mutex *m, unsigned int flags) uk_waitq_init(&m->wait); #ifdef CONFIG_LIBUKLOCK_MUTEX_METRICS - ukarch_spin_lock(&_uk_mutex_metrics_lock); + tn_spin_lock(&_uk_mutex_metrics_lock); _uk_mutex_metrics.active_unlocked++; - ukarch_spin_unlock(&_uk_mutex_metrics_lock); + tn_spin_unlock(&_uk_mutex_metrics_lock); #endif /* CONFIG_LIBUKLOCK_MUTEX_METRICS */ } @@ -31,7 +31,7 @@ void uk_mutex_init_config(struct uk_mutex *m, unsigned int flags) */ static int mutex_metrics_ctor(void) { - ukarch_spin_init(&_uk_mutex_metrics_lock); + tn_spin_init(&_uk_mutex_metrics_lock); return 0; } @@ -45,8 +45,8 @@ void uk_mutex_get_metrics(struct uk_mutex_metrics *dst) { UK_ASSERT(dst); - 
ukarch_spin_lock(&_uk_mutex_metrics_lock); + tn_spin_lock(&_uk_mutex_metrics_lock); memcpy(dst, &_uk_mutex_metrics, sizeof(*dst)); - ukarch_spin_unlock(&_uk_mutex_metrics_lock); + tn_spin_unlock(&_uk_mutex_metrics_lock); } #endif /* CONFIG_LIBUKLOCK_MUTEX_METRICS */ diff --git a/lib/uknetdev/include/uk/netdev.h b/lib/uknetdev/include/uk/netdev.h index 865c9fb7..381a5bcd 100644 --- a/lib/uknetdev/include/uk/netdev.h +++ b/lib/uknetdev/include/uk/netdev.h @@ -490,23 +490,23 @@ static inline int uk_netdev_rx_one(struct uk_netdev *dev, uint16_t queue_id, if (ret >= 0 && (ret & UK_NETDEV_STATUS_SUCCESS)) { struct uk_netbuf *nb; - ukarch_spin_lock(&dev->_stats_lock); + tn_spin_lock(&dev->_stats_lock); UK_NETBUF_CHAIN_FOREACH(nb, *pkt) dev->_stats.rx_m.bytes += (*pkt)->len; dev->_stats.rx_m.packets++; - ukarch_spin_unlock(&dev->_stats_lock); + tn_spin_unlock(&dev->_stats_lock); return ret; } if (ret >= 0 && (ret & UK_NETDEV_STATUS_UNDERRUN)) { - ukarch_spin_lock(&dev->_stats_lock); + tn_spin_lock(&dev->_stats_lock); dev->_stats.rx_m.fifo++; - ukarch_spin_unlock(&dev->_stats_lock); + tn_spin_unlock(&dev->_stats_lock); return ret; } if (ret < 0) { - ukarch_spin_lock(&dev->_stats_lock); + tn_spin_lock(&dev->_stats_lock); dev->_stats.rx_m.errors++; - ukarch_spin_unlock(&dev->_stats_lock); + tn_spin_unlock(&dev->_stats_lock); return ret; } #endif /* CONFIG_LIBUKNETDEV_STATS */ @@ -558,23 +558,23 @@ static inline int uk_netdev_tx_one(struct uk_netdev *dev, uint16_t queue_id, if (ret >= 0 && (ret & UK_NETDEV_STATUS_SUCCESS)) { struct uk_netbuf *nb; - ukarch_spin_lock(&dev->_stats_lock); + tn_spin_lock(&dev->_stats_lock); UK_NETBUF_CHAIN_FOREACH(nb, pkt) dev->_stats.tx_m.bytes += nb->len; dev->_stats.tx_m.packets++; - ukarch_spin_unlock(&dev->_stats_lock); + tn_spin_unlock(&dev->_stats_lock); return ret; } if (ret >= 0 && (ret & UK_NETDEV_STATUS_UNDERRUN)) { - ukarch_spin_lock(&dev->_stats_lock); + tn_spin_lock(&dev->_stats_lock); dev->_stats.tx_m.fifo++; - 
ukarch_spin_unlock(&dev->_stats_lock); + tn_spin_unlock(&dev->_stats_lock); return ret; } if (ret < 0) { - ukarch_spin_lock(&dev->_stats_lock); + tn_spin_lock(&dev->_stats_lock); dev->_stats.tx_m.errors++; - ukarch_spin_unlock(&dev->_stats_lock); + tn_spin_unlock(&dev->_stats_lock); return ret; } #endif /* CONFIG_LIBUKNETDEV_STATS */ diff --git a/lib/uknetdev/include/uk/netdev_core.h b/lib/uknetdev/include/uk/netdev_core.h index 67ea399c..23ac55c8 100644 --- a/lib/uknetdev/include/uk/netdev_core.h +++ b/lib/uknetdev/include/uk/netdev_core.h @@ -48,7 +48,7 @@ #include #endif #ifdef CONFIG_LIBUKNETDEV_STATS -#include +#include #endif /* CONFIG_LIBUKNETDEV_STATS */ diff --git a/lib/uknetdev/stats.c b/lib/uknetdev/stats.c index 45f126cc..825e7166 100644 --- a/lib/uknetdev/stats.c +++ b/lib/uknetdev/stats.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include static int get_tx_bytes(void *cookie, __u64 *out) @@ -146,7 +146,7 @@ int uk_netdev_stats_init(struct uk_netdev *dev) char *obj_name; memset(&dev->_stats, 0, sizeof(dev->_stats)); - ukarch_spin_init(&dev->_stats_lock); + tn_spin_init(&dev->_stats_lock); /* Create stats object */ res = asprintf(&obj_name, "netdev%d", dev_id); diff --git a/lib/uksched/include/uk/wait.h b/lib/uksched/include/uk/wait.h index 0b370b81..b4dc1712 100644 --- a/lib/uksched/include/uk/wait.h +++ b/lib/uksched/include/uk/wait.h @@ -40,7 +40,7 @@ extern "C" { static inline void uk_waitq_init(struct uk_waitq *wq) { - ukarch_spin_init(&(wq->sl)); + tn_spin_init(&(wq->sl)); __WAIT_QUEUE_INIT(wq); } diff --git a/lib/uksched/include/uk/wait_types.h b/lib/uksched/include/uk/wait_types.h index c7676359..5853db03 100644 --- a/lib/uksched/include/uk/wait_types.h +++ b/lib/uksched/include/uk/wait_types.h @@ -27,7 +27,7 @@ #define __UK_SCHED_WAIT_TYPES_H__ #include -#include +#include #ifdef __cplusplus extern "C" { diff --git a/lib/uksched/sched.c b/lib/uksched/sched.c index a5ae6fc7..3d2afb39 100644 --- a/lib/uksched/sched.c +++ 
b/lib/uksched/sched.c @@ -391,6 +391,7 @@ int uk_sched_thread_add(struct uk_sched *s, struct uk_thread *t) UK_TAILQ_INSERT_TAIL(&s->thread_list, t, thread_list); ukplat_lcpu_restore_irqf(flags); + uk_pr_debug("start reschedule\n"); if (uk_sched_current() == s) s->reschedule(s); diff --git a/lib/vfscore/include/vfscore/eventpoll.h b/lib/vfscore/include/vfscore/eventpoll.h index 499d04c8..f68b5379 100644 --- a/lib/vfscore/include/vfscore/eventpoll.h +++ b/lib/vfscore/include/vfscore/eventpoll.h @@ -39,7 +39,7 @@ extern "C" { #include #include -#include +#include #include #include #include diff --git a/plat/xen/drivers/9p/9pfront.c b/plat/xen/drivers/9p/9pfront.c index 8f76ffb2..0a43f42e 100644 --- a/plat/xen/drivers/9p/9pfront.c +++ b/plat/xen/drivers/9p/9pfront.c @@ -45,7 +45,7 @@ #include #include #include -#include +#include #include #include #include @@ -221,7 +221,7 @@ static int p9front_allocate_dev_ring(struct p9front_dev *p9fdev, int idx) ring = &p9fdev->rings[idx]; UK_ASSERT(!ring->initialized); - ukarch_spin_init(&ring->spinlock); + tn_spin_init(&ring->spinlock); ring->dev = p9fdev; /* Allocate ring intf page. 
*/ @@ -337,7 +337,7 @@ static int p9front_connect(struct uk_9pdev *p9dev, int rc = 0; int found = 0; - ukarch_spin_lock(&p9front_device_list_lock); + tn_spin_lock(&p9front_device_list_lock); uk_list_for_each_entry(p9fdev, &p9front_device_list, _list) { if (!strcmp(p9fdev->tag, device_identifier)) { if (p9fdev->p9dev != NULL) { @@ -363,7 +363,7 @@ static int p9front_connect(struct uk_9pdev *p9dev, found = 1; out: - ukarch_spin_unlock(&p9front_device_list_lock); + tn_spin_unlock(&p9front_device_list_lock); return rc; } @@ -374,9 +374,9 @@ static int p9front_disconnect(struct uk_9pdev *p9dev __unused) UK_ASSERT(p9dev); p9fdev = p9dev->priv; - ukarch_spin_lock(&p9front_device_list_lock); + tn_spin_lock(&p9front_device_list_lock); p9fdev->p9dev = NULL; - ukarch_spin_unlock(&p9front_device_list_lock); + tn_spin_unlock(&p9front_device_list_lock); return 0; } @@ -401,7 +401,7 @@ static int p9front_request(struct uk_9pdev *p9dev, ring = &p9fdev->rings[ring_idx]; /* Protect against concurrent writes to the out ring. 
*/ - ukarch_spin_lock(&ring->spinlock); + tn_spin_lock(&ring->spinlock); cons = ring->intf->out_cons; prod = ring->intf->out_prod; xen_mb(); @@ -411,7 +411,7 @@ static int p9front_request(struct uk_9pdev *p9dev, if (ring_size - xen_9pfs_queued(prod, cons, ring_size) < req->xmit.size + req->xmit.zc_size) { - ukarch_spin_unlock(&ring->spinlock); + tn_spin_unlock(&ring->spinlock); return -ENOSPC; } @@ -424,7 +424,7 @@ static int p9front_request(struct uk_9pdev *p9dev, prod += req->xmit.size + req->xmit.zc_size; ring->intf->out_prod = prod; - ukarch_spin_unlock(&ring->spinlock); + tn_spin_unlock(&ring->spinlock); notify_remote_via_evtchn(ring->evtchn); return 0; @@ -492,9 +492,9 @@ static int p9front_add_dev(struct xenbus_device *xendev) } rc = 0; - ukarch_spin_lock(&p9front_device_list_lock); + tn_spin_lock(&p9front_device_list_lock); uk_list_add(&p9fdev->_list, &p9front_device_list); - ukarch_spin_unlock(&p9front_device_list_lock); + tn_spin_unlock(&p9front_device_list_lock); uk_pr_info(DRIVER_NAME": Connected 9pfront dev: tag=%s,rings=%d,order=%d\n", p9fdev->tag, p9fdev->nb_rings, p9fdev->ring_order); diff --git a/plat/xen/include/xenbus/xenbus.h b/plat/xen/include/xenbus/xenbus.h index f018c039..378c1be6 100644 --- a/plat/xen/include/xenbus/xenbus.h +++ b/plat/xen/include/xenbus/xenbus.h @@ -33,7 +33,7 @@ #ifndef __XENBUS_H__ #define __XENBUS_H__ -#include +#include #include #include #include diff --git a/plat/xen/xenbus/client.c b/plat/xen/xenbus/client.c index 1e28dd1b..4d166887 100644 --- a/plat/xen/xenbus/client.c +++ b/plat/xen/xenbus/client.c @@ -103,19 +103,19 @@ int xenbus_watch_wait_event(struct xenbus_watch *watch) return -EINVAL; while (1) { - ukarch_spin_lock(&watch->lock); + tn_spin_lock(&watch->lock); if (watch->pending_events > 0) break; - ukarch_spin_unlock(&watch->lock); + tn_spin_unlock(&watch->lock); uk_waitq_wait_event(&watch->wq, (watch->pending_events > 0)); } watch->pending_events--; - ukarch_spin_unlock(&watch->lock); + 
tn_spin_unlock(&watch->lock); return 0; } @@ -125,10 +125,10 @@ int xenbus_watch_notify_event(struct xenbus_watch *watch) if (watch == NULL) return -EINVAL; - ukarch_spin_lock(&watch->lock); + tn_spin_lock(&watch->lock); watch->pending_events++; uk_waitq_wake_up(&watch->wq); - ukarch_spin_unlock(&watch->lock); + tn_spin_unlock(&watch->lock); return 0; } diff --git a/plat/xen/xenbus/xs_comms.c b/plat/xen/xenbus/xs_comms.c index 6b1ab9ec..9eec4b80 100644 --- a/plat/xen/xenbus/xs_comms.c +++ b/plat/xen/xenbus/xs_comms.c @@ -43,7 +43,7 @@ #include #include #include -#include +#include #include #include #include @@ -129,7 +129,7 @@ static void xs_request_pool_init(struct xs_request_pool *pool) pool->num_live = 0; pool->last_probed = -1; - ukarch_spin_init(&pool->lock); + tn_spin_init(&pool->lock); uk_waitq_init(&pool->waitq); UK_TAILQ_INIT(&pool->queued); uk_bitmap_zero(pool->entries_bm, XS_REQ_POOL_SIZE); @@ -150,12 +150,12 @@ static struct xs_request *xs_request_get(void) /* wait for an available entry */ while (1) { - ukarch_spin_lock(&xs_req_pool.lock); + tn_spin_lock(&xs_req_pool.lock); if (xs_req_pool.num_live < XS_REQ_POOL_SIZE) break; - ukarch_spin_unlock(&xs_req_pool.lock); + tn_spin_unlock(&xs_req_pool.lock); uk_waitq_wait_event(&xs_req_pool.waitq, (xs_req_pool.num_live < XS_REQ_POOL_SIZE)); @@ -174,7 +174,7 @@ static struct xs_request *xs_request_get(void) xs_req_pool.last_probed = entry_idx; xs_req_pool.num_live++; - ukarch_spin_unlock(&xs_req_pool.lock); + tn_spin_unlock(&xs_req_pool.lock); return &xs_req_pool.entries[entry_idx]; } @@ -184,7 +184,7 @@ static void xs_request_put(struct xs_request *xs_req) { __u32 reqid = xs_req->hdr.req_id; - ukarch_spin_lock(&xs_req_pool.lock); + tn_spin_lock(&xs_req_pool.lock); UK_ASSERT(uk_test_bit(reqid, xs_req_pool.entries_bm) == 1); @@ -194,36 +194,36 @@ static void xs_request_put(struct xs_request *xs_req) if (xs_req_pool.num_live == XS_REQ_POOL_SIZE - 1) uk_waitq_wake_up(&xs_req_pool.waitq); - 
ukarch_spin_unlock(&xs_req_pool.lock); + tn_spin_unlock(&xs_req_pool.lock); } static struct xs_request *xs_request_peek(void) { struct xs_request *xs_req; - ukarch_spin_lock(&xs_req_pool.lock); + tn_spin_lock(&xs_req_pool.lock); xs_req = UK_TAILQ_FIRST(&xs_req_pool.queued); - ukarch_spin_unlock(&xs_req_pool.lock); + tn_spin_unlock(&xs_req_pool.lock); return xs_req; } static void xs_request_enqueue(struct xs_request *xs_req) { - ukarch_spin_lock(&xs_req_pool.lock); + tn_spin_lock(&xs_req_pool.lock); UK_TAILQ_INSERT_TAIL(&xs_req_pool.queued, xs_req, next); - ukarch_spin_unlock(&xs_req_pool.lock); + tn_spin_unlock(&xs_req_pool.lock); } static struct xs_request *xs_request_dequeue(void) { struct xs_request *xs_req; - ukarch_spin_lock(&xs_req_pool.lock); + tn_spin_lock(&xs_req_pool.lock); xs_req = UK_TAILQ_FIRST(&xs_req_pool.queued); if (xs_req) UK_TAILQ_REMOVE(&xs_req_pool.queued, xs_req, next); - ukarch_spin_unlock(&xs_req_pool.lock); + tn_spin_unlock(&xs_req_pool.lock); return xs_req; } diff --git a/plat/xen/xenbus/xs_watch.c b/plat/xen/xenbus/xs_watch.c index 28ca8db5..661d101d 100644 --- a/plat/xen/xenbus/xs_watch.c +++ b/plat/xen/xenbus/xs_watch.c @@ -62,7 +62,7 @@ struct xs_watch *xs_watch_create(const char *path) if (!xsw) return ERR2PTR(-ENOMEM); - ukarch_spin_init(&xsw->base.lock); + tn_spin_init(&xsw->base.lock); xsw->base.pending_events = 0; uk_waitq_init(&xsw->base.wq); -- Gitee From 55678989156731b1097c249f2d9b44c45e8bc748 Mon Sep 17 00:00:00 2001 From: cheng2000160 Date: Fri, 7 Jun 2024 18:29:48 +0800 Subject: [PATCH 2/2] lib: Using new spinlock api --- lib/tnsynchronize/spinlock.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/tnsynchronize/spinlock.c b/lib/tnsynchronize/spinlock.c index e61ab069..a61c8d1e 100644 --- a/lib/tnsynchronize/spinlock.c +++ b/lib/tnsynchronize/spinlock.c @@ -20,7 +20,11 @@ #include #ifdef CONFIG_HAVE_SMP #include -#include +#endif /* CONFIG_HAVE_SMP */ +#ifdef CONFIG_LIBTNSYSTICK +#include +#endif 
+#ifdef CONFIG_HAVE_SMP static inline void wait_for_event(void) { -- Gitee