0006-Sw64-Atomic-and-Locking-Implementation.patch 40.64 KB
From d05ecf05cf6abb524216e0a8416fdd78c3d7e6bf Mon Sep 17 00:00:00 2001
From: swcompiler <lc@wxiat.com>
Date: Fri, 29 Nov 2024 13:59:09 +0800
Subject: [PATCH 06/23] Sw64: Atomic and Locking Implementation
---
sysdeps/sw_64/atomic-machine.h | 393 ++++++++++++++++++
sysdeps/sw_64/nptl/bits/struct_rwlock.h | 43 ++
sysdeps/sw_64/nptl/pthread_spin_lock.S | 55 +++
sysdeps/sw_64/nptl/pthread_spin_trylock.S | 56 +++
sysdeps/sw_64/sw8a/atomic-machine.h | 371 +++++++++++++++++
sysdeps/sw_64/sw8a/nptl/bits/struct_rwlock.h | 43 ++
sysdeps/sw_64/sw8a/nptl/pthread_spin_lock.S | 43 ++
.../sw_64/sw8a/nptl/pthread_spin_trylock.S | 44 ++
8 files changed, 1048 insertions(+)
create mode 100644 sysdeps/sw_64/atomic-machine.h
create mode 100644 sysdeps/sw_64/nptl/bits/struct_rwlock.h
create mode 100644 sysdeps/sw_64/nptl/pthread_spin_lock.S
create mode 100644 sysdeps/sw_64/nptl/pthread_spin_trylock.S
create mode 100644 sysdeps/sw_64/sw8a/atomic-machine.h
create mode 100644 sysdeps/sw_64/sw8a/nptl/bits/struct_rwlock.h
create mode 100644 sysdeps/sw_64/sw8a/nptl/pthread_spin_lock.S
create mode 100644 sysdeps/sw_64/sw8a/nptl/pthread_spin_trylock.S
diff --git a/sysdeps/sw_64/atomic-machine.h b/sysdeps/sw_64/atomic-machine.h
new file mode 100644
index 00000000..7f379fbe
--- /dev/null
+++ b/sysdeps/sw_64/atomic-machine.h
@@ -0,0 +1,393 @@
+/* Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <stdint.h>
+
+typedef int8_t atomic8_t;
+typedef uint8_t uatomic8_t;
+typedef int_fast8_t atomic_fast8_t;
+typedef uint_fast8_t uatomic_fast8_t;
+
+typedef int16_t atomic16_t;
+typedef uint16_t uatomic16_t;
+typedef int_fast16_t atomic_fast16_t;
+typedef uint_fast16_t uatomic_fast16_t;
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+typedef int_fast32_t atomic_fast32_t;
+typedef uint_fast32_t uatomic_fast32_t;
+
+typedef int64_t atomic64_t;
+typedef uint64_t uatomic64_t;
+typedef int_fast64_t atomic_fast64_t;
+typedef uint_fast64_t uatomic_fast64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+#define __HAVE_64B_ATOMICS 1
+#define USE_ATOMIC_COMPILER_BUILTINS 0
+
+/* XXX Is this actually correct? */
+#define ATOMIC_EXCHANGE_USES_CAS 1
+
+#define __MB " memb\n"
+
+/* Compare and exchange. For all of the "xxx" routines, we expect a
+ "__prev" and a "__cmp" variable to be provided by the enclosing scope,
+ in which values are returned. */
+/* Note: no memb is emitted after the rd_f.  */
+#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __snew, __addr64; \
+ __asm__ __volatile__( \
+ mb1 " bic %[__addr8],7,%[__addr64]\n" \
+ " ins0b %[__new],%[__addr8],%[__snew]\n" \
+ "1: lldl %[__tmp],0(%[__addr64])\n" \
+ " ext0b %[__tmp],%[__addr8],%[__prev]\n" \
+ " cmpeq %[__prev],%[__old],%[__cmp]\n" \
+ " wr_f %[__cmp]\n" \
+ " mask0b %[__tmp],%[__addr8],%[__tmp]\n" \
+ " or %[__snew],%[__tmp],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr64])\n" \
+ " rd_f %[__tmp]\n" \
+ " beq %[__cmp],2f\n" \
+ " beq %[__tmp],1b\n" \
+ "2:" \
+ : [__prev] "=&r"(__prev), [__snew] "=&r"(__snew), \
+ [__tmp] "=&r"(__tmp), [__cmp] "=&r"(__cmp), \
+ [__addr64] "=&r"(__addr64) \
+ : [__addr8] "r"(mem), \
+ [__old] "Ir"((uint64_t) (uint8_t) (uint64_t) (old)), \
+ [__new] "r"(new) \
+ : "memory"); \
+ })
+
+#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __snew, __addr64; \
+ __asm__ __volatile__( \
+ mb1 " bic %[__addr16],7,%[__addr64]\n" \
+ " ins1b %[__new],%[__addr16],%[__snew]\n" \
+ "1: lldl %[__tmp],0(%[__addr64])\n" \
+ " ext1b %[__tmp],%[__addr16],%[__prev]\n" \
+ " cmpeq %[__prev],%[__old],%[__cmp]\n" \
+ " wr_f %[__cmp]\n" \
+ " mask1b %[__tmp],%[__addr16],%[__tmp]\n" \
+ " or %[__snew],%[__tmp],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr64])\n" \
+ " rd_f %[__tmp]\n" \
+ " beq %[__cmp],2f\n" \
+ " beq %[__tmp],1b\n" \
+ "2:" \
+ : [__prev] "=&r"(__prev), [__snew] "=&r"(__snew), \
+ [__tmp] "=&r"(__tmp), [__cmp] "=&r"(__cmp), \
+ [__addr64] "=&r"(__addr64) \
+ : [__addr16] "r"(mem), \
+ [__old] "Ir"((uint64_t) (uint16_t) (uint64_t) (old)), \
+ [__new] "r"(new) \
+ : "memory"); \
+ })
+#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __addr, __tmp; \
+ __asm__ __volatile__( \
+ mb1 " ldi %[__addr],%[__mem]\n" \
+ "1: lldw %[__prev],0(%[__addr])\n" \
+ " cmpeq %[__prev],%[__old],%[__tmp]\n" \
+ " wr_f %[__tmp]\n" \
+ " mov %[__new],%[__cmp]\n" \
+ " lstw %[__cmp],0(%[__addr])\n" \
+ " rd_f %[__cmp]\n" \
+ " beq %[__tmp],2f\n" \
+ " beq %[__cmp],1b\n" \
+ "2:" \
+ : [__prev] "=&r"(__prev), [__cmp] "=&r"(__cmp), [__tmp] "=&r"(__tmp), \
+ [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), \
+ [__old] "Ir"((uint64_t) (atomic32_t) (uint64_t) (old)), \
+ [__new] "Ir"(new) \
+ : "memory"); \
+ })
+
+#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __addr, __tmp; \
+ __asm__ __volatile__(mb1 " ldi %[__addr],%[__mem]\n" \
+ "1: lldl %[__prev],0(%[__addr])\n" \
+ " cmpeq %[__prev],%[__old],%[__tmp]\n" \
+ " wr_f %[__tmp]\n" \
+ " mov %[__new],%[__cmp]\n" \
+ " lstl %[__cmp],0(%[__addr])\n" \
+ " rd_f %[__cmp]\n" \
+ " beq %[__tmp],2f\n" \
+ " beq %[__cmp],1b\n" \
+ "2:" \
+ : [__prev] "=&r"(__prev), [__cmp] "=&r"(__cmp), \
+ [__tmp] "=&r"(__tmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), \
+ [__old] "Ir"((uint64_t) (old)), [__new] "Ir"(new) \
+ : "memory"); \
+ })
+/* For all "bool" routines, we return FALSE if the exchange is successful. */
+
+#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_8_int (mem, new, old, mb1, mb2); \
+ !__cmp; \
+ })
+
+#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_16_int (mem, new, old, mb1, mb2); \
+ !__cmp; \
+ })
+
+#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_32_int (mem, new, old, mb1, mb2); \
+ !__cmp; \
+ })
+
+#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_64_int (mem, new, old, mb1, mb2); \
+ !__cmp; \
+ })
+
+/* For all "val" routines, return the old value whether exchange
+ successful or not. */
+
+#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_8_int (mem, new, old, mb1, mb2); \
+ (typeof (*mem)) __prev; \
+ })
+
+#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_16_int (mem, new, old, mb1, mb2); \
+ (typeof (*mem)) __prev; \
+ })
+
+#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_32_int (mem, new, old, mb1, mb2); \
+ (typeof (*mem)) __prev; \
+ })
+
+#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_64_int (mem, new, old, mb1, mb2); \
+ (typeof (*mem)) __prev; \
+ })
+
+/* Compare and exchange with "acquire" semantics, i.e., barrier after. */
+
+#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
+ __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, mem, new, old, \
+ "", __MB)
+
+#define atomic_compare_and_exchange_val_acq(mem, new, old) \
+ __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
+ "", __MB)
+
+/* Compare and exchange with "release" semantics, i.e., barrier before. */
+
+#define atomic_compare_and_exchange_val_rel(mem, new, old) \
+ __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
+ __MB, "")
+
+/* Atomically store value and return the previous value. */
+
+#define __arch_exchange_8_int(mem, value, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __addr64, __sval, __tmp1; \
+ __typeof (*mem) __ret; \
+ __asm__ __volatile__( \
+ mb1 " bic %[__addr8],7,%[__addr64]\n" \
+ " ins0b %[__value],%[__addr8],%[__sval]\n" \
+ "1: lldl %[__tmp],0(%[__addr64])\n" \
+ " ldi %[__tmp1],1\n" \
+ " wr_f %[__tmp1]\n" \
+ " ext0b %[__tmp],%[__addr8],%[__ret]\n" \
+ " mask0b %[__tmp],%[__addr8],%[__tmp]\n" \
+ " or %[__sval],%[__tmp],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr64])\n" \
+ " rd_f %[__tmp]\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__sval] "=&r"(__sval), [__tmp] "=&r"(__tmp), \
+ [__tmp1] "=&r"(__tmp1), [__addr64] "=&r"(__addr64) \
+ : [__addr8] "r"(mem), [__value] "r"(value) \
+ : "memory"); \
+ __ret; \
+ })
+
+#define __arch_exchange_16_int(mem, value, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __addr64, __sval, __tmp1; \
+ __typeof (*mem) __ret; \
+ __asm__ __volatile__( \
+ mb1 " bic %[__addr16],7,%[__addr64]\n" \
+ " ins1b %[__value],%[__addr16],%[__sval]\n" \
+ "1: lldl %[__tmp],0(%[__addr64])\n" \
+ " ldi %[__tmp1],1\n" \
+ " wr_f %[__tmp1]\n" \
+ " ext1b %[__tmp],%[__addr16],%[__ret]\n" \
+ " mask1b %[__tmp],%[__addr16],%[__tmp]\n" \
+ " or %[__sval],%[__tmp],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr64])\n" \
+ " rd_f %[__tmp]\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__sval] "=&r"(__sval), [__tmp] "=&r"(__tmp), \
+ [__tmp1] "=&r"(__tmp1), [__addr64] "=&r"(__addr64) \
+ : [__addr16] "r"(mem), [__value] "r"(value) \
+ : "memory"); \
+ __ret; \
+ })
+#define __arch_exchange_32_int(mem, value, mb1, mb2) \
+ ({ \
+ signed int __tmp; \
+ __typeof (*mem) __ret; \
+ unsigned long __addr; \
+ __asm__ __volatile__( \
+ mb1 " ldi %[__addr],%[__mem]\n" \
+ "1: lldw %[__ret],0(%[__addr])\n" \
+ " ldi %[__tmp],1\n" \
+ " wr_f %[__tmp]\n" \
+ " mov %[__val],%[__tmp]\n" \
+ " lstw %[__tmp],0(%[__addr])\n" \
+ " rd_f %[__tmp]\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__tmp] "=&r"(__tmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), [__val] "Ir"(value) \
+ : "memory"); \
+ __ret; \
+ })
+
+#define __arch_exchange_64_int(mem, value, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __addr; \
+ __typeof (*mem) __ret; \
+ __asm__ __volatile__( \
+ mb1 " ldi %[__addr],%[__mem]\n" \
+ "1: lldl %[__ret],0(%[__addr])\n" \
+ " ldi %[__tmp],1\n" \
+ " wr_f %[__tmp]\n" \
+ " mov %[__val],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr])\n" \
+ " rd_f %[__tmp]\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__tmp] "=&r"(__tmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), [__val] "Ir"(value) \
+ : "memory"); \
+ __ret; \
+ })
+
+#define atomic_exchange_acq(mem, value) \
+ __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB)
+
+#define atomic_exchange_rel(mem, value) \
+ __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "")
+
+/* Atomically add value and return the previous (unincremented) value. */
+
+#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \
+ ({ \
+ __builtin_trap (); \
+ 0; \
+ })
+
+#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \
+ ({ \
+ __builtin_trap (); \
+ 0; \
+ })
+
+#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2) \
+ ({ \
+ signed int __tmp; \
+ __typeof (*mem) __ret; \
+ unsigned long __addr; \
+ __asm__ __volatile__( \
+ mb1 " ldi %[__addr],%[__mem]\n" \
+ "1: lldw %[__ret],0(%[__addr])\n" \
+ " ldi %[__tmp],1\n" \
+ " wr_f %[__tmp]\n" \
+ " addw %[__ret],%[__val],%[__tmp]\n" \
+ " lstw %[__tmp],0(%[__addr])\n" \
+ " rd_f %[__tmp]\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__tmp] "=&r"(__tmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), [__val] "Ir"((signed int) (value)) \
+ : "memory"); \
+ __ret; \
+ })
+
+#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __addr; \
+ __typeof (*mem) __ret; \
+ __asm__ __volatile__( \
+ mb1 " ldi %[__addr],%[__mem]\n" \
+ "1: lldl %[__ret],0(%[__addr])\n" \
+ " ldi %[__tmp],1\n" \
+ " wr_f %[__tmp]\n" \
+ " addl %[__ret],%[__val],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr])\n" \
+ " rd_f %[__tmp]\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__tmp] "=&r"(__tmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), [__val] "Ir"((unsigned long) (value)) \
+ : "memory"); \
+ __ret; \
+ })
+
+/* ??? Barrier semantics for atomic_exchange_and_add appear to be
+ undefined. Use full barrier for now, as that's safe. */
+#define atomic_exchange_and_add(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB)
+
+/* ??? Blah, I'm lazy. Implement these later. Can do better than the
+ compare-and-exchange loop provided by generic code.
+
+#define atomic_decrement_if_positive(mem)
+#define atomic_bit_test_set(mem, bit)
+
+*/
+#define atomic_full_barrier() __asm("memb" : : : "memory");
+#define atomic_read_barrier() __asm("memb" : : : "memory");
+#define atomic_write_barrier() __asm("memb" : : : "memory");
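
For reference, a minimal sketch (not part of the patch) of how glibc-internal callers reach the header above: glibc's generic atomic.h dispatches through __atomic_val_bysize, which appends the operand size and the "int" suffix to pick one of the __arch_*_{8,16,32,64}_int macros defined here. The names lock_word, counter and atomic_usage_sketch are illustrative only.

/* Sketch of glibc-internal usage; assumes glibc's internal <atomic.h>.  */
#include <atomic.h>

static int lock_word;            /* 32-bit operand -> *_32_int variants.  */
static unsigned long counter;    /* 64-bit operand -> *_64_int variants.  */

void
atomic_usage_sketch (void)
{
  /* Acquire CAS ("" before, __MB after): returns the previous value,
     so the exchange succeeded iff prev == 0.  */
  int prev = atomic_compare_and_exchange_val_acq (&lock_word, 1, 0);

  /* Fetch-and-add with a full barrier on both sides (__MB, __MB).  */
  unsigned long old = atomic_exchange_and_add (&counter, 1);

  (void) prev;
  (void) old;
}
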
diff --git a/sysdeps/sw_64/nptl/bits/struct_rwlock.h b/sysdeps/sw_64/nptl/bits/struct_rwlock.h
new file mode 100644
index 00000000..8cbeefc1
--- /dev/null
+++ b/sysdeps/sw_64/nptl/bits/struct_rwlock.h
@@ -0,0 +1,43 @@
+/* Sw_64 internal rwlock struct definitions.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _RWLOCK_INTERNAL_H
+#define _RWLOCK_INTERNAL_H
+
+struct __pthread_rwlock_arch_t
+{
+ unsigned int __readers;
+ unsigned int __writers;
+ unsigned int __wrphase_futex;
+ unsigned int __writers_futex;
+ unsigned int __pad3;
+ unsigned int __pad4;
+ int __cur_writer;
+ int __shared;
+ unsigned long int __pad1;
+ unsigned long int __pad2;
+ /* FLAGS must stay at this position in the structure to maintain
+ binary compatibility. */
+ unsigned int __flags;
+};
+
+#define __PTHREAD_RWLOCK_INITIALIZER(__flags) \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, __flags
+
+#endif
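
A hedged usage sketch (not part of the patch; table_lock and read_side are illustrative names): PTHREAD_RWLOCK_INITIALIZER in <pthread.h> ultimately expands to the per-architecture __PTHREAD_RWLOCK_INITIALIZER defined above, zero-filling every field and leaving __flags in the final position.

#include <pthread.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

void
read_side (void)
{
  pthread_rwlock_rdlock (&table_lock);
  /* ... read the shared data ... */
  pthread_rwlock_unlock (&table_lock);
}
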
diff --git a/sysdeps/sw_64/nptl/pthread_spin_lock.S b/sysdeps/sw_64/nptl/pthread_spin_lock.S
new file mode 100644
index 00000000..b7e44839
--- /dev/null
+++ b/sysdeps/sw_64/nptl/pthread_spin_lock.S
@@ -0,0 +1,55 @@
+/* Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson <rth@twiddle.net>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <shlib-compat.h>
+
+ .text
+ .align 4
+
+ .globl __pthread_spin_lock
+ .ent __pthread_spin_lock
+__pthread_spin_lock:
+ .frame $sp, 0, $26, 0
+ .prologue 0
+
+ memb
+0: lldw $1, 0($16)
+ xor $1, 1, $1
+ ldi $0, 0
+ wr_f $1
+
+ ldi $2, 1
+ lstw $2, 0($16)
+ rd_f $2
+ beq $2, 1f
+ ret
+
+1: ldw $1, 0($16)
+ bne $1, 1b
+ unop
+ br 0b
+
+
+
+
+ .end __pthread_spin_lock
+versioned_symbol (libc, __pthread_spin_lock, pthread_spin_lock, GLIBC_2_34)
+
+#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_2, GLIBC_2_34)
+compat_symbol (libpthread, __pthread_spin_lock, pthread_spin_lock, GLIBC_2_2)
+#endif
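
In rough C terms the locked sequence above behaves like the sketch below (not part of the patch; plain C cannot express the lldw/lstw pair guarded by the wr_f/rd_f lock flag, so a compare-and-swap builtin stands in for it, and spin_lock_sketch is an illustrative name):

/* Approximate C behaviour of the assembly spin lock above.  */
int
spin_lock_sketch (volatile int *lock)      /* the lock word, $16 above */
{
  for (;;)
    {
      int expected = 0;
      /* lldw/wr_f/lstw/rd_f: store 1 only if the word was 0.  */
      if (__atomic_compare_exchange_n (lock, &expected, 1, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        return 0;                          /* $0 already holds 0 */
      /* Otherwise spin on plain ldw loads until the lock looks free,
         then retry the locked sequence (the 1:/br 0b loop).  */
      while (*lock != 0)
        ;
    }
}
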
diff --git a/sysdeps/sw_64/nptl/pthread_spin_trylock.S b/sysdeps/sw_64/nptl/pthread_spin_trylock.S
new file mode 100644
index 00000000..8551c34a
--- /dev/null
+++ b/sysdeps/sw_64/nptl/pthread_spin_trylock.S
@@ -0,0 +1,56 @@
+/* Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson <rth@twiddle.net>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <shlib-compat.h>
+
+#define _ERRNO_H 1
+#include <bits/errno.h>
+
+ .text
+ .align 4
+
+ .globl __pthread_spin_trylock
+ .ent __pthread_spin_trylock
+__pthread_spin_trylock:
+ .frame $sp, 0, $26, 0
+ .prologue 0
+
+ memb
+0: lldw $1, 0($16)
+ xor $1, 1, $1
+ ldi $2, 1
+ ldi $0, EBUSY
+ wr_f $1
+
+ lstw $2, 0($16)
+ rd_f $2
+ beq $1, 1f
+ beq $2, 2f
+ ldi $0, 0
+
+1: ret
+2: br 0b
+
+ .end __pthread_spin_trylock
+versioned_symbol (libc, __pthread_spin_trylock, pthread_spin_trylock,
+ GLIBC_2_34)
+
+#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_2, GLIBC_2_34)
+compat_symbol (libpthread, __pthread_spin_trylock, pthread_spin_trylock,
+ GLIBC_2_2)
+#endif
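
The same kind of sketch for the trylock (not part of the patch; spin_trylock_sketch is an illustrative name). The assembly preloads EBUSY into $0 and only overwrites it with 0 once the conditional store has succeeded; a failed store-conditional retries rather than reporting EBUSY.

/* Approximate C behaviour of the assembly trylock above.  */
#include <errno.h>

int
spin_trylock_sketch (volatile int *lock)
{
  int expected = 0;
  if (__atomic_compare_exchange_n (lock, &expected, 1, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return 0;        /* lock was free and is now held by us */
  return EBUSY;      /* lock already held */
}
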
diff --git a/sysdeps/sw_64/sw8a/atomic-machine.h b/sysdeps/sw_64/sw8a/atomic-machine.h
new file mode 100644
index 00000000..db3320fc
--- /dev/null
+++ b/sysdeps/sw_64/sw8a/atomic-machine.h
@@ -0,0 +1,371 @@
+/* Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <stdint.h>
+
+typedef int8_t atomic8_t;
+typedef uint8_t uatomic8_t;
+typedef int_fast8_t atomic_fast8_t;
+typedef uint_fast8_t uatomic_fast8_t;
+
+typedef int16_t atomic16_t;
+typedef uint16_t uatomic16_t;
+typedef int_fast16_t atomic_fast16_t;
+typedef uint_fast16_t uatomic_fast16_t;
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+typedef int_fast32_t atomic_fast32_t;
+typedef uint_fast32_t uatomic_fast32_t;
+
+typedef int64_t atomic64_t;
+typedef uint64_t uatomic64_t;
+typedef int_fast64_t atomic_fast64_t;
+typedef uint_fast64_t uatomic_fast64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+#define __HAVE_64B_ATOMICS 1
+#define USE_ATOMIC_COMPILER_BUILTINS 0
+
+/* XXX Is this actually correct? */
+#define ATOMIC_EXCHANGE_USES_CAS 1
+
+#ifdef UP
+# define __MB /* nothing */
+#else
+# define __MB " memb\n"
+#endif
+
+/* Compare and exchange. For all of the "xxx" routines, we expect a
+ "__prev" and a "__cmp" variable to be provided by the enclosing scope,
+ in which values are returned. */
+/* Note: no memb is emitted after the rd_f.  */
+#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __snew, __addr64; \
+ __asm__ __volatile__( \
+ " bic %[__addr8],7,%[__addr64]\n" \
+ " inslb %[__new],%[__addr8],%[__snew]\n" \
+ "1: lldl %[__tmp],0(%[__addr64])\n" \
+ " extlb %[__tmp],%[__addr8],%[__prev]\n" \
+ " cmpeq %[__prev],%[__old],%[__cmp]\n" \
+ " beq %[__cmp],2f\n" \
+ " masklb %[__tmp],%[__addr8],%[__tmp]\n" \
+ " or %[__snew],%[__tmp],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr64])\n" \
+ " beq %[__tmp],1b\n" \
+ "2:" \
+ : [__prev] "=&r"(__prev), [__snew] "=&r"(__snew), \
+ [__tmp] "=&r"(__tmp), [__cmp] "=&r"(__cmp), \
+ [__addr64] "=&r"(__addr64) \
+ : [__addr8] "r"(mem), \
+ [__old] "Ir"((uint64_t) (uint8_t) (uint64_t) (old)), \
+ [__new] "r"(new) \
+ : "memory"); \
+ })
+
+#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __snew, __addr64; \
+ __asm__ __volatile__( \
+ " bic %[__addr16],7,%[__addr64]\n" \
+ " inslh %[__new],%[__addr16],%[__snew]\n" \
+ "1: lldl %[__tmp],0(%[__addr64])\n" \
+ " extlh %[__tmp],%[__addr16],%[__prev]\n" \
+ " cmpeq %[__prev],%[__old],%[__cmp]\n" \
+ " beq %[__cmp],2f\n" \
+ " masklh %[__tmp],%[__addr16],%[__tmp]\n" \
+ " or %[__snew],%[__tmp],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr64])\n" \
+ " beq %[__tmp],1b\n" \
+ "2:" \
+ : [__prev] "=&r"(__prev), [__snew] "=&r"(__snew), \
+ [__tmp] "=&r"(__tmp), [__cmp] "=&r"(__cmp), \
+ [__addr64] "=&r"(__addr64) \
+ : [__addr16] "r"(mem), \
+ [__old] "Ir"((uint64_t) (uint16_t) (uint64_t) (old)), \
+ [__new] "r"(new) \
+ : "memory"); \
+ })
+#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __addr; \
+ __asm__ __volatile__( \
+ " ldi %[__addr],%[__mem]\n" \
+ "1: lldw %[__prev],0(%[__addr])\n" \
+ " cmpeq %[__prev],%[__old],%[__cmp]\n" \
+ " beq %[__cmp],2f\n" \
+ " mov %[__new],%[__cmp]\n" \
+ " lstw %[__cmp],0(%[__addr])\n" \
+ " beq %[__cmp],1b\n" \
+ "2:" \
+ : \
+ [__prev] "=&r"(__prev), [__cmp] "=&r"(__cmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), \
+ [__old] "Ir"((uint64_t) (atomic32_t) (uint64_t) (old)), \
+ [__new] "Ir"(new) \
+ : "memory"); \
+ })
+
+#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __addr; \
+ __asm__ __volatile__(" ldi %[__addr],%[__mem]\n" \
+ "1: lldl %[__prev],0(%[__addr])\n" \
+ " cmpeq %[__prev],%[__old],%[__cmp]\n" \
+ " beq %[__cmp],2f\n" \
+ " mov %[__new],%[__cmp]\n" \
+ " lstl %[__cmp],0(%[__addr])\n" \
+ " beq %[__cmp],1b\n" \
+ "2:" \
+ : [__prev] "=&r"(__prev), [__cmp] "=&r"(__cmp), \
+ [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), \
+ [__old] "Ir"((uint64_t) (old)), [__new] "Ir"(new) \
+ : "memory"); \
+ })
+/* For all "bool" routines, we return FALSE if the exchange is successful. */
+
+#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_8_int (mem, new, old, mb1, mb2); \
+ !__cmp; \
+ })
+
+#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_16_int (mem, new, old, mb1, mb2); \
+ !__cmp; \
+ })
+
+#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_32_int (mem, new, old, mb1, mb2); \
+ !__cmp; \
+ })
+
+#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_64_int (mem, new, old, mb1, mb2); \
+ !__cmp; \
+ })
+
+/* For all "val" routines, return the old value whether exchange
+ successful or not. */
+
+#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_8_int (mem, new, old, mb1, mb2); \
+ (typeof (*mem)) __prev; \
+ })
+
+#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_16_int (mem, new, old, mb1, mb2); \
+ (typeof (*mem)) __prev; \
+ })
+
+#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_32_int (mem, new, old, mb1, mb2); \
+ (typeof (*mem)) __prev; \
+ })
+
+#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \
+ ({ \
+ unsigned long __prev; \
+ int __cmp; \
+ __arch_compare_and_exchange_xxx_64_int (mem, new, old, mb1, mb2); \
+ (typeof (*mem)) __prev; \
+ })
+
+/* Compare and exchange with "acquire" semantics, i.e., barrier after. */
+
+#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
+ __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, mem, new, old, \
+ "", __MB)
+
+#define atomic_compare_and_exchange_val_acq(mem, new, old) \
+ __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
+ "", __MB)
+
+/* Compare and exchange with "release" semantics, i.e., barrier before. */
+
+#define atomic_compare_and_exchange_val_rel(mem, new, old) \
+ __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
+ __MB, "")
+
+/* Atomically store value and return the previous value. */
+
+#define __arch_exchange_8_int(mem, value, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __addr64, __sval; \
+ __typeof (*mem) __ret; \
+ __asm__ __volatile__(" bic %[__addr8],7,%[__addr64]\n" \
+ " inslb %[__value],%[__addr8],%[__sval]\n" \
+ "1: lldl %[__tmp],0(%[__addr64])\n" \
+ " extlb %[__tmp],%[__addr8],%[__ret]\n" \
+ " masklb %[__tmp],%[__addr8],%[__tmp]\n" \
+ " or %[__sval],%[__tmp],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr64])\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__sval] "=&r"(__sval), \
+ [__tmp] "=&r"(__tmp), [__addr64] "=&r"(__addr64) \
+ : [__addr8] "r"(mem), [__value] "r"(value) \
+ : "memory"); \
+ __ret; \
+ })
+
+#define __arch_exchange_16_int(mem, value, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __addr64, __sval, __tmp1; \
+ __typeof (*mem) __ret; \
+ __asm__ __volatile__(" bic %[__addr16],7,%[__addr64]\n" \
+ " inslh %[__value],%[__addr16],%[__sval]\n" \
+ "1: lldl %[__tmp],0(%[__addr64])\n" \
+ " extlh %[__tmp],%[__addr16],%[__ret]\n" \
+ " masklh %[__tmp],%[__addr16],%[__tmp]\n" \
+ " or %[__sval],%[__tmp],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr64])\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__sval] "=&r"(__sval), \
+ [__tmp] "=&r"(__tmp), [__addr64] "=&r"(__addr64) \
+ : [__addr16] "r"(mem), [__value] "r"(value) \
+ : "memory"); \
+ __ret; \
+ })
+#define __arch_exchange_32_int(mem, value, mb1, mb2) \
+ ({ \
+ signed int __tmp; \
+ __typeof (*mem) __ret; \
+ unsigned long __addr; \
+ __asm__ __volatile__( \
+ " ldi %[__addr],%[__mem]\n" \
+ "1: lldw %[__ret],0(%[__addr])\n" \
+ " mov %[__val],%[__tmp]\n" \
+ " lstw %[__tmp],0(%[__addr])\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__tmp] "=&r"(__tmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), [__val] "Ir"(value) \
+ : "memory"); \
+ __ret; \
+ })
+
+#define __arch_exchange_64_int(mem, value, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __addr; \
+ __typeof (*mem) __ret; \
+ __asm__ __volatile__( \
+ " ldi %[__addr],%[__mem]\n" \
+ "1: lldl %[__ret],0(%[__addr])\n" \
+ " mov %[__val],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr])\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__tmp] "=&r"(__tmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), [__val] "Ir"(value) \
+ : "memory"); \
+ __ret; \
+ })
+
+#define atomic_exchange_acq(mem, value) \
+ __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB)
+
+#define atomic_exchange_rel(mem, value) \
+ __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "")
+
+/* Atomically add value and return the previous (unincremented) value. */
+
+#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \
+ ({ \
+ __builtin_trap (); \
+ 0; \
+ })
+
+#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \
+ ({ \
+ __builtin_trap (); \
+ 0; \
+ })
+
+#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2) \
+ ({ \
+ signed int __tmp; \
+ __typeof (*mem) __ret; \
+ unsigned long __addr; \
+ __asm__ __volatile__( \
+ " ldi %[__addr],%[__mem]\n" \
+ "1: lldw %[__ret],0(%[__addr])\n" \
+ " addw %[__ret],%[__val],%[__tmp]\n" \
+ " lstw %[__tmp],0(%[__addr])\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__tmp] "=&r"(__tmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), [__val] "Ir"((signed int) (value)) \
+ : "memory"); \
+ __ret; \
+ })
+
+#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2) \
+ ({ \
+ unsigned long __tmp, __addr; \
+ __typeof (*mem) __ret; \
+ __asm__ __volatile__( \
+ " ldi %[__addr],%[__mem]\n" \
+ "1: lldl %[__ret],0(%[__addr])\n" \
+ " addl %[__ret],%[__val],%[__tmp]\n" \
+ " lstl %[__tmp],0(%[__addr])\n" \
+ " beq %[__tmp],1b\n" \
+ : [__ret] "=&r"(__ret), [__tmp] "=&r"(__tmp), [__addr] "=&r"(__addr) \
+ : [__mem] "m"(*(mem)), [__val] "Ir"((unsigned long) (value)) \
+ : "memory"); \
+ __ret; \
+ })
+
+/* ??? Barrier semantics for atomic_exchange_and_add appear to be
+ undefined. Use full barrier for now, as that's safe. */
+#define atomic_exchange_and_add(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB)
+
+/* ??? Blah, I'm lazy. Implement these later. Can do better than the
+ compare-and-exchange loop provided by generic code.
+
+#define atomic_decrement_if_positive(mem)
+#define atomic_bit_test_set(mem, bit)
+
+*/
+# ifndef UP
+# define atomic_full_barrier() __asm ("memb" : : : "memory");
+# define atomic_read_barrier() __asm ("memb" : : : "memory");
+# define atomic_write_barrier() __asm ("memb" : : : "memory");
+# endif
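
Relative to sysdeps/sw_64/atomic-machine.h earlier in this patch, the sw8a variant above drops the wr_f/rd_f lock-flag sequence, spells the sub-word insert/extract/mask instructions inslb/extlb/masklb (and the "lh" halfword forms), and emits memb barriers only when UP is not defined. The names it exports to the rest of glibc are unchanged; a short sketch (not part of the patch; futex_word and post_futex are illustrative names):

/* Sketch of glibc-internal usage; assumes glibc's internal <atomic.h>.  */
#include <atomic.h>

static unsigned int futex_word;

unsigned int
post_futex (void)
{
  /* Expands via __atomic_val_bysize to __arch_exchange_32_int with ""
     before and __MB after, i.e. acquire ordering.  */
  return atomic_exchange_acq (&futex_word, 1);
}
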
diff --git a/sysdeps/sw_64/sw8a/nptl/bits/struct_rwlock.h b/sysdeps/sw_64/sw8a/nptl/bits/struct_rwlock.h
new file mode 100644
index 00000000..8cbeefc1
--- /dev/null
+++ b/sysdeps/sw_64/sw8a/nptl/bits/struct_rwlock.h
@@ -0,0 +1,43 @@
+/* Sw_64 internal rwlock struct definitions.
+ Copyright (C) 2019-2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _RWLOCK_INTERNAL_H
+#define _RWLOCK_INTERNAL_H
+
+struct __pthread_rwlock_arch_t
+{
+ unsigned int __readers;
+ unsigned int __writers;
+ unsigned int __wrphase_futex;
+ unsigned int __writers_futex;
+ unsigned int __pad3;
+ unsigned int __pad4;
+ int __cur_writer;
+ int __shared;
+ unsigned long int __pad1;
+ unsigned long int __pad2;
+ /* FLAGS must stay at this position in the structure to maintain
+ binary compatibility. */
+ unsigned int __flags;
+};
+
+#define __PTHREAD_RWLOCK_INITIALIZER(__flags) \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, __flags
+
+#endif
diff --git a/sysdeps/sw_64/sw8a/nptl/pthread_spin_lock.S b/sysdeps/sw_64/sw8a/nptl/pthread_spin_lock.S
new file mode 100644
index 00000000..ab3408a3
--- /dev/null
+++ b/sysdeps/sw_64/sw8a/nptl/pthread_spin_lock.S
@@ -0,0 +1,43 @@
+
+
+/* Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson <rth@twiddle.net>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* SHIPS20171102_LOCK_READ_CONDITION_WRITE. */
+ .text
+ .align 4
+
+ .globl pthread_spin_lock
+ .ent pthread_spin_lock
+pthread_spin_lock:
+ .frame $sp, 0, $26, 0
+ .prologue 0
+0: lldw $1, 0($16)
+ ldi $2, 1
+ ldi $0, 0
+ bne $1, 1f
+
+ lstw $2, 0($16)
+ beq $2, 1f
+ ret
+
+1: ldw $1, 0($16)
+ bne $1, 1b
+ unop
+ br 0b
+ .end pthread_spin_lock
diff --git a/sysdeps/sw_64/sw8a/nptl/pthread_spin_trylock.S b/sysdeps/sw_64/sw8a/nptl/pthread_spin_trylock.S
new file mode 100644
index 00000000..374dccae
--- /dev/null
+++ b/sysdeps/sw_64/sw8a/nptl/pthread_spin_trylock.S
@@ -0,0 +1,44 @@
+
+
+/* Copyright (C) 2003-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson <rth@twiddle.net>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* SHIPS20171102_LOCK_READ_CONDITION_WRITE. */
+#define _ERRNO_H 1
+#include <bits/errno.h>
+
+ .text
+ .align 4
+
+ .globl pthread_spin_trylock
+ .ent pthread_spin_trylock
+pthread_spin_trylock:
+ .frame $sp, 0, $26, 0
+ .prologue 0
+0: lldw $1, 0($16)
+ ldi $2, 1
+ ldi $0, EBUSY
+ bne $1, 1f
+
+ lstw $2, 0($16)
+ beq $2, 2f
+ ldi $0, 0
+
+1: ret
+2: br 0b
+ .end pthread_spin_trylock
--
2.25.1