Loongarch-Add-ifunc-support-for-memcpy-aligned-unali.patch
From 9c522272146423c1ef9fb9e071737a8ad26e844e Mon Sep 17 00:00:00 2001
From: dengjianbo <dengjianbo@loongson.cn>
Date: Tue, 15 Aug 2023 09:11:53 +0800
Subject: [PATCH 07/29] Loongarch: Add ifunc support for memcpy{aligned,
unaligned, lsx, lasx} and memmove{aligned, unaligned, lsx, lasx}

These implementations improve the time to copy data in the glibc
microbenchmark as shown below:
memcpy-lasx reduces the runtime by about 8%-76%
memcpy-lsx reduces the runtime by about 8%-72%
memcpy-unaligned reduces the runtime of unaligned data copying by up to 40%
memcpy-aligned reduces the runtime of unaligned data copying by up to 25%
memmove-lasx reduces the runtime by about 20%-73%
memmove-lsx reduces the runtime by about 50%
memmove-unaligned reduces the runtime of unaligned data moving by up to 40%
memmove-aligned reduces the runtime of unaligned data moving by up to 25%

Signed-off-by: Peng Fan <fanpeng@loongson.cn>
Signed-off-by: ticat_fp <fanpeng@loongson.cn>
---
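
The selection logic added by this patch lives in ifunc-lasx.h below:
IFUNC_SELECTOR prefers the LASX variant, then LSX, then the
unaligned-access copy, and finally the aligned fallback.  A minimal
user-space sketch of the same priority check, assuming the
HWCAP_LOONGARCH_* bit values from the Linux kernel's <asm/hwcap.h> for
LoongArch, would look like this (illustrative only; the real dispatch
happens inside the dynamic loader via the ifunc resolver):

    /* Sketch: mirror the priority order of IFUNC_SELECTOR (ifunc-lasx.h)
       from user space.  The bit values below are assumed to match the
       LoongArch <asm/hwcap.h> kernel UAPI header.  */
    #include <stdio.h>
    #include <sys/auxv.h>

    #define HWCAP_LOONGARCH_UAL  (1 << 2)   /* unaligned access supported */
    #define HWCAP_LOONGARCH_LSX  (1 << 4)   /* 128-bit SIMD */
    #define HWCAP_LOONGARCH_LASX (1 << 5)   /* 256-bit SIMD */

    int
    main (void)
    {
      unsigned long hwcap = getauxval (AT_HWCAP);

      if (hwcap & HWCAP_LOONGARCH_LASX)
        puts ("memcpy/memmove resolve to __memcpy_lasx/__memmove_lasx");
      else if (hwcap & HWCAP_LOONGARCH_LSX)
        puts ("memcpy/memmove resolve to __memcpy_lsx/__memmove_lsx");
      else if (hwcap & HWCAP_LOONGARCH_UAL)
        puts ("memcpy/memmove resolve to the *_unaligned variants");
      else
        puts ("memcpy/memmove resolve to the *_aligned variants");
      return 0;
    }
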
sysdeps/loongarch/lp64/multiarch/Makefile | 5 +
.../lp64/multiarch/ifunc-impl-list.c | 19 +
sysdeps/loongarch/lp64/multiarch/ifunc-lasx.h | 45 +
.../loongarch/lp64/multiarch/memcpy-aligned.S | 783 ++++++++++++++++++
.../loongarch/lp64/multiarch/memcpy-lasx.S | 20 +
sysdeps/loongarch/lp64/multiarch/memcpy-lsx.S | 20 +
.../lp64/multiarch/memcpy-unaligned.S | 247 ++++++
sysdeps/loongarch/lp64/multiarch/memcpy.c | 37 +
.../lp64/multiarch/memmove-aligned.S | 20 +
.../loongarch/lp64/multiarch/memmove-lasx.S | 287 +++++++
.../loongarch/lp64/multiarch/memmove-lsx.S | 534 ++++++++++++
.../lp64/multiarch/memmove-unaligned.S | 380 +++++++++
sysdeps/loongarch/lp64/multiarch/memmove.c | 38 +
13 files changed, 2435 insertions(+)
create mode 100644 sysdeps/loongarch/lp64/multiarch/ifunc-lasx.h
create mode 100644 sysdeps/loongarch/lp64/multiarch/memcpy-aligned.S
create mode 100644 sysdeps/loongarch/lp64/multiarch/memcpy-lasx.S
create mode 100644 sysdeps/loongarch/lp64/multiarch/memcpy-lsx.S
create mode 100644 sysdeps/loongarch/lp64/multiarch/memcpy-unaligned.S
create mode 100644 sysdeps/loongarch/lp64/multiarch/memcpy.c
create mode 100644 sysdeps/loongarch/lp64/multiarch/memmove-aligned.S
create mode 100644 sysdeps/loongarch/lp64/multiarch/memmove-lasx.S
create mode 100644 sysdeps/loongarch/lp64/multiarch/memmove-lsx.S
create mode 100644 sysdeps/loongarch/lp64/multiarch/memmove-unaligned.S
create mode 100644 sysdeps/loongarch/lp64/multiarch/memmove.c
diff --git a/sysdeps/loongarch/lp64/multiarch/Makefile b/sysdeps/loongarch/lp64/multiarch/Makefile
index 110a8c5c..afa51041 100644
--- a/sysdeps/loongarch/lp64/multiarch/Makefile
+++ b/sysdeps/loongarch/lp64/multiarch/Makefile
@@ -9,5 +9,10 @@ sysdep_routines += \
strchrnul-aligned \
strchrnul-lsx \
strchrnul-lasx \
+ memcpy-aligned \
+ memcpy-unaligned \
+ memmove-unaligned \
+ memmove-lsx \
+ memmove-lasx \
# sysdep_routines
endif
diff --git a/sysdeps/loongarch/lp64/multiarch/ifunc-impl-list.c b/sysdeps/loongarch/lp64/multiarch/ifunc-impl-list.c
index c7164b45..25eb96b0 100644
--- a/sysdeps/loongarch/lp64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/loongarch/lp64/multiarch/ifunc-impl-list.c
@@ -53,5 +53,24 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
#endif
IFUNC_IMPL_ADD (array, i, strchrnul, 1, __strchrnul_aligned)
)
+
+ IFUNC_IMPL (i, name, memcpy,
+#if !defined __loongarch_soft_float
+ IFUNC_IMPL_ADD (array, i, memcpy, SUPPORT_LASX, __memcpy_lasx)
+ IFUNC_IMPL_ADD (array, i, memcpy, SUPPORT_LSX, __memcpy_lsx)
+#endif
+ IFUNC_IMPL_ADD (array, i, memcpy, SUPPORT_UAL, __memcpy_unaligned)
+ IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_aligned)
+ )
+
+ IFUNC_IMPL (i, name, memmove,
+#if !defined __loongarch_soft_float
+ IFUNC_IMPL_ADD (array, i, memmove, SUPPORT_LASX, __memmove_lasx)
+ IFUNC_IMPL_ADD (array, i, memmove, SUPPORT_LSX, __memmove_lsx)
+#endif
+ IFUNC_IMPL_ADD (array, i, memmove, SUPPORT_UAL, __memmove_unaligned)
+ IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_aligned)
+ )
+
return i;
}
diff --git a/sysdeps/loongarch/lp64/multiarch/ifunc-lasx.h b/sysdeps/loongarch/lp64/multiarch/ifunc-lasx.h
new file mode 100644
index 00000000..3be67da6
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/ifunc-lasx.h
@@ -0,0 +1,45 @@
+/* Common definition for ifunc selection implementation.
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <ldsodefs.h>
+#include <ifunc-init.h>
+
+#if !defined __loongarch_soft_float
+extern __typeof (REDIRECT_NAME) OPTIMIZE (lasx) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (lsx) attribute_hidden;
+#endif
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (aligned) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (unaligned) attribute_hidden;
+
+static inline void *
+IFUNC_SELECTOR (void)
+{
+#if !defined __loongarch_soft_float
+ if (SUPPORT_LASX)
+ return OPTIMIZE (lasx);
+ else if (SUPPORT_LSX)
+ return OPTIMIZE (lsx);
+ else
+#endif
+ if (SUPPORT_UAL)
+ return OPTIMIZE (unaligned);
+ else
+ return OPTIMIZE (aligned);
+}
diff --git a/sysdeps/loongarch/lp64/multiarch/memcpy-aligned.S b/sysdeps/loongarch/lp64/multiarch/memcpy-aligned.S
new file mode 100644
index 00000000..299dd49c
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memcpy-aligned.S
@@ -0,0 +1,783 @@
+/* Optimized memcpy_aligned implementation using basic Loongarch instructions.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <sys/regdef.h>
+#include <sys/asm.h>
+
+#if IS_IN (libc)
+# define MEMCPY_NAME __memcpy_aligned
+# define MEMMOVE_NAME __memmove_aligned
+#else
+# define MEMCPY_NAME memcpy
+# define MEMMOVE_NAME memmove
+#endif
+
+#define LD_64(reg, n) \
+ ld.d t0, reg, n; \
+ ld.d t1, reg, n + 8; \
+ ld.d t2, reg, n + 16; \
+ ld.d t3, reg, n + 24; \
+ ld.d t4, reg, n + 32; \
+ ld.d t5, reg, n + 40; \
+ ld.d t6, reg, n + 48; \
+ ld.d t7, reg, n + 56;
+
+#define ST_64(reg, n) \
+ st.d t0, reg, n; \
+ st.d t1, reg, n + 8; \
+ st.d t2, reg, n + 16; \
+ st.d t3, reg, n + 24; \
+ st.d t4, reg, n + 32; \
+ st.d t5, reg, n + 40; \
+ st.d t6, reg, n + 48; \
+ st.d t7, reg, n + 56;
+
+LEAF(MEMMOVE_NAME, 6)
+ sub.d t0, a0, a1
+ bltu t0, a2, L(copy_back)
+END(MEMMOVE_NAME)
+
+LEAF_NO_ALIGN(MEMCPY_NAME)
+ srai.d a3, a2, 4
+ beqz a3, L(short_data)
+
+ move a4, a0
+ andi a5, a0, 0x7
+ andi a6, a1, 0x7
+ li.d t8, 8
+ beqz a5, L(check_align)
+
+ sub.d t2, t8, a5
+ sub.d a2, a2, t2
+ pcaddi t1, 20
+ slli.d t3, t2, 3
+
+ add.d a1, a1, t2
+ sub.d t1, t1, t3
+ add.d a4, a4, t2
+ jr t1
+
+L(al7):
+ ld.b t0, a1, -7
+ st.b t0, a4, -7
+L(al6):
+ ld.b t0, a1, -6
+ st.b t0, a4, -6
+L(al5):
+ ld.b t0, a1, -5
+ st.b t0, a4, -5
+L(al4):
+ ld.b t0, a1, -4
+ st.b t0, a4, -4
+L(al3):
+ ld.b t0, a1, -3
+ st.b t0, a4, -3
+L(al2):
+ ld.b t0, a1, -2
+ st.b t0, a4, -2
+L(al1):
+ ld.b t0, a1, -1
+ st.b t0, a4, -1
+
+L(check_align):
+ bne a5, a6, L(unalign)
+ srai.d a3, a2, 4
+ beqz a3, L(al_less_16bytes)
+ andi a3, a2, 0x3f
+
+ beq a3, a2, L(al_less_64bytes)
+ sub.d t0, a2, a3
+ move a2, a3
+ add.d a5, a1, t0
+
+L(loop_64bytes):
+ LD_64(a1, 0)
+ addi.d a1, a1, 64
+ ST_64(a4, 0)
+
+ addi.d a4, a4, 64
+ bne a1, a5, L(loop_64bytes)
+
+L(al_less_64bytes):
+ srai.d a3, a2, 5
+ beqz a3, L(al_less_32bytes)
+
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a1, 16
+ ld.d t3, a1, 24
+
+ addi.d a1, a1, 32
+ addi.d a2, a2, -32
+
+ st.d t0, a4, 0
+ st.d t1, a4, 8
+ st.d t2, a4, 16
+ st.d t3, a4, 24
+
+ addi.d a4, a4, 32
+
+L(al_less_32bytes):
+ srai.d a3, a2, 4
+ beqz a3, L(al_less_16bytes)
+
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ addi.d a1, a1, 16
+ addi.d a2, a2, -16
+
+ st.d t0, a4, 0
+ st.d t1, a4, 8
+ addi.d a4, a4, 16
+
+L(al_less_16bytes):
+ srai.d a3, a2, 3
+ beqz a3, L(al_less_8bytes)
+
+ ld.d t0, a1, 0
+ addi.d a1, a1, 8
+ addi.d a2, a2, -8
+ st.d t0, a4, 0
+ addi.d a4, a4, 8
+
+L(al_less_8bytes):
+ srai.d a3, a2, 2
+ beqz a3, L(al_less_4bytes)
+
+ ld.w t0, a1, 0
+ addi.d a1, a1, 4
+ addi.d a2, a2, -4
+ st.w t0, a4, 0
+ addi.d a4, a4, 4
+
+L(al_less_4bytes):
+ srai.d a3, a2, 1
+ beqz a3, L(al_less_2bytes)
+
+ ld.h t0, a1, 0
+ addi.d a1, a1, 2
+ addi.d a2, a2, -2
+ st.h t0, a4, 0
+ addi.d a4, a4, 2
+
+L(al_less_2bytes):
+ beqz a2, L(al_less_1byte)
+
+ ld.b t0, a1, 0
+ st.b t0, a4, 0
+
+L(al_less_1byte):
+ jr ra
+
+L(unalign):
+ andi a5, a1, 0x7
+ bstrins.d a1, zero, 2, 0
+ sub.d t8, t8, a5
+ slli.d a5, a5, 3
+
+ ld.d t0, a1, 0
+ addi.d a1, a1, 8
+ slli.d a6, t8, 3
+ srl.d a7, t0, a5
+
+ srai.d a3, a2, 4
+ beqz a3, L(un_less_16bytes)
+ andi a3, a2, 0x3f
+ beq a3, a2, L(un_less_64bytes)
+
+ sub.d t0, a2, a3
+ move a2, a3
+ add.d a3, a1, t0
+
+L(un_long_bytes):
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a1, 16
+ ld.d t3, a1, 24
+
+ srl.d t4, t0, a5
+ sll.d t0, t0, a6
+ srl.d t5, t1, a5
+ sll.d t1, t1, a6
+
+ srl.d t6, t2, a5
+ sll.d t2, t2, a6
+ srl.d t7, t3, a5
+ sll.d t3, t3, a6
+
+ or t0, a7, t0
+ or t1, t4, t1
+ or t2, t5, t2
+ or t3, t6, t3
+
+ ld.d t4, a1, 32
+ ld.d t5, a1, 40
+ ld.d t6, a1, 48
+ ld.d a7, a1, 56
+
+ st.d t0, a4, 0
+ st.d t1, a4, 8
+ st.d t2, a4, 16
+ st.d t3, a4, 24
+
+ addi.d a1, a1, 64
+
+ srl.d t0, t4, a5
+ sll.d t4, t4, a6
+ srl.d t1, t5, a5
+ sll.d t5, t5, a6
+
+ srl.d t2, t6, a5
+ sll.d t6, t6, a6
+ sll.d t3, a7, a6
+ srl.d a7, a7, a5
+
+ or t4, t7, t4
+ or t5, t0, t5
+ or t6, t1, t6
+ or t3, t2, t3
+
+ st.d t4, a4, 32
+ st.d t5, a4, 40
+ st.d t6, a4, 48
+ st.d t3, a4, 56
+
+ addi.d a4, a4, 64
+ bne a3, a1, L(un_long_bytes)
+
+L(un_less_64bytes):
+ srai.d a3, a2, 5
+ beqz a3, L(un_less_32bytes)
+
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a1, 16
+ ld.d t3, a1, 24
+
+ addi.d a1, a1, 32
+ addi.d a2, a2, -32
+
+ srl.d t4, t0, a5
+ sll.d t0, t0, a6
+ srl.d t5, t1, a5
+ sll.d t1, t1, a6
+
+ srl.d t6, t2, a5
+ sll.d t2, t2, a6
+ or t0, a7, t0
+ srl.d a7, t3, a5
+ sll.d t3, t3, a6
+
+ or t1, t4, t1
+ or t2, t5, t2
+ or t3, t6, t3
+
+ st.d t0, a4, 0
+ st.d t1, a4, 8
+ st.d t2, a4, 16
+ st.d t3, a4, 24
+
+ addi.d a4, a4, 32
+
+L(un_less_32bytes):
+ srai.d a3, a2, 4
+ beqz a3, L(un_less_16bytes)
+
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ addi.d a1, a1, 16
+ addi.d a2, a2, -16
+
+ srl.d t2, t0, a5
+ sll.d t3, t0, a6
+ sll.d t4, t1, a6
+ or t3, a7, t3
+ or t4, t2, t4
+
+ srl.d a7, t1, a5
+ st.d t3, a4, 0
+ st.d t4, a4, 8
+ addi.d a4, a4, 16
+
+L(un_less_16bytes):
+ srai.d a3, a2, 3
+ beqz a3, L(un_less_8bytes)
+
+ ld.d t0, a1, 0
+ addi.d a1, a1, 8
+ addi.d a2, a2, -8
+ sll.d t1, t0, a6
+
+ or t2, a7, t1
+ srl.d a7, t0, a5
+ st.d t2, a4, 0
+ addi.d a4, a4, 8
+
+L(un_less_8bytes):
+ beqz a2, L(un_less_1byte)
+ bge t8, a2, 1f
+
+ ld.d t0, a1, 0
+ sll.d t0, t0, a6
+ or a7, a7, t0
+
+1:
+ srai.d a3, a2, 2
+ beqz a3, L(un_less_4bytes)
+
+ addi.d a2, a2, -4
+ st.w a7, a4, 0
+ addi.d a4, a4, 4
+ srai.d a7, a7, 32
+
+L(un_less_4bytes):
+ srai.d a3, a2, 1
+ beqz a3, L(un_less_2bytes)
+
+ addi.d a2, a2, -2
+ st.h a7, a4, 0
+ addi.d a4, a4, 2
+ srai.d a7, a7, 16
+
+L(un_less_2bytes):
+ beqz a2, L(un_less_1byte)
+ st.b a7, a4, 0
+
+L(un_less_1byte):
+ jr ra
+
+L(short_data):
+ pcaddi t1, 36
+ slli.d t2, a2, 3
+ add.d a4, a0, a2
+ sub.d t1, t1, t2
+ add.d a1, a1, a2
+ jr t1
+
+L(short_15_bytes):
+ ld.b t0, a1, -15
+ st.b t0, a4, -15
+L(short_14_bytes):
+ ld.b t0, a1, -14
+ st.b t0, a4, -14
+L(short_13_bytes):
+ ld.b t0, a1, -13
+ st.b t0, a4, -13
+L(short_12_bytes):
+ ld.b t0, a1, -12
+ st.b t0, a4, -12
+L(short_11_bytes):
+ ld.b t0, a1, -11
+ st.b t0, a4, -11
+L(short_10_bytes):
+ ld.b t0, a1, -10
+ st.b t0, a4, -10
+L(short_9_bytes):
+ ld.b t0, a1, -9
+ st.b t0, a4, -9
+L(short_8_bytes):
+ ld.b t0, a1, -8
+ st.b t0, a4, -8
+L(short_7_bytes):
+ ld.b t0, a1, -7
+ st.b t0, a4, -7
+L(short_6_bytes):
+ ld.b t0, a1, -6
+ st.b t0, a4, -6
+L(short_5_bytes):
+ ld.b t0, a1, -5
+ st.b t0, a4, -5
+L(short_4_bytes):
+ ld.b t0, a1, -4
+ st.b t0, a4, -4
+L(short_3_bytes):
+ ld.b t0, a1, -3
+ st.b t0, a4, -3
+L(short_2_bytes):
+ ld.b t0, a1, -2
+ st.b t0, a4, -2
+L(short_1_bytes):
+ ld.b t0, a1, -1
+ st.b t0, a4, -1
+ jr ra
+
+L(copy_back):
+ srai.d a3, a2, 4
+ beqz a3, L(back_short_data)
+
+ add.d a4, a0, a2
+ add.d a1, a1, a2
+
+ andi a5, a4, 0x7
+ andi a6, a1, 0x7
+ beqz a5, L(back_check_align)
+
+ sub.d a2, a2, a5
+ sub.d a1, a1, a5
+ sub.d a4, a4, a5
+
+ pcaddi t1, 18
+ slli.d t3, a5, 3
+ sub.d t1, t1, t3
+ jr t1
+
+ ld.b t0, a1, 6
+ st.b t0, a4, 6
+ ld.b t0, a1, 5
+ st.b t0, a4, 5
+ ld.b t0, a1, 4
+ st.b t0, a4, 4
+ ld.b t0, a1, 3
+ st.b t0, a4, 3
+ ld.b t0, a1, 2
+ st.b t0, a4, 2
+ ld.b t0, a1, 1
+ st.b t0, a4, 1
+ ld.b t0, a1, 0
+ st.b t0, a4, 0
+
+L(back_check_align):
+ bne a5, a6, L(back_unalign)
+
+ srai.d a3, a2, 4
+ beqz a3, L(back_less_16bytes)
+
+ andi a3, a2, 0x3f
+ beq a3, a2, L(back_less_64bytes)
+
+ sub.d t0, a2, a3
+ move a2, a3
+ sub.d a5, a1, t0
+
+L(back_loop_64bytes):
+ LD_64(a1, -64)
+ addi.d a1, a1, -64
+ ST_64(a4, -64)
+
+ addi.d a4, a4, -64
+ bne a1, a5, L(back_loop_64bytes)
+
+L(back_less_64bytes):
+ srai.d a3, a2, 5
+ beqz a3, L(back_less_32bytes)
+
+ ld.d t0, a1, -32
+ ld.d t1, a1, -24
+ ld.d t2, a1, -16
+ ld.d t3, a1, -8
+
+ addi.d a1, a1, -32
+ addi.d a2, a2, -32
+
+ st.d t0, a4, -32
+ st.d t1, a4, -24
+ st.d t2, a4, -16
+ st.d t3, a4, -8
+
+ addi.d a4, a4, -32
+
+L(back_less_32bytes):
+ srai.d a3, a2, 4
+ beqz a3, L(back_less_16bytes)
+
+ ld.d t0, a1, -16
+ ld.d t1, a1, -8
+
+ addi.d a2, a2, -16
+ addi.d a1, a1, -16
+
+ st.d t0, a4, -16
+ st.d t1, a4, -8
+ addi.d a4, a4, -16
+
+L(back_less_16bytes):
+ srai.d a3, a2, 3
+ beqz a3, L(back_less_8bytes)
+
+ ld.d t0, a1, -8
+ addi.d a2, a2, -8
+ addi.d a1, a1, -8
+
+ st.d t0, a4, -8
+ addi.d a4, a4, -8
+
+L(back_less_8bytes):
+ srai.d a3, a2, 2
+ beqz a3, L(back_less_4bytes)
+
+ ld.w t0, a1, -4
+ addi.d a2, a2, -4
+ addi.d a1, a1, -4
+
+ st.w t0, a4, -4
+ addi.d a4, a4, -4
+
+L(back_less_4bytes):
+ srai.d a3, a2, 1
+ beqz a3, L(back_less_2bytes)
+
+ ld.h t0, a1, -2
+ addi.d a2, a2, -2
+ addi.d a1, a1, -2
+
+ st.h t0, a4, -2
+ addi.d a4, a4, -2
+
+L(back_less_2bytes):
+ beqz a2, L(back_less_1byte)
+
+ ld.b t0, a1, -1
+ st.b t0, a4, -1
+
+L(back_less_1byte):
+ jr ra
+
+L(back_unalign):
+ andi t8, a1, 0x7
+ bstrins.d a1, zero, 2, 0
+
+ sub.d a6, zero, t8
+
+ ld.d t0, a1, 0
+ slli.d a6, a6, 3
+ slli.d a5, t8, 3
+ sll.d a7, t0, a6
+
+ srai.d a3, a2, 4
+ beqz a3, L(back_un_less_16bytes)
+
+ andi a3, a2, 0x3f
+ beq a3, a2, L(back_un_less_64bytes)
+
+ sub.d t0, a2, a3
+ move a2, a3
+ sub.d a3, a1, t0
+
+L(back_un_long_bytes):
+ ld.d t0, a1, -8
+ ld.d t1, a1, -16
+ ld.d t2, a1, -24
+ ld.d t3, a1, -32
+
+ sll.d t4, t0, a6
+ srl.d t0, t0, a5
+
+ sll.d t5, t1, a6
+ srl.d t1, t1, a5
+
+ sll.d t6, t2, a6
+ srl.d t2, t2, a5
+
+ sll.d t7, t3, a6
+ srl.d t3, t3, a5
+
+ or t0, t0, a7
+ or t1, t1, t4
+ or t2, t2, t5
+ or t3, t3, t6
+
+ ld.d t4, a1, -40
+ ld.d t5, a1, -48
+ ld.d t6, a1, -56
+ ld.d a7, a1, -64
+ st.d t0, a4, -8
+ st.d t1, a4, -16
+ st.d t2, a4, -24
+ st.d t3, a4, -32
+
+ addi.d a1, a1, -64
+
+ sll.d t0, t4, a6
+ srl.d t4, t4, a5
+
+ sll.d t1, t5, a6
+ srl.d t5, t5, a5
+
+ sll.d t2, t6, a6
+ srl.d t6, t6, a5
+
+ srl.d t3, a7, a5
+ sll.d a7, a7, a6
+
+ or t4, t7, t4
+ or t5, t0, t5
+ or t6, t1, t6
+ or t3, t2, t3
+
+ st.d t4, a4, -40
+ st.d t5, a4, -48
+ st.d t6, a4, -56
+ st.d t3, a4, -64
+
+ addi.d a4, a4, -64
+ bne a3, a1, L(back_un_long_bytes)
+
+L(back_un_less_64bytes):
+ srai.d a3, a2, 5
+ beqz a3, L(back_un_less_32bytes)
+
+ ld.d t0, a1, -8
+ ld.d t1, a1, -16
+ ld.d t2, a1, -24
+ ld.d t3, a1, -32
+
+ addi.d a1, a1, -32
+ addi.d a2, a2, -32
+
+ sll.d t4, t0, a6
+ srl.d t0, t0, a5
+
+ sll.d t5, t1, a6
+ srl.d t1, t1, a5
+
+ sll.d t6, t2, a6
+ srl.d t2, t2, a5
+
+ or t0, a7, t0
+
+ sll.d a7, t3, a6
+ srl.d t3, t3, a5
+
+ or t1, t4, t1
+ or t2, t5, t2
+ or t3, t6, t3
+
+ st.d t0, a4, -8
+ st.d t1, a4, -16
+ st.d t2, a4, -24
+ st.d t3, a4, -32
+
+ addi.d a4, a4, -32
+
+L(back_un_less_32bytes):
+ srai.d a3, a2, 4
+ beqz a3, L(back_un_less_16bytes)
+
+ ld.d t0, a1, -8
+ ld.d t1, a1, -16
+
+ addi.d a1, a1, -16
+ addi.d a2, a2, -16
+
+ sll.d t2, t0, a6
+ srl.d t3, t0, a5
+
+ srl.d t4, t1, a5
+ or t3, a7, t3
+ or t4, t2, t4
+ sll.d a7, t1, a6
+
+ st.d t3, a4, -8
+ st.d t4, a4, -16
+
+ addi.d a4, a4, -16
+
+L(back_un_less_16bytes):
+ srai.d a3, a2, 3
+ beqz a3, L(back_un_less_8bytes)
+
+ ld.d t0, a1, -8
+
+ addi.d a1, a1, -8
+ addi.d a2, a2, -8
+
+ srl.d t1, t0, a5
+ or t2, a7, t1
+ sll.d a7, t0, a6
+
+ st.d t2, a4, -8
+ addi.d a4, a4, -8
+
+L(back_un_less_8bytes):
+ beqz a2, L(back_end)
+ bge t8, a2, 1f
+
+ ld.d t0, a1, -8
+ srl.d t0, t0, a5
+ or a7, a7, t0
+
+1:
+ srai.d a3, a2, 2
+ beqz a3, L(back_un_less_4bytes)
+
+ srai.d t0, a7, 32
+ addi.d a2, a2, -4
+ st.w t0, a4, -4
+ addi.d a4, a4, -4
+ slli.d a7, a7, 32
+
+L(back_un_less_4bytes):
+ srai.d a3, a2, 1
+ beqz a3, L(back_un_less_2bytes)
+ srai.d t0, a7, 48
+ addi.d a2, a2, -2
+ st.h t0, a4, -2
+ addi.d a4, a4, -2
+ slli.d a7, a7, 16
+L(back_un_less_2bytes):
+ beqz a2, L(back_un_less_1byte)
+ srai.d t0, a7, 56
+ st.b t0, a4, -1
+L(back_un_less_1byte):
+ jr ra
+
+L(back_short_data):
+ pcaddi t1, 34
+ slli.d t2, a2, 3
+ sub.d t1, t1, t2
+ jr t1
+
+ ld.b t0, a1, 14
+ st.b t0, a0, 14
+ ld.b t0, a1, 13
+ st.b t0, a0, 13
+ ld.b t0, a1, 12
+ st.b t0, a0, 12
+ ld.b t0, a1, 11
+ st.b t0, a0, 11
+ ld.b t0, a1, 10
+ st.b t0, a0, 10
+ ld.b t0, a1, 9
+ st.b t0, a0, 9
+ ld.b t0, a1, 8
+ st.b t0, a0, 8
+ ld.b t0, a1, 7
+ st.b t0, a0, 7
+ ld.b t0, a1, 6
+ st.b t0, a0, 6
+ ld.b t0, a1, 5
+ st.b t0, a0, 5
+ ld.b t0, a1, 4
+ st.b t0, a0, 4
+ ld.b t0, a1, 3
+ st.b t0, a0, 3
+ ld.b t0, a1, 2
+ st.b t0, a0, 2
+ ld.b t0, a1, 1
+ st.b t0, a0, 1
+ ld.b t0, a1, 0
+ st.b t0, a0, 0
+L(back_end):
+ jr ra
+
+END(MEMCPY_NAME)
+
+libc_hidden_builtin_def (MEMMOVE_NAME)
+libc_hidden_builtin_def (MEMCPY_NAME)
diff --git a/sysdeps/loongarch/lp64/multiarch/memcpy-lasx.S b/sysdeps/loongarch/lp64/multiarch/memcpy-lasx.S
new file mode 100644
index 00000000..4aae5bf8
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memcpy-lasx.S
@@ -0,0 +1,20 @@
+/* Optimized memcpy implementation using Loongarch LASX instructions.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* memcpy is part of memmove.S */
diff --git a/sysdeps/loongarch/lp64/multiarch/memcpy-lsx.S b/sysdeps/loongarch/lp64/multiarch/memcpy-lsx.S
new file mode 100644
index 00000000..6ebbe7a2
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memcpy-lsx.S
@@ -0,0 +1,20 @@
+/* Optimized memcpy implementation using Loongarch LSX instructions.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* memcpy is part of memmove.S */
diff --git a/sysdeps/loongarch/lp64/multiarch/memcpy-unaligned.S b/sysdeps/loongarch/lp64/multiarch/memcpy-unaligned.S
new file mode 100644
index 00000000..8e60a22d
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memcpy-unaligned.S
@@ -0,0 +1,247 @@
+/* Optimized unaligned memcpy implementation using basic Loongarch instructions.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <sys/regdef.h>
+#include <sys/asm.h>
+
+#if IS_IN (libc)
+
+# define MEMCPY_NAME __memcpy_unaligned
+
+# define LD_64(reg, n) \
+ ld.d t0, reg, n; \
+ ld.d t1, reg, n + 8; \
+ ld.d t2, reg, n + 16; \
+ ld.d t3, reg, n + 24; \
+ ld.d t4, reg, n + 32; \
+ ld.d t5, reg, n + 40; \
+ ld.d t6, reg, n + 48; \
+ ld.d t7, reg, n + 56;
+
+# define ST_64(reg, n) \
+ st.d t0, reg, n; \
+ st.d t1, reg, n + 8; \
+ st.d t2, reg, n + 16; \
+ st.d t3, reg, n + 24; \
+ st.d t4, reg, n + 32; \
+ st.d t5, reg, n + 40; \
+ st.d t6, reg, n + 48; \
+ st.d t7, reg, n + 56;
+
+LEAF(MEMCPY_NAME, 3)
+ add.d a4, a1, a2
+ add.d a3, a0, a2
+ li.w a6, 16
+ bge a6, a2, L(less_16bytes)
+
+ li.w a6, 128
+ blt a6, a2, L(long_bytes)
+ li.w a6, 64
+ blt a6, a2, L(more_64bytes)
+
+ li.w a6, 32
+ blt a6, a2, L(more_32bytes)
+
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a4, -16
+ ld.d t3, a4, -8
+
+ st.d t0, a0, 0
+ st.d t1, a0, 8
+ st.d t2, a3, -16
+ st.d t3, a3, -8
+ jr ra
+
+L(more_64bytes):
+ srli.d t8, a0, 3
+ slli.d t8, t8, 3
+ addi.d t8, t8, 0x8
+ sub.d a7, a0, t8
+
+ ld.d t0, a1, 0
+ sub.d a1, a1, a7
+ st.d t0, a0, 0
+ add.d a7, a7, a2
+ addi.d a7, a7, -0x20
+
+L(loop_32):
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a1, 16
+ ld.d t3, a1, 24
+
+ st.d t0, t8, 0
+ st.d t1, t8, 8
+ st.d t2, t8, 16
+ st.d t3, t8, 24
+
+ addi.d t8, t8, 0x20
+ addi.d a1, a1, 0x20
+ addi.d a7, a7, -0x20
+ blt zero, a7, L(loop_32)
+
+ ld.d t4, a4, -32
+ ld.d t5, a4, -24
+ ld.d t6, a4, -16
+ ld.d t7, a4, -8
+
+ st.d t4, a3, -32
+ st.d t5, a3, -24
+ st.d t6, a3, -16
+ st.d t7, a3, -8
+
+ jr ra
+
+L(more_32bytes):
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a1, 16
+ ld.d t3, a1, 24
+
+ ld.d t4, a4, -32
+ ld.d t5, a4, -24
+ ld.d t6, a4, -16
+ ld.d t7, a4, -8
+
+ st.d t0, a0, 0
+ st.d t1, a0, 8
+ st.d t2, a0, 16
+ st.d t3, a0, 24
+
+ st.d t4, a3, -32
+ st.d t5, a3, -24
+ st.d t6, a3, -16
+ st.d t7, a3, -8
+
+ jr ra
+
+L(less_16bytes):
+ srai.d a6, a2, 3
+ beqz a6, L(less_8bytes)
+
+ ld.d t0, a1, 0
+ ld.d t1, a4, -8
+ st.d t0, a0, 0
+ st.d t1, a3, -8
+
+ jr ra
+
+L(less_8bytes):
+ srai.d a6, a2, 2
+ beqz a6, L(less_4bytes)
+
+ ld.w t0, a1, 0
+ ld.w t1, a4, -4
+ st.w t0, a0, 0
+ st.w t1, a3, -4
+
+ jr ra
+
+L(less_4bytes):
+ srai.d a6, a2, 1
+ beqz a6, L(less_2bytes)
+
+ ld.h t0, a1, 0
+ ld.h t1, a4, -2
+ st.h t0, a0, 0
+ st.h t1, a3, -2
+
+ jr ra
+
+L(less_2bytes):
+ beqz a2, L(less_1bytes)
+
+ ld.b t0, a1, 0
+ st.b t0, a0, 0
+ jr ra
+
+L(less_1bytes):
+ jr ra
+
+L(long_bytes):
+ srli.d t8, a0, 3
+ slli.d t8, t8, 3
+ beq a0, t8, L(start)
+ ld.d t0, a1, 0
+
+ addi.d t8, t8, 0x8
+ st.d t0, a0, 0
+ sub.d a7, a0, t8
+ sub.d a1, a1, a7
+
+L(start):
+ addi.d a5, a3, -0x80
+ blt a5, t8, L(align_end_proc)
+
+L(loop_128):
+ LD_64(a1, 0)
+ ST_64(t8, 0)
+ LD_64(a1, 64)
+ addi.d a1, a1, 0x80
+ ST_64(t8, 64)
+ addi.d t8, t8, 0x80
+ bge a5, t8, L(loop_128)
+
+L(align_end_proc):
+ sub.d a2, a3, t8
+ pcaddi t1, 34
+ andi t2, a2, 0x78
+ sub.d t1, t1, t2
+ jr t1
+
+ ld.d t0, a1, 112
+ st.d t0, t8, 112
+ ld.d t0, a1, 104
+ st.d t0, t8, 104
+ ld.d t0, a1, 96
+ st.d t0, t8, 96
+ ld.d t0, a1, 88
+ st.d t0, t8, 88
+ ld.d t0, a1, 80
+ st.d t0, t8, 80
+ ld.d t0, a1, 72
+ st.d t0, t8, 72
+ ld.d t0, a1, 64
+ st.d t0, t8, 64
+ ld.d t0, a1, 56
+ st.d t0, t8, 56
+ ld.d t0, a1, 48
+ st.d t0, t8, 48
+ ld.d t0, a1, 40
+ st.d t0, t8, 40
+ ld.d t0, a1, 32
+ st.d t0, t8, 32
+ ld.d t0, a1, 24
+ st.d t0, t8, 24
+ ld.d t0, a1, 16
+ st.d t0, t8, 16
+ ld.d t0, a1, 8
+ st.d t0, t8, 8
+ ld.d t0, a1, 0
+ st.d t0, t8, 0
+ ld.d t0, a4, -8
+ st.d t0, a3, -8
+
+ jr ra
+END(MEMCPY_NAME)
+
+libc_hidden_builtin_def (MEMCPY_NAME)
+#endif
diff --git a/sysdeps/loongarch/lp64/multiarch/memcpy.c b/sysdeps/loongarch/lp64/multiarch/memcpy.c
new file mode 100644
index 00000000..93b238ce
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memcpy.c
@@ -0,0 +1,37 @@
+/* Multiple versions of memcpy.
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+# define memcpy __redirect_memcpy
+# include <string.h>
+# undef memcpy
+
+# define SYMBOL_NAME memcpy
+# include "ifunc-lasx.h"
+
+libc_ifunc_redirected (__redirect_memcpy, memcpy,
+ IFUNC_SELECTOR ());
+
+# ifdef SHARED
+__hidden_ver1 (memcpy, __GI_memcpy, __redirect_memcpy)
+ __attribute__ ((visibility ("hidden"))) __attribute_copy__ (memcpy);
+# endif
+
+#endif
diff --git a/sysdeps/loongarch/lp64/multiarch/memmove-aligned.S b/sysdeps/loongarch/lp64/multiarch/memmove-aligned.S
new file mode 100644
index 00000000..5354f383
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memmove-aligned.S
@@ -0,0 +1,20 @@
+/* Optimized memmove_aligned implementation using basic Loongarch instructions.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* memmove_aligned is part of memcpy_aligned, see memcpy-aligned.S. */
diff --git a/sysdeps/loongarch/lp64/multiarch/memmove-lasx.S b/sysdeps/loongarch/lp64/multiarch/memmove-lasx.S
new file mode 100644
index 00000000..ff68e7a2
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memmove-lasx.S
@@ -0,0 +1,287 @@
+/* Optimized memmove implementation using Loongarch LASX instructions.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <sys/regdef.h>
+#include <sys/asm.h>
+
+#if IS_IN (libc) && !defined __loongarch_soft_float
+
+#ifndef MEMCPY_NAME
+# define MEMCPY_NAME __memcpy_lasx
+#endif
+
+#ifndef MEMMOVE_NAME
+# define MEMMOVE_NAME __memmove_lasx
+#endif
+
+LEAF(MEMCPY_NAME, 6)
+ li.d t0, 32
+ add.d a3, a0, a2
+ add.d a4, a1, a2
+ bgeu t0, a2, L(less_32bytes)
+
+ li.d t1, 64
+ bltu t1, a2, L(copy_long)
+ xvld xr0, a1, 0
+ xvld xr1, a4, -32
+
+ xvst xr0, a0, 0
+ xvst xr1, a3, -32
+ jr ra
+L(less_32bytes):
+ srli.d t0, a2, 4
+
+ beqz t0, L(less_16bytes)
+ vld vr0, a1, 0
+ vld vr1, a4, -16
+ vst vr0, a0, 0
+
+
+ vst vr1, a3, -16
+ jr ra
+L(less_16bytes):
+ srli.d t0, a2, 3
+ beqz t0, L(less_8bytes)
+
+ ld.d t0, a1, 0
+ ld.d t1, a4, -8
+ st.d t0, a0, 0
+ st.d t1, a3, -8
+
+ jr ra
+L(less_8bytes):
+ srli.d t0, a2, 2
+ beqz t0, L(less_4bytes)
+ ld.w t0, a1, 0
+
+ ld.w t1, a4, -4
+ st.w t0, a0, 0
+ st.w t1, a3, -4
+ jr ra
+
+
+L(less_4bytes):
+ srli.d t0, a2, 1
+ beqz t0, L(less_2bytes)
+ ld.h t0, a1, 0
+ ld.h t1, a4, -2
+
+ st.h t0, a0, 0
+ st.h t1, a3, -2
+ jr ra
+L(less_2bytes):
+ beqz a2, L(less_1bytes)
+
+ ld.b t0, a1, 0
+ st.b t0, a0, 0
+L(less_1bytes):
+ jr ra
+END(MEMCPY_NAME)
+
+LEAF(MEMMOVE_NAME, 6)
+
+ li.d t0, 32
+ add.d a3, a0, a2
+ add.d a4, a1, a2
+ bgeu t0, a2, L(less_32bytes)
+
+ li.d t1, 64
+ bltu t1, a2, L(move_long)
+ xvld xr0, a1, 0
+ xvld xr1, a4, -32
+
+ xvst xr0, a0, 0
+ xvst xr1, a3, -32
+ jr ra
+L(move_long):
+ sub.d t2, a0, a1
+
+ bltu t2, a2, L(copy_back)
+L(copy_long):
+ andi t2, a0, 0x1f
+ addi.d a2, a2, -1
+ sub.d t2, t0, t2
+
+
+ xvld xr8, a1, 0
+ xvld xr9, a4, -32
+ sub.d t3, a2, t2
+ add.d a5, a0, t2
+
+ andi a2, t3, 0xff
+ add.d a1, a1, t2
+ beq a2, t3, L(lt256)
+ sub.d a6, a4, a2
+
+ addi.d a6, a6, -1
+L(loop_256):
+ xvld xr0, a1, 0
+ xvld xr1, a1, 32
+ xvld xr2, a1, 64
+
+ xvld xr3, a1, 96
+ xvld xr4, a1, 128
+ xvld xr5, a1, 160
+ xvld xr6, a1, 192
+
+
+ xvld xr7, a1, 224
+ addi.d a1, a1, 256
+ xvst xr0, a5, 0
+ xvst xr1, a5, 32
+
+ xvst xr2, a5, 64
+ xvst xr3, a5, 96
+ xvst xr4, a5, 128
+ xvst xr5, a5, 160
+
+ xvst xr6, a5, 192
+ xvst xr7, a5, 224
+ addi.d a5, a5, 256
+ bne a1, a6, L(loop_256)
+
+L(lt256):
+ srli.d t2, a2, 7
+ beqz t2, L(lt128)
+ xvld xr0, a1, 0
+ xvld xr1, a1, 32
+
+
+ xvld xr2, a1, 64
+ xvld xr3, a1, 96
+ addi.d a1, a1, 128
+ addi.d a2, a2, -128
+
+ xvst xr0, a5, 0
+ xvst xr1, a5, 32
+ xvst xr2, a5, 64
+ xvst xr3, a5, 96
+
+ addi.d a5, a5, 128
+L(lt128):
+ bltu a2, t1, L(lt64)
+ xvld xr0, a1, 0
+ xvld xr1, a1, 32
+
+ addi.d a1, a1, 64
+ addi.d a2, a2, -64
+ xvst xr0, a5, 0
+ xvst xr1, a5, 32
+
+
+ addi.d a5, a5, 64
+L(lt64):
+ bltu a2, t0, L(lt32)
+ xvld xr0, a1, 0
+ xvst xr0, a5, 0
+
+L(lt32):
+ xvst xr8, a0, 0
+ xvst xr9, a3, -32
+ jr ra
+ nop
+
+L(copy_back):
+ addi.d a3, a3, -1
+ addi.d a2, a2, -2
+ andi t2, a3, 0x1f
+ xvld xr8, a1, 0
+
+ xvld xr9, a4, -32
+ sub.d t3, a2, t2
+ sub.d a5, a3, t2
+ sub.d a4, a4, t2
+
+
+ andi a2, t3, 0xff
+ beq a2, t3, L(back_lt256)
+ add.d a6, a1, a2
+ addi.d a6, a6, 2
+
+L(back_loop_256):
+ xvld xr0, a4, -33
+ xvld xr1, a4, -65
+ xvld xr2, a4, -97
+ xvld xr3, a4, -129
+
+ xvld xr4, a4, -161
+ xvld xr5, a4, -193
+ xvld xr6, a4, -225
+ xvld xr7, a4, -257
+
+ addi.d a4, a4, -256
+ xvst xr0, a5, -32
+ xvst xr1, a5, -64
+ xvst xr2, a5, -96
+
+
+ xvst xr3, a5, -128
+ xvst xr4, a5, -160
+ xvst xr5, a5, -192
+ xvst xr6, a5, -224
+
+ xvst xr7, a5, -256
+ addi.d a5, a5, -256
+ bne a4, a6, L(back_loop_256)
+L(back_lt256):
+ srli.d t2, a2, 7
+
+ beqz t2, L(back_lt128)
+ xvld xr0, a4, -33
+ xvld xr1, a4, -65
+ xvld xr2, a4, -97
+
+ xvld xr3, a4, -129
+ addi.d a2, a2, -128
+ addi.d a4, a4, -128
+ xvst xr0, a5, -32
+
+
+ xvst xr1, a5, -64
+ xvst xr2, a5, -96
+ xvst xr3, a5, -128
+ addi.d a5, a5, -128
+
+L(back_lt128):
+ blt a2, t1, L(back_lt64)
+ xvld xr0, a4, -33
+ xvld xr1, a4, -65
+ addi.d a2, a2, -64
+
+ addi.d a4, a4, -64
+ xvst xr0, a5, -32
+ xvst xr1, a5, -64
+ addi.d a5, a5, -64
+
+L(back_lt64):
+ bltu a2, t0, L(back_lt32)
+ xvld xr0, a4, -33
+ xvst xr0, a5, -32
+L(back_lt32):
+ xvst xr8, a0, 0
+
+
+ xvst xr9, a3, -31
+ jr ra
+END(MEMMOVE_NAME)
+
+libc_hidden_builtin_def (MEMCPY_NAME)
+libc_hidden_builtin_def (MEMMOVE_NAME)
+#endif
diff --git a/sysdeps/loongarch/lp64/multiarch/memmove-lsx.S b/sysdeps/loongarch/lp64/multiarch/memmove-lsx.S
new file mode 100644
index 00000000..9e1502a7
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memmove-lsx.S
@@ -0,0 +1,534 @@
+/* Optimized memmove implementation using Loongarch LSX instructions.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <sys/regdef.h>
+#include <sys/asm.h>
+
+#if IS_IN (libc) && !defined __loongarch_soft_float
+
+# define MEMCPY_NAME __memcpy_lsx
+# define MEMMOVE_NAME __memmove_lsx
+
+LEAF(MEMCPY_NAME, 6)
+ li.d t6, 16
+ add.d a3, a0, a2
+ add.d a4, a1, a2
+ bgeu t6, a2, L(less_16bytes)
+
+ li.d t8, 64
+ li.d t7, 32
+ bltu t8, a2, L(copy_long)
+ bltu t7, a2, L(more_32bytes)
+
+ vld vr0, a1, 0
+ vld vr1, a4, -16
+ vst vr0, a0, 0
+ vst vr1, a3, -16
+
+ jr ra
+L(more_32bytes):
+ vld vr0, a1, 0
+ vld vr1, a1, 16
+ vld vr2, a4, -32
+
+
+ vld vr3, a4, -16
+ vst vr0, a0, 0
+ vst vr1, a0, 16
+ vst vr2, a3, -32
+
+ vst vr3, a3, -16
+ jr ra
+L(less_16bytes):
+ srli.d t0, a2, 3
+ beqz t0, L(less_8bytes)
+
+ vldrepl.d vr0, a1, 0
+ vldrepl.d vr1, a4, -8
+ vstelm.d vr0, a0, 0, 0
+ vstelm.d vr1, a3, -8, 0
+
+ jr ra
+L(less_8bytes):
+ srli.d t0, a2, 2
+ beqz t0, L(less_4bytes)
+ vldrepl.w vr0, a1, 0
+
+
+ vldrepl.w vr1, a4, -4
+ vstelm.w vr0, a0, 0, 0
+ vstelm.w vr1, a3, -4, 0
+ jr ra
+
+L(less_4bytes):
+ srli.d t0, a2, 1
+ beqz t0, L(less_2bytes)
+ vldrepl.h vr0, a1, 0
+ vldrepl.h vr1, a4, -2
+
+ vstelm.h vr0, a0, 0, 0
+ vstelm.h vr1, a3, -2, 0
+ jr ra
+L(less_2bytes):
+ beqz a2, L(less_1bytes)
+
+ ld.b t0, a1, 0
+ st.b t0, a0, 0
+L(less_1bytes):
+ jr ra
+ nop
+END(MEMCPY_NAME)
+
+LEAF(MEMMOVE_NAME, 6)
+ li.d t6, 16
+ add.d a3, a0, a2
+ add.d a4, a1, a2
+ bgeu t6, a2, L(less_16bytes)
+
+ li.d t8, 64
+ li.d t7, 32
+ bltu t8, a2, L(move_long)
+ bltu t7, a2, L(more_32bytes)
+
+ vld vr0, a1, 0
+ vld vr1, a4, -16
+ vst vr0, a0, 0
+ vst vr1, a3, -16
+
+ jr ra
+ nop
+L(move_long):
+ sub.d t0, a0, a1
+ bltu t0, a2, L(copy_back)
+
+
+L(copy_long):
+ vld vr2, a1, 0
+ andi t0, a0, 0xf
+ sub.d t0, t6, t0
+ add.d a1, a1, t0
+
+ sub.d a2, a2, t0
+ andi t1, a1, 0xf
+ bnez t1, L(unaligned)
+ vld vr0, a1, 0
+
+ addi.d a2, a2, -16
+ vst vr2, a0, 0
+ andi t2, a2, 0x7f
+ add.d a5, a0, t0
+
+ beq a2, t2, L(al_less_128)
+ sub.d t3, a2, t2
+ move a2, t2
+ add.d a6, a1, t3
+
+
+L(al_loop):
+ vld vr1, a1, 16
+ vld vr2, a1, 32
+ vld vr3, a1, 48
+ vld vr4, a1, 64
+
+ vld vr5, a1, 80
+ vld vr6, a1, 96
+ vld vr7, a1, 112
+ vst vr0, a5, 0
+
+ vld vr0, a1, 128
+ addi.d a1, a1, 128
+ vst vr1, a5, 16
+ vst vr2, a5, 32
+
+ vst vr3, a5, 48
+ vst vr4, a5, 64
+ vst vr5, a5, 80
+ vst vr6, a5, 96
+
+
+ vst vr7, a5, 112
+ addi.d a5, a5, 128
+ bne a1, a6, L(al_loop)
+L(al_less_128):
+ blt a2, t8, L(al_less_64)
+
+ vld vr1, a1, 16
+ vld vr2, a1, 32
+ vld vr3, a1, 48
+ addi.d a2, a2, -64
+
+ vst vr0, a5, 0
+ vld vr0, a1, 64
+ addi.d a1, a1, 64
+ vst vr1, a5, 16
+
+ vst vr2, a5, 32
+ vst vr3, a5, 48
+ addi.d a5, a5, 64
+L(al_less_64):
+ blt a2, t7, L(al_less_32)
+
+
+ vld vr1, a1, 16
+ addi.d a2, a2, -32
+ vst vr0, a5, 0
+ vld vr0, a1, 32
+
+ addi.d a1, a1, 32
+ vst vr1, a5, 16
+ addi.d a5, a5, 32
+L(al_less_32):
+ blt a2, t6, L(al_less_16)
+
+ vst vr0, a5, 0
+ vld vr0, a1, 16
+ addi.d a5, a5, 16
+L(al_less_16):
+ vld vr1, a4, -16
+
+ vst vr0, a5, 0
+ vst vr1, a3, -16
+ jr ra
+ nop
+
+
+L(magic_num):
+ .dword 0x0706050403020100
+ .dword 0x0f0e0d0c0b0a0908
+L(unaligned):
+ pcaddi t2, -4
+ bstrins.d a1, zero, 3, 0
+ vld vr8, t2, 0
+ vld vr0, a1, 0
+
+ vld vr1, a1, 16
+ addi.d a2, a2, -16
+ vst vr2, a0, 0
+ add.d a5, a0, t0
+
+ vreplgr2vr.b vr9, t1
+ andi t2, a2, 0x7f
+ vadd.b vr9, vr9, vr8
+ addi.d a1, a1, 32
+
+
+ beq t2, a2, L(un_less_128)
+ sub.d t3, a2, t2
+ move a2, t2
+ add.d a6, a1, t3
+
+L(un_loop):
+ vld vr2, a1, 0
+ vld vr3, a1, 16
+ vld vr4, a1, 32
+ vld vr5, a1, 48
+
+ vld vr6, a1, 64
+ vld vr7, a1, 80
+ vshuf.b vr8, vr1, vr0, vr9
+ vld vr0, a1, 96
+
+ vst vr8, a5, 0
+ vshuf.b vr8, vr2, vr1, vr9
+ vld vr1, a1, 112
+ vst vr8, a5, 16
+
+
+ addi.d a1, a1, 128
+ vshuf.b vr2, vr3, vr2, vr9
+ vshuf.b vr3, vr4, vr3, vr9
+ vst vr2, a5, 32
+
+ vshuf.b vr4, vr5, vr4, vr9
+ vst vr3, a5, 48
+ vshuf.b vr5, vr6, vr5, vr9
+ vst vr4, a5, 64
+
+ vshuf.b vr6, vr7, vr6, vr9
+ vst vr5, a5, 80
+ vshuf.b vr7, vr0, vr7, vr9
+ vst vr6, a5, 96
+
+ vst vr7, a5, 112
+ addi.d a5, a5, 128
+ bne a1, a6, L(un_loop)
+L(un_less_128):
+ blt a2, t8, L(un_less_64)
+
+
+ vld vr2, a1, 0
+ vld vr3, a1, 16
+ vshuf.b vr4, vr1, vr0, vr9
+ vld vr0, a1, 32
+
+ vst vr4, a5, 0
+ addi.d a2, a2, -64
+ vshuf.b vr4, vr2, vr1, vr9
+ vld vr1, a1, 48
+
+ addi.d a1, a1, 64
+ vst vr4, a5, 16
+ vshuf.b vr2, vr3, vr2, vr9
+ vshuf.b vr3, vr0, vr3, vr9
+
+ vst vr2, a5, 32
+ vst vr3, a5, 48
+ addi.d a5, a5, 64
+L(un_less_64):
+ blt a2, t7, L(un_less_32)
+
+
+ vshuf.b vr3, vr1, vr0, vr9
+ vld vr0, a1, 0
+ vst vr3, a5, 0
+ addi.d a2, a2, -32
+
+ vshuf.b vr3, vr0, vr1, vr9
+ vld vr1, a1, 16
+ addi.d a1, a1, 32
+ vst vr3, a5, 16
+
+ addi.d a5, a5, 32
+L(un_less_32):
+ blt a2, t6, L(un_less_16)
+ vshuf.b vr2, vr1, vr0, vr9
+ vor.v vr0, vr1, vr1
+
+ vld vr1, a1, 0
+ vst vr2, a5, 0
+ addi.d a5, a5, 16
+L(un_less_16):
+ vld vr2, a4, -16
+
+
+ vshuf.b vr0, vr1, vr0, vr9
+ vst vr0, a5, 0
+ vst vr2, a3, -16
+ jr ra
+
+L(copy_back):
+ addi.d t0, a3, -1
+ vld vr2, a4, -16
+ andi t0, t0, 0xf
+ addi.d t0, t0, 1
+
+ sub.d a4, a4, t0
+ sub.d a2, a2, t0
+ andi t1, a4, 0xf
+ bnez t1, L(back_unaligned)
+
+ vld vr0, a4, -16
+ addi.d a2, a2, -16
+ vst vr2, a3, -16
+ andi t2, a2, 0x7f
+
+
+ sub.d a3, a3, t0
+ beq t2, a2, L(back_al_less_128)
+ sub.d t3, a2, t2
+ move a2, t2
+
+ sub.d a6, a4, t3
+L(back_al_loop):
+ vld vr1, a4, -32
+ vld vr2, a4, -48
+ vld vr3, a4, -64
+
+ vld vr4, a4, -80
+ vld vr5, a4, -96
+ vld vr6, a4, -112
+ vld vr7, a4, -128
+
+ vst vr0, a3, -16
+ vld vr0, a4, -144
+ addi.d a4, a4, -128
+ vst vr1, a3, -32
+
+
+ vst vr2, a3, -48
+ vst vr3, a3, -64
+ vst vr4, a3, -80
+ vst vr5, a3, -96
+
+ vst vr6, a3, -112
+ vst vr7, a3, -128
+ addi.d a3, a3, -128
+ bne a4, a6, L(back_al_loop)
+
+L(back_al_less_128):
+ blt a2, t8, L(back_al_less_64)
+ vld vr1, a4, -32
+ vld vr2, a4, -48
+ vld vr3, a4, -64
+
+ addi.d a2, a2, -64
+ vst vr0, a3, -16
+ vld vr0, a4, -80
+ addi.d a4, a4, -64
+
+
+ vst vr1, a3, -32
+ vst vr2, a3, -48
+ vst vr3, a3, -64
+ addi.d a3, a3, -64
+
+L(back_al_less_64):
+ blt a2, t7, L(back_al_less_32)
+ vld vr1, a4, -32
+ addi.d a2, a2, -32
+ vst vr0, a3, -16
+
+ vld vr0, a4, -48
+ vst vr1, a3, -32
+ addi.d a3, a3, -32
+ addi.d a4, a4, -32
+
+L(back_al_less_32):
+ blt a2, t6, L(back_al_less_16)
+ vst vr0, a3, -16
+ vld vr0, a4, -32
+ addi.d a3, a3, -16
+
+
+L(back_al_less_16):
+ vld vr1, a1, 0
+ vst vr0, a3, -16
+ vst vr1, a0, 0
+ jr ra
+
+L(magic_num_2):
+ .dword 0x0706050403020100
+ .dword 0x0f0e0d0c0b0a0908
+L(back_unaligned):
+ pcaddi t2, -4
+ bstrins.d a4, zero, 3, 0
+ vld vr8, t2, 0
+ vld vr0, a4, 0
+
+ vld vr1, a4, -16
+ addi.d a2, a2, -16
+ vst vr2, a3, -16
+ sub.d a3, a3, t0
+
+
+ vreplgr2vr.b vr9, t1
+ andi t2, a2, 0x7f
+ vadd.b vr9, vr9, vr8
+ addi.d a4, a4, -16
+
+ beq t2, a2, L(back_un_less_128)
+ sub.d t3, a2, t2
+ move a2, t2
+ sub.d a6, a4, t3
+
+L(back_un_loop):
+ vld vr2, a4, -16
+ vld vr3, a4, -32
+ vld vr4, a4, -48
+
+ vld vr5, a4, -64
+ vld vr6, a4, -80
+ vld vr7, a4, -96
+ vshuf.b vr8, vr0, vr1, vr9
+
+
+ vld vr0, a4, -112
+ vst vr8, a3, -16
+ vshuf.b vr8, vr1, vr2, vr9
+ vld vr1, a4, -128
+
+ vst vr8, a3, -32
+ addi.d a4, a4, -128
+ vshuf.b vr2, vr2, vr3, vr9
+ vshuf.b vr3, vr3, vr4, vr9
+
+ vst vr2, a3, -48
+ vshuf.b vr4, vr4, vr5, vr9
+ vst vr3, a3, -64
+ vshuf.b vr5, vr5, vr6, vr9
+
+ vst vr4, a3, -80
+ vshuf.b vr6, vr6, vr7, vr9
+ vst vr5, a3, -96
+ vshuf.b vr7, vr7, vr0, vr9
+
+
+ vst vr6, a3, -112
+ vst vr7, a3, -128
+ addi.d a3, a3, -128
+ bne a4, a6, L(back_un_loop)
+
+L(back_un_less_128):
+ blt a2, t8, L(back_un_less_64)
+ vld vr2, a4, -16
+ vld vr3, a4, -32
+ vshuf.b vr4, vr0, vr1, vr9
+
+ vld vr0, a4, -48
+ vst vr4, a3, -16
+ addi.d a2, a2, -64
+ vshuf.b vr4, vr1, vr2, vr9
+
+ vld vr1, a4, -64
+ addi.d a4, a4, -64
+ vst vr4, a3, -32
+ vshuf.b vr2, vr2, vr3, vr9
+
+
+ vshuf.b vr3, vr3, vr0, vr9
+ vst vr2, a3, -48
+ vst vr3, a3, -64
+ addi.d a3, a3, -64
+
+L(back_un_less_64):
+ blt a2, t7, L(back_un_less_32)
+ vshuf.b vr3, vr0, vr1, vr9
+ vld vr0, a4, -16
+ vst vr3, a3, -16
+
+ addi.d a2, a2, -32
+ vshuf.b vr3, vr1, vr0, vr9
+ vld vr1, a4, -32
+ addi.d a4, a4, -32
+
+ vst vr3, a3, -32
+ addi.d a3, a3, -32
+L(back_un_less_32):
+ blt a2, t6, L(back_un_less_16)
+ vshuf.b vr2, vr0, vr1, vr9
+
+
+ vor.v vr0, vr1, vr1
+ vld vr1, a4, -16
+ vst vr2, a3, -16
+ addi.d a3, a3, -16
+
+L(back_un_less_16):
+ vld vr2, a1, 0
+ vshuf.b vr0, vr0, vr1, vr9
+ vst vr0, a3, -16
+ vst vr2, a0, 0
+
+ jr ra
+END(MEMMOVE_NAME)
+
+libc_hidden_builtin_def (MEMCPY_NAME)
+libc_hidden_builtin_def (MEMMOVE_NAME)
+#endif
diff --git a/sysdeps/loongarch/lp64/multiarch/memmove-unaligned.S b/sysdeps/loongarch/lp64/multiarch/memmove-unaligned.S
new file mode 100644
index 00000000..90a64b6b
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memmove-unaligned.S
@@ -0,0 +1,380 @@
+/* Optimized memmove_unaligned implementation using basic Loongarch instructions.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <sys/regdef.h>
+#include <sys/asm.h>
+
+#if IS_IN (libc)
+
+# define MEMMOVE_NAME __memmove_unaligned
+
+# define LD_64(reg, n) \
+ ld.d t0, reg, n; \
+ ld.d t1, reg, n + 8; \
+ ld.d t2, reg, n + 16; \
+ ld.d t3, reg, n + 24; \
+ ld.d t4, reg, n + 32; \
+ ld.d t5, reg, n + 40; \
+ ld.d t6, reg, n + 48; \
+ ld.d t7, reg, n + 56;
+
+# define ST_64(reg, n) \
+ st.d t0, reg, n; \
+ st.d t1, reg, n + 8; \
+ st.d t2, reg, n + 16; \
+ st.d t3, reg, n + 24; \
+ st.d t4, reg, n + 32; \
+ st.d t5, reg, n + 40; \
+ st.d t6, reg, n + 48; \
+ st.d t7, reg, n + 56;
+
+LEAF(MEMMOVE_NAME, 3)
+ add.d a4, a1, a2
+ add.d a3, a0, a2
+ beq a1, a0, L(less_1bytes)
+ move t8, a0
+
+ srai.d a6, a2, 4
+ beqz a6, L(less_16bytes)
+ srai.d a6, a2, 6
+ bnez a6, L(more_64bytes)
+ srai.d a6, a2, 5
+ beqz a6, L(less_32bytes)
+
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a1, 16
+ ld.d t3, a1, 24
+
+ ld.d t4, a4, -32
+ ld.d t5, a4, -24
+ ld.d t6, a4, -16
+ ld.d t7, a4, -8
+
+ st.d t0, a0, 0
+ st.d t1, a0, 8
+ st.d t2, a0, 16
+ st.d t3, a0, 24
+
+ st.d t4, a3, -32
+ st.d t5, a3, -24
+ st.d t6, a3, -16
+ st.d t7, a3, -8
+
+ jr ra
+
+L(less_32bytes):
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a4, -16
+ ld.d t3, a4, -8
+
+ st.d t0, a0, 0
+ st.d t1, a0, 8
+ st.d t2, a3, -16
+ st.d t3, a3, -8
+
+ jr ra
+
+L(less_16bytes):
+ srai.d a6, a2, 3
+ beqz a6, L(less_8bytes)
+
+ ld.d t0, a1, 0
+ ld.d t1, a4, -8
+ st.d t0, a0, 0
+ st.d t1, a3, -8
+
+ jr ra
+
+L(less_8bytes):
+ srai.d a6, a2, 2
+ beqz a6, L(less_4bytes)
+
+ ld.w t0, a1, 0
+ ld.w t1, a4, -4
+ st.w t0, a0, 0
+ st.w t1, a3, -4
+
+ jr ra
+
+L(less_4bytes):
+ srai.d a6, a2, 1
+ beqz a6, L(less_2bytes)
+
+ ld.h t0, a1, 0
+ ld.h t1, a4, -2
+ st.h t0, a0, 0
+ st.h t1, a3, -2
+
+ jr ra
+
+L(less_2bytes):
+ beqz a2, L(less_1bytes)
+
+ ld.b t0, a1, 0
+ st.b t0, a0, 0
+
+ jr ra
+
+L(less_1bytes):
+ jr ra
+
+L(more_64bytes):
+ sub.d a7, a0, a1
+ bltu a7, a2, L(copy_backward)
+
+L(copy_forward):
+ srli.d a0, a0, 3
+ slli.d a0, a0, 3
+ beq a0, t8, L(all_align)
+ addi.d a0, a0, 0x8
+ sub.d a7, t8, a0
+ sub.d a1, a1, a7
+ add.d a2, a7, a2
+
+L(start_unalign_proc):
+ pcaddi t1, 18
+ slli.d a6, a7, 3
+ add.d t1, t1, a6
+ jr t1
+
+ ld.b t0, a1, -7
+ st.b t0, a0, -7
+ ld.b t0, a1, -6
+ st.b t0, a0, -6
+ ld.b t0, a1, -5
+ st.b t0, a0, -5
+ ld.b t0, a1, -4
+ st.b t0, a0, -4
+ ld.b t0, a1, -3
+ st.b t0, a0, -3
+ ld.b t0, a1, -2
+ st.b t0, a0, -2
+ ld.b t0, a1, -1
+ st.b t0, a0, -1
+L(start_over):
+
+ addi.d a2, a2, -0x80
+ blt a2, zero, L(end_unalign_proc)
+
+L(loop_less):
+ LD_64(a1, 0)
+ ST_64(a0, 0)
+ LD_64(a1, 64)
+ ST_64(a0, 64)
+
+ addi.d a0, a0, 0x80
+ addi.d a1, a1, 0x80
+ addi.d a2, a2, -0x80
+ bge a2, zero, L(loop_less)
+
+L(end_unalign_proc):
+ addi.d a2, a2, 0x80
+
+ pcaddi t1, 36
+ andi t2, a2, 0x78
+ add.d a1, a1, t2
+ add.d a0, a0, t2
+ sub.d t1, t1, t2
+ jr t1
+
+ ld.d t0, a1, -120
+ st.d t0, a0, -120
+ ld.d t0, a1, -112
+ st.d t0, a0, -112
+ ld.d t0, a1, -104
+ st.d t0, a0, -104
+ ld.d t0, a1, -96
+ st.d t0, a0, -96
+ ld.d t0, a1, -88
+ st.d t0, a0, -88
+ ld.d t0, a1, -80
+ st.d t0, a0, -80
+ ld.d t0, a1, -72
+ st.d t0, a0, -72
+ ld.d t0, a1, -64
+ st.d t0, a0, -64
+ ld.d t0, a1, -56
+ st.d t0, a0, -56
+ ld.d t0, a1, -48
+ st.d t0, a0, -48
+ ld.d t0, a1, -40
+ st.d t0, a0, -40
+ ld.d t0, a1, -32
+ st.d t0, a0, -32
+ ld.d t0, a1, -24
+ st.d t0, a0, -24
+ ld.d t0, a1, -16
+ st.d t0, a0, -16
+ ld.d t0, a1, -8
+ st.d t0, a0, -8
+
+ andi a2, a2, 0x7
+ pcaddi t1, 18
+ slli.d a2, a2, 3
+ sub.d t1, t1, a2
+ jr t1
+
+ ld.b t0, a4, -7
+ st.b t0, a3, -7
+ ld.b t0, a4, -6
+ st.b t0, a3, -6
+ ld.b t0, a4, -5
+ st.b t0, a3, -5
+ ld.b t0, a4, -4
+ st.b t0, a3, -4
+ ld.b t0, a4, -3
+ st.b t0, a3, -3
+ ld.b t0, a4, -2
+ st.b t0, a3, -2
+ ld.b t0, a4, -1
+ st.b t0, a3, -1
+L(end):
+ move a0, t8
+ jr ra
+
+L(all_align):
+ addi.d a1, a1, 0x8
+ addi.d a0, a0, 0x8
+ ld.d t0, a1, -8
+ st.d t0, a0, -8
+ addi.d a2, a2, -8
+ b L(start_over)
+
+L(all_align_back):
+ addi.d a4, a4, -0x8
+ addi.d a3, a3, -0x8
+ ld.d t0, a4, 0
+ st.d t0, a3, 0
+ addi.d a2, a2, -8
+ b L(start_over_back)
+
+L(copy_backward):
+ move a5, a3
+ srli.d a3, a3, 3
+ slli.d a3, a3, 3
+ beq a3, a5, L(all_align_back)
+ sub.d a7, a3, a5
+ add.d a4, a4, a7
+ add.d a2, a7, a2
+
+ pcaddi t1, 18
+ slli.d a6, a7, 3
+ add.d t1, t1, a6
+ jr t1
+
+ ld.b t0, a4, 6
+ st.b t0, a3, 6
+ ld.b t0, a4, 5
+ st.b t0, a3, 5
+ ld.b t0, a4, 4
+ st.b t0, a3, 4
+ ld.b t0, a4, 3
+ st.b t0, a3, 3
+ ld.b t0, a4, 2
+ st.b t0, a3, 2
+ ld.b t0, a4, 1
+ st.b t0, a3, 1
+ ld.b t0, a4, 0
+ st.b t0, a3, 0
+L(start_over_back):
+ addi.d a2, a2, -0x80
+ blt a2, zero, L(end_unalign_proc_back)
+
+L(loop_less_back):
+ LD_64(a4, -64)
+ ST_64(a3, -64)
+ LD_64(a4, -128)
+ ST_64(a3, -128)
+
+ addi.d a4, a4, -0x80
+ addi.d a3, a3, -0x80
+ addi.d a2, a2, -0x80
+ bge a2, zero, L(loop_less_back)
+
+L(end_unalign_proc_back):
+ addi.d a2, a2, 0x80
+
+ pcaddi t1, 36
+ andi t2, a2, 0x78
+ sub.d a4, a4, t2
+ sub.d a3, a3, t2
+ sub.d t1, t1, t2
+ jr t1
+
+ ld.d t0, a4, 112
+ st.d t0, a3, 112
+ ld.d t0, a4, 104
+ st.d t0, a3, 104
+ ld.d t0, a4, 96
+ st.d t0, a3, 96
+ ld.d t0, a4, 88
+ st.d t0, a3, 88
+ ld.d t0, a4, 80
+ st.d t0, a3, 80
+ ld.d t0, a4, 72
+ st.d t0, a3, 72
+ ld.d t0, a4, 64
+ st.d t0, a3, 64
+ ld.d t0, a4, 56
+ st.d t0, a3, 56
+ ld.d t0, a4, 48
+ st.d t0, a3, 48
+ ld.d t0, a4, 40
+ st.d t0, a3, 40
+ ld.d t0, a4, 32
+ st.d t0, a3, 32
+ ld.d t0, a4, 24
+ st.d t0, a3, 24
+ ld.d t0, a4, 16
+ st.d t0, a3, 16
+ ld.d t0, a4, 8
+ st.d t0, a3, 8
+ ld.d t0, a4, 0
+ st.d t0, a3, 0
+
+ andi a2, a2, 0x7
+ pcaddi t1, 18
+ slli.d a2, a2, 3
+ sub.d t1, t1, a2
+ jr t1
+
+ ld.b t0, a1, 6
+ st.b t0, a0, 6
+ ld.b t0, a1, 5
+ st.b t0, a0, 5
+ ld.b t0, a1, 4
+ st.b t0, a0, 4
+ ld.b t0, a1, 3
+ st.b t0, a0, 3
+ ld.b t0, a1, 2
+ st.b t0, a0, 2
+ ld.b t0, a1, 1
+ st.b t0, a0, 1
+ ld.b t0, a1, 0
+ st.b t0, a0, 0
+
+ move a0, t8
+ jr ra
+END(MEMMOVE_NAME)
+
+libc_hidden_builtin_def (MEMMOVE_NAME)
+#endif
diff --git a/sysdeps/loongarch/lp64/multiarch/memmove.c b/sysdeps/loongarch/lp64/multiarch/memmove.c
new file mode 100644
index 00000000..7e3ca4c4
--- /dev/null
+++ b/sysdeps/loongarch/lp64/multiarch/memmove.c
@@ -0,0 +1,38 @@
+/* Multiple versions of memmove.
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+# define memmove __redirect_memmove
+# include <string.h>
+# undef memmove
+
+# define SYMBOL_NAME memmove
+# include "ifunc-lasx.h"
+
+libc_ifunc_redirected (__redirect_memmove, __libc_memmove,
+ IFUNC_SELECTOR ());
+strong_alias (__libc_memmove, memmove);
+
+# ifdef SHARED
+__hidden_ver1 (__libc_memmove, __GI_memmove, __redirect_memmove)
+ __attribute__ ((visibility ("hidden")));
+# endif
+
+#endif
--
2.33.0