From 0bf8e824c5e231a29a7742c00f776928a08d777f Mon Sep 17 00:00:00 2001 From: Liwei Ge Date: Thu, 7 Jul 2022 20:59:22 +0800 Subject: [PATCH] Add loongarch64 base support Signed-off-by: Guoqi Chen --- ...md-internal-sys-declare-loong64-arch.patch | 57 + ...-fix-placement-of-loong64-definition.patch | 87 + ...rnal-add-loong64-constant-definition.patch | 66 + ...configure-go-tool-workflow-for-loong.patch | 39 + ...ster-loong64.Init-function-for-compi.patch | 34 + ...rnal-loong64-implement-Init-function.patch | 984 ++ ...rnal-ssa-config-lower-pass-function-.patch | 36 + ...rnal-ssa-increase-the-bit-width-of-B.patch | 27 + ...rnal-ssa-gen-define-rules-and-operat.patch | 12601 ++++++++++++++++ ...rnal-ssa-inline-memmove-with-known-s.patch | 27 + ...rnal-ssa-add-support-on-loong64-for-.patch | 37 + ...rnal-ssagen-enable-intrinsic-operati.patch | 172 + ...e-internal-fix-test-error-on-loong64.patch | 44 + ...-instructions-and-registers-for-loon.patch | 3323 ++++ ...-helper-function-and-end-to-end-test.patch | 812 + ...abi-cmd-link-support-linker-for-linu.patch | 750 + ...p-for-linux-loong64-and-implement-ru.patch | 989 ++ ...-load-save-TLS-variable-g-on-loong64.patch | 68 + ...e-implement-signal-for-linux-loong64.patch | 439 + ...ntime-support-vdso-for-linux-loong64.patch | 114 + ...t-duffzero-duffcopy-for-linux-loong6.patch | 979 ++ ...ement-asyncPreempt-for-linux-loong64.patch | 231 + ...ort-memclr-memmove-for-linux-loong64.patch | 178 + ...t-syscalls-for-runtime-bootstrap-on-.patch | 626 + ...d-tag-for-common-support-on-linux-lo.patch | 63 + ...e-fix-runtime-test-error-for-loong64.patch | 26 + ...ernal-add-atomic-support-for-loong64.patch | 414 + ...d-cgo-configure-cgo-tool-for-loong64.patch | 47 + ...cgo-function-call-support-for-loong6.patch | 250 + ...mp-cmd-pprof-disassembly-is-not-supp.patch | 55 + ...d-dist-support-dist-tool-for-loong64.patch | 88 + ...e-vendored-golang.org-x-sys-to-suppo.patch | 2740 ++++ ...e-vendored-golang.org-x-tools-to-sup.patch | 52 + ...-support-basic-byte-operation-on-loo.patch | 297 + ...-reflect-vendor-support-standard-lib.patch | 390 + ...dd-syscall-support-for-linux-loong64.patch | 5006 ++++++ ...all-unix-loong64-use-generic-syscall.patch | 45 + ...misc-test-fix-test-error-for-loong64.patch | 93 + ...-copyright-add-Loongson-into-AUTHORS.patch | 25 + 0040-api-fix-check-errors-for-loong64.patch | 126 + ...cgo-test-sigaltstack-size-on-loong64.patch | 54 + 0042-fixup-fix-mabi-to-lp64-for-loong64.patch | 82 + ...fixup-fix-runtime-defs_linux_loong64.patch | 200 + ...ixup-fix-test-issue11656-for-loong64.patch | 26 + ...nc-breakpoint-implementation-on-loon.patch | 30 + ...r-golang.org-x-sys-for-byteorder-fix.patch | 26 + ...ve-atomic-Cas-Xchg-and-Xadd-intrinsi.patch | 68 + ...cPreempt-implementation-for-errors-o.patch | 101 + ...-add-FuncInfo-SPWRITE-flag-for-linux.patch | 50 + ...ing-TOPFRAME-NOFRAME-flag-for-linux-.patch | 48 + ...loong64-constant-folding-in-division.patch | 148 + ...x-the-vDSO-symbol-version-on-loong64.patch | 39 + ...pu-fix-cpu-cacheLineSize-for-loong64.patch | 36 + ...internal-syscall-always-zero-the-hig.patch | 68 + ...up-unused-function-gosave-on-loong64.patch | 39 + ...d-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch | 85 + golang.spec | 83 +- 57 files changed, 33608 insertions(+), 12 deletions(-) create mode 100644 0001-cmd-internal-sys-declare-loong64-arch.patch create mode 100644 0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch create mode 100644 0003-internal-add-loong64-constant-definition.patch create mode 100644 
0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch create mode 100644 0005-cmd-compile-register-loong64.Init-function-for-compi.patch create mode 100644 0006-cmd-compile-internal-loong64-implement-Init-function.patch create mode 100644 0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch create mode 100644 0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch create mode 100644 0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch create mode 100644 0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch create mode 100644 0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch create mode 100644 0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch create mode 100644 0013-cmd-compile-internal-fix-test-error-on-loong64.patch create mode 100644 0014-cmd-internal-obj-instructions-and-registers-for-loon.patch create mode 100644 0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch create mode 100644 0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch create mode 100644 0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch create mode 100644 0018-runtime-load-save-TLS-variable-g-on-loong64.patch create mode 100644 0019-runtime-implement-signal-for-linux-loong64.patch create mode 100644 0020-runtime-support-vdso-for-linux-loong64.patch create mode 100644 0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch create mode 100644 0022-runtime-implement-asyncPreempt-for-linux-loong64.patch create mode 100644 0023-runtime-support-memclr-memmove-for-linux-loong64.patch create mode 100644 0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch create mode 100644 0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch create mode 100644 0026-runtime-fix-runtime-test-error-for-loong64.patch create mode 100644 0027-runtime-internal-add-atomic-support-for-loong64.patch create mode 100644 0028-cmd-cgo-configure-cgo-tool-for-loong64.patch create mode 100644 0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch create mode 100644 0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch create mode 100644 0031-cmd-dist-support-dist-tool-for-loong64.patch create mode 100644 0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch create mode 100644 0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch create mode 100644 0034-internal-bytealg-support-basic-byte-operation-on-loo.patch create mode 100644 0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch create mode 100644 0036-syscall-add-syscall-support-for-linux-loong64.patch create mode 100644 0037-internal-syscall-unix-loong64-use-generic-syscall.patch create mode 100644 0038-misc-test-fix-test-error-for-loong64.patch create mode 100644 0039-copyright-add-Loongson-into-AUTHORS.patch create mode 100644 0040-api-fix-check-errors-for-loong64.patch create mode 100644 0041-fixup-fix-misc-cgo-test-sigaltstack-size-on-loong64.patch create mode 100644 0042-fixup-fix-mabi-to-lp64-for-loong64.patch create mode 100644 0043-fixup-fix-runtime-defs_linux_loong64.patch create mode 100644 0044-fixup-fix-test-issue11656-for-loong64.patch create mode 100644 0045-runtime-fixed-func-breakpoint-implementation-on-loon.patch create mode 100644 0046-update-vendor-golang.org-x-sys-for-byteorder-fix.patch create mode 100644 0047-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch create mode 100644 0048-runtime-fix-asyncPreempt-implementation-for-errors-o.patch create mode 100644 
0049-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch create mode 100644 0050-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch create mode 100644 0051-cmd-compile-fix-loong64-constant-folding-in-division.patch create mode 100644 0052-runtime-fix-the-vDSO-symbol-version-on-loong64.patch create mode 100644 0053-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch create mode 100644 0054-syscall-runtime-internal-syscall-always-zero-the-hig.patch create mode 100644 0055-runtime-clean-up-unused-function-gosave-on-loong64.patch create mode 100644 0056-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch diff --git a/0001-cmd-internal-sys-declare-loong64-arch.patch b/0001-cmd-internal-sys-declare-loong64-arch.patch new file mode 100644 index 0000000..53087a3 --- /dev/null +++ b/0001-cmd-internal-sys-declare-loong64-arch.patch @@ -0,0 +1,57 @@ +From 2daa10c1a22775d71e730db274497fd585e5f7b0 Mon Sep 17 00:00:00 2001 +From: WANG Xuerui +Date: Sun, 20 Jun 2021 12:59:45 +0800 +Subject: [PATCH 01/56] cmd/internal/sys: declare loong64 arch + +Updates #46229 + +Change-Id: Icb736f2440443e9245872b091d13e5bdfb6cb01a +Reviewed-on: https://go-review.googlesource.com/c/go/+/339009 +Reviewed-by: Meng Zhuo +Reviewed-by: Cherry Mui +Trust: Meng Zhuo +Trust: Michael Knyszek +Run-TryBot: Meng Zhuo +TryBot-Result: Go Bot +--- + src/cmd/internal/sys/arch.go | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/src/cmd/internal/sys/arch.go b/src/cmd/internal/sys/arch.go +index a3e3976..e7063fd 100644 +--- a/src/cmd/internal/sys/arch.go ++++ b/src/cmd/internal/sys/arch.go +@@ -22,6 +22,7 @@ const ( + RISCV64 + S390X + Wasm ++ Loong64 + ) + + // Arch represents an individual architecture. +@@ -189,6 +190,16 @@ var ArchWasm = &Arch{ + Alignment: 1, + } + ++var ArchLoong64 = &Arch{ ++ Name: "loong64", ++ Family: Loong64, ++ ByteOrder: binary.LittleEndian, ++ PtrSize: 8, ++ RegSize: 8, ++ MinLC: 4, ++ Alignment: 8, // Unaligned accesses are not guaranteed to be fast ++} ++ + var Archs = [...]*Arch{ + Arch386, + ArchAMD64, +@@ -203,4 +214,5 @@ var Archs = [...]*Arch{ + ArchRISCV64, + ArchS390X, + ArchWasm, ++ ArchLoong64, + } +-- +2.27.0 + diff --git a/0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch b/0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch new file mode 100644 index 0000000..0150701 --- /dev/null +++ b/0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch @@ -0,0 +1,87 @@ +From 808642d3b1eae9ced3ad851c9d10ae0937f8a6f2 Mon Sep 17 00:00:00 2001 +From: WANG Xuerui +Date: Mon, 6 Sep 2021 15:17:11 +0800 +Subject: [PATCH 02/56] cmd/internal/sys: fix placement of loong64 definition + +In rebasing the patch series up to CL 339015, the branches were messed +up by me, and changes from v3 to v4 of CL 339009 was lost. Fix the +ordering to restore alphabetical order per original review. 
+ +Change-Id: I8e57c96e996c4f962cab684a9d305a8dbdeea43b +Reviewed-on: https://go-review.googlesource.com/c/go/+/347731 +Run-TryBot: Tobias Klauser +TryBot-Result: Go Bot +Reviewed-by: Ian Lance Taylor +Trust: Meng Zhuo +--- + src/cmd/internal/sys/arch.go | 24 ++++++++++++------------ + 1 file changed, 12 insertions(+), 12 deletions(-) + +diff --git a/src/cmd/internal/sys/arch.go b/src/cmd/internal/sys/arch.go +index e7063fd..4b2b4c3 100644 +--- a/src/cmd/internal/sys/arch.go ++++ b/src/cmd/internal/sys/arch.go +@@ -16,13 +16,13 @@ const ( + ARM + ARM64 + I386 ++ Loong64 + MIPS + MIPS64 + PPC64 + RISCV64 + S390X + Wasm +- Loong64 + ) + + // Arch represents an individual architecture. +@@ -100,6 +100,16 @@ var ArchARM64 = &Arch{ + Alignment: 1, + } + ++var ArchLoong64 = &Arch{ ++ Name: "loong64", ++ Family: Loong64, ++ ByteOrder: binary.LittleEndian, ++ PtrSize: 8, ++ RegSize: 8, ++ MinLC: 4, ++ Alignment: 8, // Unaligned accesses are not guaranteed to be fast ++} ++ + var ArchMIPS = &Arch{ + Name: "mips", + Family: MIPS, +@@ -190,21 +200,12 @@ var ArchWasm = &Arch{ + Alignment: 1, + } + +-var ArchLoong64 = &Arch{ +- Name: "loong64", +- Family: Loong64, +- ByteOrder: binary.LittleEndian, +- PtrSize: 8, +- RegSize: 8, +- MinLC: 4, +- Alignment: 8, // Unaligned accesses are not guaranteed to be fast +-} +- + var Archs = [...]*Arch{ + Arch386, + ArchAMD64, + ArchARM, + ArchARM64, ++ ArchLoong64, + ArchMIPS, + ArchMIPSLE, + ArchMIPS64, +@@ -214,5 +215,4 @@ var Archs = [...]*Arch{ + ArchRISCV64, + ArchS390X, + ArchWasm, +- ArchLoong64, + } +-- +2.27.0 + diff --git a/0003-internal-add-loong64-constant-definition.patch b/0003-internal-add-loong64-constant-definition.patch new file mode 100644 index 0000000..f773daa --- /dev/null +++ b/0003-internal-add-loong64-constant-definition.patch @@ -0,0 +1,66 @@ +From 1b566ebd45f98632b70cce1e2734e02e48cbfc81 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 15 Nov 2021 20:53:47 +0800 +Subject: [PATCH 03/56] internal: add loong64 constant definition + +Change-Id: I39d42e5959391e47bf621b3bdd3d95de72f023cc +--- + src/internal/cpu/cpu_loong64.go | 12 ++++++++++++ + src/runtime/internal/sys/arch.go | 1 + + src/runtime/internal/sys/arch_loong64.go | 13 +++++++++++++ + 3 files changed, 26 insertions(+) + create mode 100644 src/internal/cpu/cpu_loong64.go + create mode 100644 src/runtime/internal/sys/arch_loong64.go + +diff --git a/src/internal/cpu/cpu_loong64.go b/src/internal/cpu/cpu_loong64.go +new file mode 100644 +index 0000000..d0ff934 +--- /dev/null ++++ b/src/internal/cpu/cpu_loong64.go +@@ -0,0 +1,12 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build loong64 ++// +build loong64 ++ ++package cpu ++ ++const CacheLinePadSize = 32 ++ ++func doinit() {} +diff --git a/src/runtime/internal/sys/arch.go b/src/runtime/internal/sys/arch.go +index 3c99a2f..154673b 100644 +--- a/src/runtime/internal/sys/arch.go ++++ b/src/runtime/internal/sys/arch.go +@@ -10,6 +10,7 @@ const ( + AMD64 ArchFamilyType = iota + ARM + ARM64 ++ LOONG64 + I386 + MIPS + MIPS64 +diff --git a/src/runtime/internal/sys/arch_loong64.go b/src/runtime/internal/sys/arch_loong64.go +new file mode 100644 +index 0000000..4ed3388 +--- /dev/null ++++ b/src/runtime/internal/sys/arch_loong64.go +@@ -0,0 +1,13 @@ ++// Copyright 2021 The Go Authors. All rights reserved. 
++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package sys ++ ++const ( ++ _ArchFamily = LOONG64 ++ _DefaultPhysPageSize = 16384 ++ _PCQuantum = 4 ++ _MinFrameSize = 8 ++ _StackAlign = PtrSize ++) +-- +2.27.0 + diff --git a/0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch b/0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch new file mode 100644 index 0000000..6a165c8 --- /dev/null +++ b/0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch @@ -0,0 +1,39 @@ +From 5151b71e20d740c794c2a1395498fa9e84391e6a Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 15:57:43 +0800 +Subject: [PATCH 04/56] cmd/go/internal: configure go tool workflow for loong64 + +Change-Id: I6b537a7d842b0683586917fe7ea7cd4d70d888de +--- + src/cmd/go/internal/imports/build.go | 1 + + src/cmd/go/internal/work/exec.go | 2 ++ + 2 files changed, 3 insertions(+) + +diff --git a/src/cmd/go/internal/imports/build.go b/src/cmd/go/internal/imports/build.go +index ff6bea6..5d4580c 100644 +--- a/src/cmd/go/internal/imports/build.go ++++ b/src/cmd/go/internal/imports/build.go +@@ -326,6 +326,7 @@ var KnownArch = map[string]bool{ + "mips64le": true, + "mips64p32": true, + "mips64p32le": true, ++ "loong64": true, + "ppc": true, + "riscv": true, + "riscv64": true, +diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go +index 5a225fb..83d4161 100644 +--- a/src/cmd/go/internal/work/exec.go ++++ b/src/cmd/go/internal/work/exec.go +@@ -2648,6 +2648,8 @@ func (b *Builder) gccArchArgs() []string { + } else if cfg.GOMIPS == "softfloat" { + return append(args, "-msoft-float") + } ++ case "loong64": ++ return []string{"-mabi=lp64d"} + case "ppc64": + if cfg.Goos == "aix" { + return []string{"-maix64"} +-- +2.27.0 + diff --git a/0005-cmd-compile-register-loong64.Init-function-for-compi.patch b/0005-cmd-compile-register-loong64.Init-function-for-compi.patch new file mode 100644 index 0000000..87f2ce9 --- /dev/null +++ b/0005-cmd-compile-register-loong64.Init-function-for-compi.patch @@ -0,0 +1,34 @@ +From df15ee6ee04865c5338f17e9a94e8f51119a975f Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 16:41:33 +0800 +Subject: [PATCH 05/56] cmd/compile: register loong64.Init function for + compiler + +Change-Id: Ia3cb07af626e3422e43e3834baf15b7c8fad2326 +--- + src/cmd/compile/main.go | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go +index 3af1e1f..7d38bea 100644 +--- a/src/cmd/compile/main.go ++++ b/src/cmd/compile/main.go +@@ -10,6 +10,7 @@ import ( + "cmd/compile/internal/arm64" + "cmd/compile/internal/base" + "cmd/compile/internal/gc" ++ "cmd/compile/internal/loong64" + "cmd/compile/internal/mips" + "cmd/compile/internal/mips64" + "cmd/compile/internal/ppc64" +@@ -29,6 +30,7 @@ var archInits = map[string]func(*ssagen.ArchInfo){ + "amd64": amd64.Init, + "arm": arm.Init, + "arm64": arm64.Init, ++ "loong64": loong64.Init, + "mips": mips.Init, + "mipsle": mips.Init, + "mips64": mips64.Init, +-- +2.27.0 + diff --git a/0006-cmd-compile-internal-loong64-implement-Init-function.patch b/0006-cmd-compile-internal-loong64-implement-Init-function.patch new file mode 100644 index 0000000..a6966e8 --- /dev/null +++ b/0006-cmd-compile-internal-loong64-implement-Init-function.patch @@ -0,0 +1,984 @@ +From e9f66199f5537dfc148235b5f70b8ae8e87d5254 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 16:47:08 +0800 +Subject: [PATCH 06/56] 
cmd/compile/internal/loong64: implement Init function + for loong64 + +Change-Id: Iab3f13b70a41f31f412b59801def3106f5eb7c11 +--- + src/cmd/compile/internal/loong64/galign.go | 24 + + src/cmd/compile/internal/loong64/ggen.go | 59 ++ + src/cmd/compile/internal/loong64/ssa.go | 864 +++++++++++++++++++++ + 3 files changed, 947 insertions(+) + create mode 100644 src/cmd/compile/internal/loong64/galign.go + create mode 100644 src/cmd/compile/internal/loong64/ggen.go + create mode 100644 src/cmd/compile/internal/loong64/ssa.go + +diff --git a/src/cmd/compile/internal/loong64/galign.go b/src/cmd/compile/internal/loong64/galign.go +new file mode 100644 +index 0000000..ac8b5db +--- /dev/null ++++ b/src/cmd/compile/internal/loong64/galign.go +@@ -0,0 +1,24 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/compile/internal/ssa" ++ "cmd/compile/internal/ssagen" ++ "cmd/internal/obj/loong64" ++) ++ ++func Init(arch *ssagen.ArchInfo) { ++ arch.LinkArch = &loong64.Linkloong64 ++ arch.REGSP = loong64.REGSP ++ arch.MAXWIDTH = 1 << 50 ++ arch.ZeroRange = zerorange ++ arch.Ginsnop = ginsnop ++ arch.Ginsnopdefer = ginsnop ++ ++ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} ++ arch.SSAGenValue = ssaGenValue ++ arch.SSAGenBlock = ssaGenBlock ++} +diff --git a/src/cmd/compile/internal/loong64/ggen.go b/src/cmd/compile/internal/loong64/ggen.go +new file mode 100644 +index 0000000..c3649cf +--- /dev/null ++++ b/src/cmd/compile/internal/loong64/ggen.go +@@ -0,0 +1,59 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package loong64 ++ ++import ( ++ "cmd/compile/internal/ir" ++ "cmd/compile/internal/objw" ++ "cmd/compile/internal/types" ++ "cmd/internal/obj" ++ "cmd/internal/obj/loong64" ++) ++ ++func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { ++ if cnt == 0 { ++ return p ++ } ++ if cnt < int64(4*types.PtrSize) { ++ for i := int64(0); i < cnt; i += int64(types.PtrSize) { ++ p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGSP, 8+off+i) ++ } ++ } else if cnt <= int64(128*types.PtrSize) { ++ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, loong64.REGRT1, 0) ++ p.Reg = loong64.REGSP ++ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = ir.Syms.Duffzero ++ p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize)) ++ } else { ++ // ADDV $(8+frame+lo-8), SP, r1 ++ // ADDV $cnt, r1, r2 ++ // loop: ++ // MOVV R0, (Widthptr)r1 ++ // ADDV $Widthptr, r1 ++ // BNE r1, r2, loop ++ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, loong64.REGRT1, 0) ++ p.Reg = loong64.REGSP ++ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, loong64.REGRT2, 0) ++ p.Reg = loong64.REGRT1 ++ p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGRT1, int64(types.PtrSize)) ++ p1 := p ++ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, loong64.REGRT1, 0) ++ p = pp.Append(p, loong64.ABNE, obj.TYPE_REG, loong64.REGRT1, 0, obj.TYPE_BRANCH, 0, 0) ++ p.Reg = loong64.REGRT2 ++ p.To.SetTarget(p1) ++ } ++ ++ return p ++} ++ ++func ginsnop(pp *objw.Progs) *obj.Prog { ++ p := pp.Prog(loong64.ANOR) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REG_R0 ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REG_R0 ++ return p ++} +diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go +new file mode 100644 +index 0000000..4f3aa68 +--- /dev/null ++++ b/src/cmd/compile/internal/loong64/ssa.go +@@ -0,0 +1,864 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "math" ++ ++ "cmd/compile/internal/base" ++ "cmd/compile/internal/ir" ++ "cmd/compile/internal/logopt" ++ "cmd/compile/internal/ssa" ++ "cmd/compile/internal/ssagen" ++ "cmd/compile/internal/types" ++ "cmd/internal/obj" ++ "cmd/internal/obj/loong64" ++) ++ ++// isFPreg reports whether r is an FP register ++func isFPreg(r int16) bool { ++ return loong64.REG_F0 <= r && r <= loong64.REG_F31 ++} ++ ++// loadByType returns the load instruction of the given type. ++func loadByType(t *types.Type, r int16) obj.As { ++ if isFPreg(r) { ++ if t.Size() == 4 { ++ return loong64.AMOVF ++ } else { ++ return loong64.AMOVD ++ } ++ } else { ++ switch t.Size() { ++ case 1: ++ if t.IsSigned() { ++ return loong64.AMOVB ++ } else { ++ return loong64.AMOVBU ++ } ++ case 2: ++ if t.IsSigned() { ++ return loong64.AMOVH ++ } else { ++ return loong64.AMOVHU ++ } ++ case 4: ++ if t.IsSigned() { ++ return loong64.AMOVW ++ } else { ++ return loong64.AMOVWU ++ } ++ case 8: ++ return loong64.AMOVV ++ } ++ } ++ panic("bad load type") ++} ++ ++// storeByType returns the store instruction of the given type. 
++func storeByType(t *types.Type, r int16) obj.As { ++ if isFPreg(r) { ++ if t.Size() == 4 { ++ return loong64.AMOVF ++ } else { ++ return loong64.AMOVD ++ } ++ } else { ++ switch t.Size() { ++ case 1: ++ return loong64.AMOVB ++ case 2: ++ return loong64.AMOVH ++ case 4: ++ return loong64.AMOVW ++ case 8: ++ return loong64.AMOVV ++ } ++ } ++ panic("bad store type") ++} ++ ++func ssaGenValue(s *ssagen.State, v *ssa.Value) { ++ switch v.Op { ++ case ssa.OpCopy, ssa.OpLOONG64MOVVreg: ++ if v.Type.IsMemory() { ++ return ++ } ++ x := v.Args[0].Reg() ++ y := v.Reg() ++ if x == y { ++ return ++ } ++ as := loong64.AMOVV ++ if isFPreg(x) && isFPreg(y) { ++ as = loong64.AMOVD ++ } ++ p := s.Prog(as) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = x ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = y ++ case ssa.OpLOONG64MOVVnop: ++ if v.Reg() != v.Args[0].Reg() { ++ v.Fatalf("input[0] and output not in same register %s", v.LongString()) ++ } ++ // nothing to do ++ case ssa.OpLoadReg: ++ if v.Type.IsFlags() { ++ v.Fatalf("load flags not implemented: %v", v.LongString()) ++ return ++ } ++ r := v.Reg() ++ p := s.Prog(loadByType(v.Type, r)) ++ ssagen.AddrAuto(&p.From, v.Args[0]) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = r ++ case ssa.OpStoreReg: ++ if v.Type.IsFlags() { ++ v.Fatalf("store flags not implemented: %v", v.LongString()) ++ return ++ } ++ r := v.Args[0].Reg() ++ p := s.Prog(storeByType(v.Type, r)) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = r ++ ssagen.AddrAuto(&p.To, v) ++ case ssa.OpLOONG64ADDV, ++ ssa.OpLOONG64SUBV, ++ ssa.OpLOONG64AND, ++ ssa.OpLOONG64OR, ++ ssa.OpLOONG64XOR, ++ ssa.OpLOONG64NOR, ++ ssa.OpLOONG64SLLV, ++ ssa.OpLOONG64SRLV, ++ ssa.OpLOONG64SRAV, ++ ssa.OpLOONG64ADDF, ++ ssa.OpLOONG64ADDD, ++ ssa.OpLOONG64SUBF, ++ ssa.OpLOONG64SUBD, ++ ssa.OpLOONG64MULF, ++ ssa.OpLOONG64MULD, ++ ssa.OpLOONG64DIVF, ++ ssa.OpLOONG64DIVD: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64SGT, ++ ssa.OpLOONG64SGTU: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.Reg = v.Args[1].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64ADDVconst, ++ ssa.OpLOONG64SUBVconst, ++ ssa.OpLOONG64ANDconst, ++ ssa.OpLOONG64ORconst, ++ ssa.OpLOONG64XORconst, ++ ssa.OpLOONG64NORconst, ++ ssa.OpLOONG64SLLVconst, ++ ssa.OpLOONG64SRLVconst, ++ ssa.OpLOONG64SRAVconst, ++ ssa.OpLOONG64SGTconst, ++ ssa.OpLOONG64SGTUconst: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = v.AuxInt ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64MULV: ++ p := s.Prog(loong64.AMULV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg1() ++ p1 := s.Prog(loong64.AMULHV) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ case ssa.OpLOONG64MULVU: ++ p := s.Prog(loong64.AMULV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg1() ++ p1 := s.Prog(loong64.AMULHVU) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ case ssa.OpLOONG64DIVV: ++ p := s.Prog(loong64.ADIVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = 
v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg1() ++ p1 := s.Prog(loong64.AREMV) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ case ssa.OpLOONG64DIVVU: ++ p := s.Prog(loong64.ADIVVU) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg1() ++ p1 := s.Prog(loong64.AREMVU) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ case ssa.OpLOONG64MOVVconst: ++ r := v.Reg() ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = v.AuxInt ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = r ++ if isFPreg(r) { ++ // cannot move into FP or special registers, use TMP as intermediate ++ p.To.Reg = loong64.REGTMP ++ p = s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGTMP ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = r ++ } ++ case ssa.OpLOONG64MOVFconst, ++ ssa.OpLOONG64MOVDconst: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_FCONST ++ p.From.Val = math.Float64frombits(uint64(v.AuxInt)) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64CMPEQF, ++ ssa.OpLOONG64CMPEQD, ++ ssa.OpLOONG64CMPGEF, ++ ssa.OpLOONG64CMPGED, ++ ssa.OpLOONG64CMPGTF, ++ ssa.OpLOONG64CMPGTD: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.Reg = v.Args[1].Reg() ++ case ssa.OpLOONG64MOVVaddr: ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_ADDR ++ p.From.Reg = v.Args[0].Reg() ++ var wantreg string ++ // MOVV $sym+off(base), R ++ // the assembler expands it as the following: ++ // - base is SP: add constant offset to SP (R3) ++ // when constant is large, tmp register (R30) may be used ++ // - base is SB: load external address with relocation ++ switch v.Aux.(type) { ++ default: ++ v.Fatalf("aux is of unknown type %T", v.Aux) ++ case *obj.LSym: ++ wantreg = "SB" ++ ssagen.AddAux(&p.From, v) ++ case *ir.Name: ++ wantreg = "SP" ++ ssagen.AddAux(&p.From, v) ++ case nil: ++ // No sym, just MOVV $off(SP), R ++ wantreg = "SP" ++ p.From.Offset = v.AuxInt ++ } ++ if reg := v.Args[0].RegName(); reg != wantreg { ++ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) ++ } ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64MOVBload, ++ ssa.OpLOONG64MOVBUload, ++ ssa.OpLOONG64MOVHload, ++ ssa.OpLOONG64MOVHUload, ++ ssa.OpLOONG64MOVWload, ++ ssa.OpLOONG64MOVWUload, ++ ssa.OpLOONG64MOVVload, ++ ssa.OpLOONG64MOVFload, ++ ssa.OpLOONG64MOVDload: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ ssagen.AddAux(&p.From, v) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64MOVBstore, ++ ssa.OpLOONG64MOVHstore, ++ ssa.OpLOONG64MOVWstore, ++ ssa.OpLOONG64MOVVstore, ++ ssa.OpLOONG64MOVFstore, ++ ssa.OpLOONG64MOVDstore: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = v.Args[0].Reg() ++ ssagen.AddAux(&p.To, v) ++ case ssa.OpLOONG64MOVBstorezero, ++ ssa.OpLOONG64MOVHstorezero, ++ ssa.OpLOONG64MOVWstorezero, ++ ssa.OpLOONG64MOVVstorezero: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = v.Args[0].Reg() ++ ssagen.AddAux(&p.To, v) ++ case ssa.OpLOONG64MOVBreg, ++ 
ssa.OpLOONG64MOVBUreg, ++ ssa.OpLOONG64MOVHreg, ++ ssa.OpLOONG64MOVHUreg, ++ ssa.OpLOONG64MOVWreg, ++ ssa.OpLOONG64MOVWUreg: ++ a := v.Args[0] ++ for a.Op == ssa.OpCopy || a.Op == ssa.OpLOONG64MOVVreg { ++ a = a.Args[0] ++ } ++ if a.Op == ssa.OpLoadReg && loong64.REG_R0 <= a.Reg() && a.Reg() <= loong64.REG_R31 { ++ // LoadReg from a narrower type does an extension, except loading ++ // to a floating point register. So only eliminate the extension ++ // if it is loaded to an integer register. ++ ++ t := a.Type ++ switch { ++ case v.Op == ssa.OpLOONG64MOVBreg && t.Size() == 1 && t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVBUreg && t.Size() == 1 && !t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVHreg && t.Size() == 2 && t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVHUreg && t.Size() == 2 && !t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVWreg && t.Size() == 4 && t.IsSigned(), ++ v.Op == ssa.OpLOONG64MOVWUreg && t.Size() == 4 && !t.IsSigned(): ++ // arg is a proper-typed load, already zero/sign-extended, don't extend again ++ if v.Reg() == v.Args[0].Reg() { ++ return ++ } ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ return ++ default: ++ } ++ } ++ fallthrough ++ case ssa.OpLOONG64MOVWF, ++ ssa.OpLOONG64MOVWD, ++ ssa.OpLOONG64TRUNCFW, ++ ssa.OpLOONG64TRUNCDW, ++ ssa.OpLOONG64MOVVF, ++ ssa.OpLOONG64MOVVD, ++ ssa.OpLOONG64TRUNCFV, ++ ssa.OpLOONG64TRUNCDV, ++ ssa.OpLOONG64MOVFD, ++ ssa.OpLOONG64MOVDF, ++ ssa.OpLOONG64NEGF, ++ ssa.OpLOONG64NEGD, ++ ssa.OpLOONG64SQRTD, ++ ssa.OpLOONG64SQRTF: ++ p := s.Prog(v.Op.Asm()) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64NEGV: ++ // SUB from REGZERO ++ p := s.Prog(loong64.ASUBVU) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[0].Reg() ++ p.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64DUFFZERO: ++ // runtime.duffzero expects start address - 8 in R19 ++ p := s.Prog(loong64.ASUBVU) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = 8 ++ p.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REG_R19 ++ p = s.Prog(obj.ADUFFZERO) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = ir.Syms.Duffzero ++ p.To.Offset = v.AuxInt ++ case ssa.OpLOONG64LoweredZero: ++ // SUBV $8, R19 ++ // MOVV R0, 8(R19) ++ // ADDV $8, R19 ++ // BNE Rarg1, R19, -2(PC) ++ // arg1 is the address of the last element to zero ++ var sz int64 ++ var mov obj.As ++ switch { ++ case v.AuxInt%8 == 0: ++ sz = 8 ++ mov = loong64.AMOVV ++ case v.AuxInt%4 == 0: ++ sz = 4 ++ mov = loong64.AMOVW ++ case v.AuxInt%2 == 0: ++ sz = 2 ++ mov = loong64.AMOVH ++ default: ++ sz = 1 ++ mov = loong64.AMOVB ++ } ++ p := s.Prog(loong64.ASUBVU) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = sz ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REG_R19 ++ p2 := s.Prog(mov) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = loong64.REGZERO ++ p2.To.Type = obj.TYPE_MEM ++ p2.To.Reg = loong64.REG_R19 ++ p2.To.Offset = sz ++ p3 := s.Prog(loong64.AADDVU) ++ p3.From.Type = obj.TYPE_CONST ++ p3.From.Offset = sz ++ p3.To.Type = obj.TYPE_REG ++ p3.To.Reg = loong64.REG_R19 ++ p4 := s.Prog(loong64.ABNE) ++ p4.From.Type = obj.TYPE_REG ++ p4.From.Reg = v.Args[1].Reg() ++ p4.Reg = loong64.REG_R19 ++ p4.To.Type = obj.TYPE_BRANCH ++ p4.To.SetTarget(p2) ++ case ssa.OpLOONG64DUFFCOPY: ++ p := s.Prog(obj.ADUFFCOPY) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ 
p.To.Sym = ir.Syms.Duffcopy ++ p.To.Offset = v.AuxInt ++ case ssa.OpLOONG64LoweredMove: ++ // SUBV $8, R19 ++ // MOVV 8(R19), Rtmp ++ // MOVV Rtmp, (R4) ++ // ADDV $8, R19 ++ // ADDV $8, R4 ++ // BNE Rarg2, R19, -4(PC) ++ // arg2 is the address of the last element of src ++ var sz int64 ++ var mov obj.As ++ switch { ++ case v.AuxInt%8 == 0: ++ sz = 8 ++ mov = loong64.AMOVV ++ case v.AuxInt%4 == 0: ++ sz = 4 ++ mov = loong64.AMOVW ++ case v.AuxInt%2 == 0: ++ sz = 2 ++ mov = loong64.AMOVH ++ default: ++ sz = 1 ++ mov = loong64.AMOVB ++ } ++ p := s.Prog(loong64.ASUBVU) ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = sz ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REG_R19 ++ p2 := s.Prog(mov) ++ p2.From.Type = obj.TYPE_MEM ++ p2.From.Reg = loong64.REG_R19 ++ p2.From.Offset = sz ++ p2.To.Type = obj.TYPE_REG ++ p2.To.Reg = loong64.REGTMP ++ p3 := s.Prog(mov) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = loong64.REGTMP ++ p3.To.Type = obj.TYPE_MEM ++ p3.To.Reg = loong64.REG_R4 ++ p4 := s.Prog(loong64.AADDVU) ++ p4.From.Type = obj.TYPE_CONST ++ p4.From.Offset = sz ++ p4.To.Type = obj.TYPE_REG ++ p4.To.Reg = loong64.REG_R19 ++ p5 := s.Prog(loong64.AADDVU) ++ p5.From.Type = obj.TYPE_CONST ++ p5.From.Offset = sz ++ p5.To.Type = obj.TYPE_REG ++ p5.To.Reg = loong64.REG_R4 ++ p6 := s.Prog(loong64.ABNE) ++ p6.From.Type = obj.TYPE_REG ++ p6.From.Reg = v.Args[2].Reg() ++ p6.Reg = loong64.REG_R19 ++ p6.To.Type = obj.TYPE_BRANCH ++ p6.To.SetTarget(p2) ++ case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter: ++ s.Call(v) ++ case ssa.OpLOONG64LoweredWB: ++ p := s.Prog(obj.ACALL) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = v.Aux.(*obj.LSym) ++ case ssa.OpLOONG64LoweredPanicBoundsA, ssa.OpLOONG64LoweredPanicBoundsB, ssa.OpLOONG64LoweredPanicBoundsC: ++ p := s.Prog(obj.ACALL) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] ++ s.UseArgs(16) // space used in callee args area by assembly stubs ++ case ssa.OpLOONG64LoweredAtomicLoad8, ssa.OpLOONG64LoweredAtomicLoad32, ssa.OpLOONG64LoweredAtomicLoad64: ++ as := loong64.AMOVV ++ switch v.Op { ++ case ssa.OpLOONG64LoweredAtomicLoad8: ++ as = loong64.AMOVB ++ case ssa.OpLOONG64LoweredAtomicLoad32: ++ as = loong64.AMOVW ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(as) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg0() ++ s.Prog(loong64.ADBAR) ++ case ssa.OpLOONG64LoweredAtomicStore8, ssa.OpLOONG64LoweredAtomicStore32, ssa.OpLOONG64LoweredAtomicStore64: ++ as := loong64.AMOVV ++ switch v.Op { ++ case ssa.OpLOONG64LoweredAtomicStore8: ++ as = loong64.AMOVB ++ case ssa.OpLOONG64LoweredAtomicStore32: ++ as = loong64.AMOVW ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(as) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = v.Args[0].Reg() ++ s.Prog(loong64.ADBAR) ++ case ssa.OpLOONG64LoweredAtomicStorezero32, ssa.OpLOONG64LoweredAtomicStorezero64: ++ as := loong64.AMOVV ++ if v.Op == ssa.OpLOONG64LoweredAtomicStorezero32 { ++ as = loong64.AMOVW ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(as) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = v.Args[0].Reg() ++ s.Prog(loong64.ADBAR) ++ case ssa.OpLOONG64LoweredAtomicExchange32, ssa.OpLOONG64LoweredAtomicExchange64: ++ // DBAR ++ // MOVV Rarg1, Rtmp ++ // LL (Rarg0), Rout ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ ll := 
loong64.ALLV ++ sc := loong64.ASCV ++ if v.Op == ssa.OpLOONG64LoweredAtomicExchange32 { ++ ll = loong64.ALL ++ sc = loong64.ASC ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = v.Args[1].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REGTMP ++ p1 := s.Prog(ll) ++ p1.From.Type = obj.TYPE_MEM ++ p1.From.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = v.Reg0() ++ p2 := s.Prog(sc) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = loong64.REGTMP ++ p2.To.Type = obj.TYPE_MEM ++ p2.To.Reg = v.Args[0].Reg() ++ p3 := s.Prog(loong64.ABEQ) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = loong64.REGTMP ++ p3.To.Type = obj.TYPE_BRANCH ++ p3.To.SetTarget(p) ++ s.Prog(loong64.ADBAR) ++ case ssa.OpLOONG64LoweredAtomicAdd32, ssa.OpLOONG64LoweredAtomicAdd64: ++ // DBAR ++ // LL (Rarg0), Rout ++ // ADDV Rarg1, Rout, Rtmp ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ // ADDV Rarg1, Rout ++ ll := loong64.ALLV ++ sc := loong64.ASCV ++ if v.Op == ssa.OpLOONG64LoweredAtomicAdd32 { ++ ll = loong64.ALL ++ sc = loong64.ASC ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(ll) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg0() ++ p1 := s.Prog(loong64.AADDVU) ++ p1.From.Type = obj.TYPE_REG ++ p1.From.Reg = v.Args[1].Reg() ++ p1.Reg = v.Reg0() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = loong64.REGTMP ++ p2 := s.Prog(sc) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = loong64.REGTMP ++ p2.To.Type = obj.TYPE_MEM ++ p2.To.Reg = v.Args[0].Reg() ++ p3 := s.Prog(loong64.ABEQ) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = loong64.REGTMP ++ p3.To.Type = obj.TYPE_BRANCH ++ p3.To.SetTarget(p) ++ s.Prog(loong64.ADBAR) ++ p4 := s.Prog(loong64.AADDVU) ++ p4.From.Type = obj.TYPE_REG ++ p4.From.Reg = v.Args[1].Reg() ++ p4.Reg = v.Reg0() ++ p4.To.Type = obj.TYPE_REG ++ p4.To.Reg = v.Reg0() ++ case ssa.OpLOONG64LoweredAtomicAddconst32, ssa.OpLOONG64LoweredAtomicAddconst64: ++ // DBAR ++ // LL (Rarg0), Rout ++ // ADDV $auxint, Rout, Rtmp ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ // ADDV $auxint, Rout ++ ll := loong64.ALLV ++ sc := loong64.ASCV ++ if v.Op == ssa.OpLOONG64LoweredAtomicAddconst32 { ++ ll = loong64.ALL ++ sc = loong64.ASC ++ } ++ s.Prog(loong64.ADBAR) ++ p := s.Prog(ll) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg0() ++ p1 := s.Prog(loong64.AADDVU) ++ p1.From.Type = obj.TYPE_CONST ++ p1.From.Offset = v.AuxInt ++ p1.Reg = v.Reg0() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = loong64.REGTMP ++ p2 := s.Prog(sc) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = loong64.REGTMP ++ p2.To.Type = obj.TYPE_MEM ++ p2.To.Reg = v.Args[0].Reg() ++ p3 := s.Prog(loong64.ABEQ) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = loong64.REGTMP ++ p3.To.Type = obj.TYPE_BRANCH ++ p3.To.SetTarget(p) ++ s.Prog(loong64.ADBAR) ++ p4 := s.Prog(loong64.AADDVU) ++ p4.From.Type = obj.TYPE_CONST ++ p4.From.Offset = v.AuxInt ++ p4.Reg = v.Reg0() ++ p4.To.Type = obj.TYPE_REG ++ p4.To.Reg = v.Reg0() ++ case ssa.OpLOONG64LoweredAtomicCas32, ssa.OpLOONG64LoweredAtomicCas64: ++ // MOVV $0, Rout ++ // DBAR ++ // LL (Rarg0), Rtmp ++ // BNE Rtmp, Rarg1, 4(PC) ++ // MOVV Rarg2, Rout ++ // SC Rout, (Rarg0) ++ // BEQ Rout, -4(PC) ++ // DBAR ++ ll := loong64.ALLV ++ sc := loong64.ASCV ++ if v.Op == ssa.OpLOONG64LoweredAtomicCas32 { ++ ll = loong64.ALL ++ sc = loong64.ASC ++ } ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ 
p.From.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg0() ++ s.Prog(loong64.ADBAR) ++ p1 := s.Prog(ll) ++ p1.From.Type = obj.TYPE_MEM ++ p1.From.Reg = v.Args[0].Reg() ++ p1.To.Type = obj.TYPE_REG ++ p1.To.Reg = loong64.REGTMP ++ p2 := s.Prog(loong64.ABNE) ++ p2.From.Type = obj.TYPE_REG ++ p2.From.Reg = v.Args[1].Reg() ++ p2.Reg = loong64.REGTMP ++ p2.To.Type = obj.TYPE_BRANCH ++ p3 := s.Prog(loong64.AMOVV) ++ p3.From.Type = obj.TYPE_REG ++ p3.From.Reg = v.Args[2].Reg() ++ p3.To.Type = obj.TYPE_REG ++ p3.To.Reg = v.Reg0() ++ p4 := s.Prog(sc) ++ p4.From.Type = obj.TYPE_REG ++ p4.From.Reg = v.Reg0() ++ p4.To.Type = obj.TYPE_MEM ++ p4.To.Reg = v.Args[0].Reg() ++ p5 := s.Prog(loong64.ABEQ) ++ p5.From.Type = obj.TYPE_REG ++ p5.From.Reg = v.Reg0() ++ p5.To.Type = obj.TYPE_BRANCH ++ p5.To.SetTarget(p1) ++ p6 := s.Prog(loong64.ADBAR) ++ p2.To.SetTarget(p6) ++ case ssa.OpLOONG64LoweredNilCheck: ++ // Issue a load which will fault if arg is nil. ++ p := s.Prog(loong64.AMOVB) ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = v.Args[0].Reg() ++ ssagen.AddAux(&p.From, v) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = loong64.REGTMP ++ if logopt.Enabled() { ++ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) ++ } ++ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers ++ base.WarnfAt(v.Pos, "generated nil check") ++ } ++ case ssa.OpLOONG64FPFlagTrue, ++ ssa.OpLOONG64FPFlagFalse: ++ // MOVV $0, r ++ // BFPF 2(PC) ++ // MOVV $1, r ++ branch := loong64.ABFPF ++ if v.Op == ssa.OpLOONG64FPFlagFalse { ++ branch = loong64.ABFPT ++ } ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGZERO ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ p2 := s.Prog(branch) ++ p2.To.Type = obj.TYPE_BRANCH ++ p3 := s.Prog(loong64.AMOVV) ++ p3.From.Type = obj.TYPE_CONST ++ p3.From.Offset = 1 ++ p3.To.Type = obj.TYPE_REG ++ p3.To.Reg = v.Reg() ++ p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land ++ p2.To.SetTarget(p4) ++ case ssa.OpLOONG64LoweredGetClosurePtr: ++ // Closure pointer is R22 (loong64.REGCTXT). ++ ssagen.CheckLoweredGetClosurePtr(v) ++ case ssa.OpLOONG64LoweredGetCallerSP: ++ // caller's SP is FixedFrameSize below the address of the first arg ++ p := s.Prog(loong64.AMOVV) ++ p.From.Type = obj.TYPE_ADDR ++ p.From.Offset = -base.Ctxt.FixedFrameSize() ++ p.From.Name = obj.NAME_PARAM ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpLOONG64LoweredGetCallerPC: ++ p := s.Prog(obj.AGETCALLERPC) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = v.Reg() ++ case ssa.OpClobber, ssa.OpClobberReg: ++ // TODO: implement for clobberdead experiment. Nop is ok for now. 
++ default: ++ v.Fatalf("genValue not implemented: %s", v.LongString()) ++ } ++} ++ ++var blockJump = map[ssa.BlockKind]struct { ++ asm, invasm obj.As ++}{ ++ ssa.BlockLOONG64EQ: {loong64.ABEQ, loong64.ABNE}, ++ ssa.BlockLOONG64NE: {loong64.ABNE, loong64.ABEQ}, ++ ssa.BlockLOONG64LTZ: {loong64.ABLTZ, loong64.ABGEZ}, ++ ssa.BlockLOONG64GEZ: {loong64.ABGEZ, loong64.ABLTZ}, ++ ssa.BlockLOONG64LEZ: {loong64.ABLEZ, loong64.ABGTZ}, ++ ssa.BlockLOONG64GTZ: {loong64.ABGTZ, loong64.ABLEZ}, ++ ssa.BlockLOONG64FPT: {loong64.ABFPT, loong64.ABFPF}, ++ ssa.BlockLOONG64FPF: {loong64.ABFPF, loong64.ABFPT}, ++} ++ ++func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { ++ switch b.Kind { ++ case ssa.BlockPlain: ++ if b.Succs[0].Block() != next { ++ p := s.Prog(obj.AJMP) ++ p.To.Type = obj.TYPE_BRANCH ++ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) ++ } ++ case ssa.BlockDefer: ++ // defer returns in R19: ++ // 0 if we should continue executing ++ // 1 if we should jump to deferreturn call ++ p := s.Prog(loong64.ABNE) ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = loong64.REGZERO ++ p.Reg = loong64.REG_R19 ++ p.To.Type = obj.TYPE_BRANCH ++ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) ++ if b.Succs[0].Block() != next { ++ p := s.Prog(obj.AJMP) ++ p.To.Type = obj.TYPE_BRANCH ++ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) ++ } ++ case ssa.BlockExit: ++ case ssa.BlockRet: ++ s.Prog(obj.ARET) ++ case ssa.BlockRetJmp: ++ p := s.Prog(obj.ARET) ++ p.To.Type = obj.TYPE_MEM ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = b.Aux.(*obj.LSym) ++ case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE, ++ ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ, ++ ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ, ++ ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF: ++ jmp := blockJump[b.Kind] ++ var p *obj.Prog ++ switch next { ++ case b.Succs[0].Block(): ++ p = s.Br(jmp.invasm, b.Succs[1].Block()) ++ case b.Succs[1].Block(): ++ p = s.Br(jmp.asm, b.Succs[0].Block()) ++ default: ++ if b.Likely != ssa.BranchUnlikely { ++ p = s.Br(jmp.asm, b.Succs[0].Block()) ++ s.Br(obj.AJMP, b.Succs[1].Block()) ++ } else { ++ p = s.Br(jmp.invasm, b.Succs[1].Block()) ++ s.Br(obj.AJMP, b.Succs[0].Block()) ++ } ++ } ++ if !b.Controls[0].Type.IsFlags() { ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = b.Controls[0].Reg() ++ } ++ default: ++ b.Fatalf("branch not implemented: %s", b.LongString()) ++ } ++} +-- +2.27.0 + diff --git a/0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch b/0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch new file mode 100644 index 0000000..c381506 --- /dev/null +++ b/0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch @@ -0,0 +1,36 @@ +From efdd10c5e18eb1cfe3c0f7fc26e0ad46e7568fa2 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 17:48:09 +0800 +Subject: [PATCH 07/56] cmd/compile/internal/ssa: config lower pass function + and register on loong64 + +Change-Id: I50d20eb22f2108d245513de8ac95ebe0b7e1a1dc +--- + src/cmd/compile/internal/ssa/config.go | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go +index a8393a1..c18a723 100644 +--- a/src/cmd/compile/internal/ssa/config.go ++++ b/src/cmd/compile/internal/ssa/config.go +@@ -262,6 +262,17 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config + c.FPReg = framepointerRegMIPS64 + c.LinkReg = linkRegMIPS64 + c.hasGReg = true ++ case "loong64": ++ 
c.PtrSize = 8 ++ c.RegSize = 8 ++ c.lowerBlock = rewriteBlockLOONG64 ++ c.lowerValue = rewriteValueLOONG64 ++ c.registers = registersLOONG64[:] ++ c.gpRegMask = gpRegMaskLOONG64 ++ c.fpRegMask = fpRegMaskLOONG64 ++ c.FPReg = framepointerRegLOONG64 ++ c.LinkReg = linkRegLOONG64 ++ c.hasGReg = true + case "s390x": + c.PtrSize = 8 + c.RegSize = 8 +-- +2.27.0 + diff --git a/0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch b/0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch new file mode 100644 index 0000000..c0dcb4e --- /dev/null +++ b/0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch @@ -0,0 +1,27 @@ +From 2c285601f33ce5abd952dff0bab0759e57c0c9bb Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 17:53:07 +0800 +Subject: [PATCH 08/56] cmd/compile/internal/ssa: increase the bit width of + BlockKind type + +Change-Id: I246d9c22334d0ea9e1440d29df05c9ec2d472b30 +--- + src/cmd/compile/internal/ssa/block.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go +index 71ca774..2e93304 100644 +--- a/src/cmd/compile/internal/ssa/block.go ++++ b/src/cmd/compile/internal/ssa/block.go +@@ -111,7 +111,7 @@ func (e Edge) String() string { + // Plain [] [next] + // If [boolean Value] [then, else] + // Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc) +-type BlockKind int8 ++type BlockKind int16 + + // short form print + func (b *Block) String() string { +-- +2.27.0 + diff --git a/0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch b/0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch new file mode 100644 index 0000000..95f8086 --- /dev/null +++ b/0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch @@ -0,0 +1,12601 @@ +From ab835f6a6247e8a75c1c0d72e303d47ce2abb125 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 24 Nov 2021 17:31:18 +0800 +Subject: [PATCH 09/56] cmd/compile/internal/ssa{,/gen}: define rules and + operation on loong64 + +Change-Id: Ia362ed7ba5d84046697aadbc8d6d4cbe495f6076 +--- + .../compile/internal/ssa/gen/LOONG64.rules | 679 ++ + .../compile/internal/ssa/gen/LOONG64Ops.go | 524 ++ + src/cmd/compile/internal/ssa/opGen.go | 2584 +++++- + .../compile/internal/ssa/rewriteLOONG64.go | 7943 +++++++++++++++++ + 4 files changed, 11357 insertions(+), 373 deletions(-) + create mode 100644 src/cmd/compile/internal/ssa/gen/LOONG64.rules + create mode 100644 src/cmd/compile/internal/ssa/gen/LOONG64Ops.go + create mode 100644 src/cmd/compile/internal/ssa/rewriteLOONG64.go + +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64.rules b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +new file mode 100644 +index 0000000..3fd4552 +--- /dev/null ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +@@ -0,0 +1,679 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++(Add(Ptr|64|32|16|8) ...) => (ADDV ...) ++(Add(32|64)F ...) => (ADD(F|D) ...) ++ ++(Sub(Ptr|64|32|16|8) ...) => (SUBV ...) ++(Sub(32|64)F ...) => (SUB(F|D) ...) ++ ++(Mul(64|32|16|8) x y) => (Select1 (MULVU x y)) ++(Mul(32|64)F ...) => (MUL(F|D) ...) ++(Mul64uhilo ...) => (MULVU ...) 
++(Select0 (Mul64uover x y)) => (Select1 (MULVU x y)) ++(Select1 (Mul64uover x y)) => (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) ++ ++(Hmul64 x y) => (Select0 (MULV x y)) ++(Hmul64u x y) => (Select0 (MULVU x y)) ++(Hmul32 x y) => (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]) ++(Hmul32u x y) => (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32]) ++ ++(Div64 x y) => (Select1 (DIVV x y)) ++(Div64u x y) => (Select1 (DIVVU x y)) ++(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y))) ++(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y))) ++(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y))) ++(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++(Div(32|64)F ...) => (DIV(F|D) ...) ++ ++(Mod64 x y) => (Select0 (DIVV x y)) ++(Mod64u x y) => (Select0 (DIVVU x y)) ++(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) ++(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) ++(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) ++(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ ++// (x + y) / 2 with x>=y => (x - y) / 2 + y ++(Avg64u x y) => (ADDV (SRLVconst (SUBV x y) [1]) y) ++ ++(And(64|32|16|8) ...) => (AND ...) ++(Or(64|32|16|8) ...) => (OR ...) ++(Xor(64|32|16|8) ...) => (XOR ...) ++ ++// shifts ++// hardware instruction uses only the low 6 bits of the shift ++// we compare to 64 to ensure Go semantics for large shifts ++(Lsh64x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++(Lsh64x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++(Lsh64x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++(Lsh64x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ ++(Lsh32x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++(Lsh32x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++(Lsh32x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++(Lsh32x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ ++(Lsh16x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++(Lsh16x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++(Lsh16x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++(Lsh16x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ ++(Lsh8x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++(Lsh8x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++(Lsh8x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++(Lsh8x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ ++(Rsh64Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) ++(Rsh64Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) ++(Rsh64Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x 
(ZeroExt16to64 y))) ++(Rsh64Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) ++ ++(Rsh32Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) ++(Rsh32Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Rsh32Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) ++(Rsh32Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) ++ ++(Rsh16Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) ++(Rsh16Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) ++(Rsh16Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Rsh16Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) ++ ++(Rsh8Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) ++(Rsh8Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) ++(Rsh8Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) ++(Rsh8Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ ++(Rsh64x64 x y) => (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++(Rsh64x32 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++(Rsh64x16 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++(Rsh64x8 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ ++(Rsh32x64 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++(Rsh32x32 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++(Rsh32x16 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++(Rsh32x8 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ ++(Rsh16x64 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++(Rsh16x32 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++(Rsh16x16 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++(Rsh16x8 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ ++(Rsh8x64 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++(Rsh8x32 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++(Rsh8x16 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++(Rsh8x8 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ ++// rotates ++(RotateLeft8 x (MOVVconst [c])) => (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) ++(RotateLeft16 x (MOVVconst [c])) => (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) ++(RotateLeft32 x (MOVVconst [c])) => (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) ++(RotateLeft64 x (MOVVconst [c])) => (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x 
(MOVVconst [-c&63]))) ++ ++// unary ops ++(Neg(64|32|16|8) ...) => (NEGV ...) ++(Neg(32|64)F ...) => (NEG(F|D) ...) ++ ++(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x) ++ ++(Sqrt ...) => (SQRTD ...) ++(Sqrt32 ...) => (SQRTF ...) ++ ++// boolean ops -- booleans are represented with 0=false, 1=true ++(AndB ...) => (AND ...) ++(OrB ...) => (OR ...) ++(EqB x y) => (XOR (MOVVconst [1]) (XOR x y)) ++(NeqB ...) => (XOR ...) ++(Not x) => (XORconst [1] x) ++ ++// constants ++(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)]) ++(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)]) ++(ConstNil) => (MOVVconst [0]) ++(ConstBool [t]) => (MOVVconst [int64(b2i(t))]) ++ ++(Slicemask x) => (SRAVconst (NEGV x) [63]) ++ ++// truncations ++// Because we ignore high parts of registers, truncates are just copies. ++(Trunc16to8 ...) => (Copy ...) ++(Trunc32to8 ...) => (Copy ...) ++(Trunc32to16 ...) => (Copy ...) ++(Trunc64to8 ...) => (Copy ...) ++(Trunc64to16 ...) => (Copy ...) ++(Trunc64to32 ...) => (Copy ...) ++ ++// Zero-/Sign-extensions ++(ZeroExt8to16 ...) => (MOVBUreg ...) ++(ZeroExt8to32 ...) => (MOVBUreg ...) ++(ZeroExt16to32 ...) => (MOVHUreg ...) ++(ZeroExt8to64 ...) => (MOVBUreg ...) ++(ZeroExt16to64 ...) => (MOVHUreg ...) ++(ZeroExt32to64 ...) => (MOVWUreg ...) ++ ++(SignExt8to16 ...) => (MOVBreg ...) ++(SignExt8to32 ...) => (MOVBreg ...) ++(SignExt16to32 ...) => (MOVHreg ...) ++(SignExt8to64 ...) => (MOVBreg ...) ++(SignExt16to64 ...) => (MOVHreg ...) ++(SignExt32to64 ...) => (MOVWreg ...) ++ ++// float <=> int conversion ++(Cvt32to32F ...) => (MOVWF ...) ++(Cvt32to64F ...) => (MOVWD ...) ++(Cvt64to32F ...) => (MOVVF ...) ++(Cvt64to64F ...) => (MOVVD ...) ++(Cvt32Fto32 ...) => (TRUNCFW ...) ++(Cvt64Fto32 ...) => (TRUNCDW ...) ++(Cvt32Fto64 ...) => (TRUNCFV ...) ++(Cvt64Fto64 ...) => (TRUNCDV ...) ++(Cvt32Fto64F ...) => (MOVFD ...) ++(Cvt64Fto32F ...) => (MOVDF ...) ++ ++(CvtBoolToUint8 ...) => (Copy ...) ++ ++(Round(32|64)F ...) => (Copy ...) 
++ ++// comparisons ++(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) ++(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y)) ++(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y)) ++(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y)) ++ ++(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) ++(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) ++(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) ++(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0])) ++(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0])) ++(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y)) ++ ++(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x)) ++(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x)) ++(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x)) ++(Less64 x y) => (SGT y x) ++(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN ++ ++(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) ++(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) ++(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) ++(Less64U x y) => (SGTU y x) ++ ++(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) ++(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) ++(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) ++(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y)) ++(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN ++ ++(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y)) ++ ++(OffPtr [off] ptr:(SP)) => (MOVVaddr [int32(off)] ptr) ++(OffPtr [off] ptr) => (ADDVconst [off] ptr) ++ ++(Addr {sym} base) => (MOVVaddr {sym} base) ++(LocalAddr {sym} base _) => (MOVVaddr {sym} base) ++ ++// loads ++(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) ++(Load ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem) ++(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem) ++(Load ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem) ++(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem) ++(Load ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem) ++(Load ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem) ++(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem) ++(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem) ++(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) ++ ++// stores ++(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVVstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem) ++(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem) ++ ++// zeroing ++(Zero [0] _ mem) => mem ++(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst 
[0]) mem) ++(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => ++ (MOVHstore ptr (MOVVconst [0]) mem) ++(Zero [2] ptr mem) => ++ (MOVBstore [1] ptr (MOVVconst [0]) ++ (MOVBstore [0] ptr (MOVVconst [0]) mem)) ++(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => ++ (MOVWstore ptr (MOVVconst [0]) mem) ++(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [2] ptr (MOVVconst [0]) ++ (MOVHstore [0] ptr (MOVVconst [0]) mem)) ++(Zero [4] ptr mem) => ++ (MOVBstore [3] ptr (MOVVconst [0]) ++ (MOVBstore [2] ptr (MOVVconst [0]) ++ (MOVBstore [1] ptr (MOVVconst [0]) ++ (MOVBstore [0] ptr (MOVVconst [0]) mem)))) ++(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 => ++ (MOVVstore ptr (MOVVconst [0]) mem) ++(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 => ++ (MOVWstore [4] ptr (MOVVconst [0]) ++ (MOVWstore [0] ptr (MOVVconst [0]) mem)) ++(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [6] ptr (MOVVconst [0]) ++ (MOVHstore [4] ptr (MOVVconst [0]) ++ (MOVHstore [2] ptr (MOVVconst [0]) ++ (MOVHstore [0] ptr (MOVVconst [0]) mem)))) ++ ++(Zero [3] ptr mem) => ++ (MOVBstore [2] ptr (MOVVconst [0]) ++ (MOVBstore [1] ptr (MOVVconst [0]) ++ (MOVBstore [0] ptr (MOVVconst [0]) mem))) ++(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [4] ptr (MOVVconst [0]) ++ (MOVHstore [2] ptr (MOVVconst [0]) ++ (MOVHstore [0] ptr (MOVVconst [0]) mem))) ++(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 => ++ (MOVWstore [8] ptr (MOVVconst [0]) ++ (MOVWstore [4] ptr (MOVVconst [0]) ++ (MOVWstore [0] ptr (MOVVconst [0]) mem))) ++(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 => ++ (MOVVstore [8] ptr (MOVVconst [0]) ++ (MOVVstore [0] ptr (MOVVconst [0]) mem)) ++(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 => ++ (MOVVstore [16] ptr (MOVVconst [0]) ++ (MOVVstore [8] ptr (MOVVconst [0]) ++ (MOVVstore [0] ptr (MOVVconst [0]) mem))) ++ ++// medium zeroing uses a duff device ++// 8, and 128 are magic constants, see runtime/mkduff.go ++(Zero [s] {t} ptr mem) ++ && s%8 == 0 && s > 24 && s <= 8*128 ++ && t.Alignment()%8 == 0 && !config.noDuffDevice => ++ (DUFFZERO [8 * (128 - s/8)] ptr mem) ++ ++// large or unaligned zeroing uses a loop ++(Zero [s] {t} ptr mem) ++ && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 => ++ (LoweredZero [t.Alignment()] ++ ptr ++ (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) ++ mem) ++ ++// moves ++(Move [0] _ _ mem) => mem ++(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem) ++(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => ++ (MOVHstore dst (MOVHload src mem) mem) ++(Move [2] dst src mem) => ++ (MOVBstore [1] dst (MOVBload [1] src mem) ++ (MOVBstore dst (MOVBload src mem) mem)) ++(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => ++ (MOVWstore dst (MOVWload src mem) mem) ++(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [2] dst (MOVHload [2] src mem) ++ (MOVHstore dst (MOVHload src mem) mem)) ++(Move [4] dst src mem) => ++ (MOVBstore [3] dst (MOVBload [3] src mem) ++ (MOVBstore [2] dst (MOVBload [2] src mem) ++ (MOVBstore [1] dst (MOVBload [1] src mem) ++ (MOVBstore dst (MOVBload src mem) mem)))) ++(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 => ++ (MOVVstore dst (MOVVload src mem) mem) ++(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 => ++ (MOVWstore [4] dst (MOVWload [4] src mem) ++ (MOVWstore dst (MOVWload src mem) mem)) ++(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [6] dst (MOVHload [6] src mem) ++ (MOVHstore [4] dst (MOVHload [4] src mem) ++ (MOVHstore [2] dst (MOVHload 
[2] src mem) ++ (MOVHstore dst (MOVHload src mem) mem)))) ++ ++(Move [3] dst src mem) => ++ (MOVBstore [2] dst (MOVBload [2] src mem) ++ (MOVBstore [1] dst (MOVBload [1] src mem) ++ (MOVBstore dst (MOVBload src mem) mem))) ++(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 => ++ (MOVHstore [4] dst (MOVHload [4] src mem) ++ (MOVHstore [2] dst (MOVHload [2] src mem) ++ (MOVHstore dst (MOVHload src mem) mem))) ++(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 => ++ (MOVWstore [8] dst (MOVWload [8] src mem) ++ (MOVWstore [4] dst (MOVWload [4] src mem) ++ (MOVWstore dst (MOVWload src mem) mem))) ++(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 => ++ (MOVVstore [8] dst (MOVVload [8] src mem) ++ (MOVVstore dst (MOVVload src mem) mem)) ++(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 => ++ (MOVVstore [16] dst (MOVVload [16] src mem) ++ (MOVVstore [8] dst (MOVVload [8] src mem) ++ (MOVVstore dst (MOVVload src mem) mem))) ++ ++// medium move uses a duff device ++(Move [s] {t} dst src mem) ++ && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 ++ && !config.noDuffDevice && logLargeCopy(v, s) => ++ (DUFFCOPY [16 * (128 - s/8)] dst src mem) ++// 16 and 128 are magic constants. 16 is the number of bytes to encode: ++// MOVV (R1), R23 ++// ADDV $8, R1 ++// MOVV R23, (R2) ++// ADDV $8, R2 ++// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy. ++ ++// large or unaligned move uses a loop ++(Move [s] {t} dst src mem) ++ && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 => ++ (LoweredMove [t.Alignment()] ++ dst ++ src ++ (ADDVconst src [s-moveSize(t.Alignment(), config)]) ++ mem) ++ ++// calls ++(StaticCall ...) => (CALLstatic ...) ++(ClosureCall ...) => (CALLclosure ...) ++(InterCall ...) => (CALLinter ...) ++ ++// atomic intrinsics ++(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...) ++(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...) ++ ++(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...) ++(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...) ++ ++(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...) ++ ++(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...) ++ ++(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...) ++ ++// checks ++(NilCheck ...) => (LoweredNilCheck ...) ++(IsNonNil ptr) => (SGTU ptr (MOVVconst [0])) ++(IsInBounds idx len) => (SGTU len idx) ++(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len)) ++ ++// pseudo-ops ++(GetClosurePtr ...) => (LoweredGetClosurePtr ...) ++(GetCallerSP ...) => (LoweredGetCallerSP ...) ++(GetCallerPC ...) => (LoweredGetCallerPC ...) ++ ++(If cond yes no) => (NE cond yes no) ++ ++// Write barrier. ++(WB ...) => (LoweredWB ...) 
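The Zero and Move rules above pick between inline stores, a jump into the runtime's Duff routines, and a LoweredZero/LoweredMove loop, with the DUFFZERO auxint computed as 8*(128 - s/8) and DUFFCOPY as 16*(128 - s/8), per the magic-constant comments. A simplified Go sketch of how a zeroing size maps onto one of the three strategies, assuming each duffzero block is 8 bytes of code that clears 8 bytes (which is what the 8 and 128 constants encode); zeroStrategy and duffBlocks are illustrative names, not code from this patch, and the small-size case is collapsed here where the rules handle it size by size.

package main

import "fmt"

const duffBlocks = 128 // unrolled blocks in runtime duffzero, see runtime/mkduff.go

// zeroStrategy models the Zero lowering above: small aligned sizes expand
// inline, medium sizes jump into duffzero, everything else uses the loop.
func zeroStrategy(size, align int64, noDuffDevice bool) string {
	switch {
	case size <= 24 && align%8 == 0:
		return "inline store sequence"
	case size%8 == 0 && size > 24 && size <= 8*duffBlocks &&
		align%8 == 0 && !noDuffDevice:
		// Starting at offset 8*(128 - size/8) runs exactly size/8 blocks.
		return fmt.Sprintf("DUFFZERO [%d]", 8*(duffBlocks-size/8))
	default:
		return "LoweredZero loop"
	}
}

func main() {
	fmt.Println(zeroStrategy(16, 8, false))   // inline store sequence
	fmt.Println(zeroStrategy(64, 8, false))   // DUFFZERO [960]
	fmt.Println(zeroStrategy(4096, 8, false)) // LoweredZero loop
}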
++ ++(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) ++(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) ++(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) ++ ++// Optimizations ++ ++// Absorb boolean tests into block ++(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no) ++(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no) ++(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no) ++(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no) ++(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no) ++(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no) ++(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no) ++(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no) ++(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no) ++(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no) ++(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no) ++(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no) ++(NE (SGTUconst [1] x) yes no) => (EQ x yes no) ++(EQ (SGTUconst [1] x) yes no) => (NE x yes no) ++(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no) ++(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no) ++(NE (SGTconst [0] x) yes no) => (LTZ x yes no) ++(EQ (SGTconst [0] x) yes no) => (GEZ x yes no) ++(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no) ++(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no) ++ ++// fold offset into address ++(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) ++ ++// fold address into load/store ++(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem) ++(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem) ++(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem) ++(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem) ++(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem) ++(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem) ++(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem) ++(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem) ++(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem) ++ ++(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem) 
++(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem) ++(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) ++(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) ++(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) ++(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) ++ ++(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ ++(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVDstore [off1+int32(off2)] 
{mergeSym(sym1,sym2)} ptr val mem) ++(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => ++ (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ ++// store zero ++(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem) ++(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem) ++(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem) ++ ++// don't extend after proper load ++(MOVBreg x:(MOVBload _ _)) => (MOVVreg x) ++(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVHreg x:(MOVBload _ _)) => (MOVVreg x) ++(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVHreg x:(MOVHload _ _)) => (MOVVreg x) ++(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVBload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVHload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x) ++(MOVWreg x:(MOVWload _ _)) => (MOVVreg x) ++(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x) ++(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x) ++(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x) ++ ++// fold double extensions ++(MOVBreg x:(MOVBreg _)) => (MOVVreg x) ++(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVHreg x:(MOVBreg _)) => (MOVVreg x) ++(MOVHreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVHreg x:(MOVHreg _)) => (MOVVreg x) ++(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x) ++(MOVWreg x:(MOVBreg _)) => (MOVVreg x) ++(MOVWreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVWreg x:(MOVHreg _)) => (MOVVreg x) ++(MOVWreg x:(MOVWreg _)) => (MOVVreg x) ++(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x) ++(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x) ++(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x) ++ ++// don't extend before store ++(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) ++(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) ++(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) ++(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem) ++(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) ++(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem) ++(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem) ++ ++// if a register move has 
only 1 use, just use the same register without emitting instruction ++// MOVVnop doesn't emit instruction, only for ensuring the type. ++(MOVVreg x) && x.Uses == 1 => (MOVVnop x) ++ ++// fold constant into arithmetic ops ++(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x) ++(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x) ++(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x) ++(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x) ++(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x) ++(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x) ++ ++(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) ++(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) ++(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63]) ++(SLLV x (MOVVconst [c])) => (SLLVconst x [c]) ++(SRLV x (MOVVconst [c])) => (SRLVconst x [c]) ++(SRAV x (MOVVconst [c])) => (SRAVconst x [c]) ++ ++(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x) ++(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x) ++ ++// mul by constant ++(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x) ++(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0]) ++(Select1 (MULVU x (MOVVconst [1]))) => x ++(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x) ++ ++// div by constant ++(Select1 (DIVVU x (MOVVconst [1]))) => x ++(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x) ++(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod ++(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod ++ ++// generic simplifications ++(ADDV x (NEGV y)) => (SUBV x y) ++(SUBV x x) => (MOVVconst [0]) ++(SUBV (MOVVconst [0]) x) => (NEGV x) ++(AND x x) => x ++(OR x x) => x ++(XOR x x) => (MOVVconst [0]) ++ ++// remove redundant *const ops ++(ADDVconst [0] x) => x ++(SUBVconst [0] x) => x ++(ANDconst [0] _) => (MOVVconst [0]) ++(ANDconst [-1] x) => x ++(ORconst [0] x) => x ++(ORconst [-1] _) => (MOVVconst [-1]) ++(XORconst [0] x) => x ++(XORconst [-1] x) => (NORconst [0] x) ++ ++// generic constant folding ++(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d]) ++(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x) ++(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x) ++(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c]) ++(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x) ++(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x) ++(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)]) ++(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))]) ++(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)]) ++(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d]) ++(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c/d]) ++(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)/uint64(d))]) ++(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c%d]) // mod ++(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod ++(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d]) ++(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) ++(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d]) ++(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x) ++(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d]) ++(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x) ++(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)]) ++(NEGV (MOVVconst 
[c])) => (MOVVconst [-c]) ++(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))]) ++(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))]) ++(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))]) ++(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))]) ++(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))]) ++(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))]) ++(MOVVreg (MOVVconst [c])) => (MOVVconst [c]) ++//(LoweredAtomicStore32 ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero32 ptr mem) ++//(LoweredAtomicStore64 ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero64 ptr mem) ++//(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [c] ptr mem) ++//(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem) ++ ++// constant comparisons ++(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1]) ++(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0]) ++(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1]) ++(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0]) ++ ++// other known comparisons ++(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1]) ++(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0]) ++(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1]) ++(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0]) ++(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1]) ++(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1]) ++(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0]) ++(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1]) ++(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0]) ++(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1]) ++(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0]) ++(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1]) ++(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1]) ++(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) ++(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) ++ ++// absorb constants into branches ++(EQ (MOVVconst [0]) yes no) => (First yes no) ++(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes) ++(NE (MOVVconst [0]) yes no) => (First no yes) ++(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no) ++(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no) ++(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes) ++(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no) ++(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes) ++(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no) ++(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes) ++(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no) ++(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes) +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go +new file mode 100644 +index 0000000..1f0fec7 +--- /dev/null ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go +@@ -0,0 +1,524 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build ignore ++// +build ignore ++ ++package main ++ ++import "strings" ++ ++// Notes: ++// - Integer types live in the low portion of registers. 
Upper portions are junk. ++// - Boolean types use the low-order byte of a register. 0=false, 1=true. ++// Upper bytes are junk. ++// - *const instructions may use a constant larger than the instruction can encode. ++// In this case the assembler expands to multiple instructions and uses tmp ++// register (R23). ++ ++// Suffixes encode the bit width of various instructions. ++// V (vlong) = 64 bit ++// WU (word) = 32 bit unsigned ++// W (word) = 32 bit ++// H (half word) = 16 bit ++// HU = 16 bit unsigned ++// B (byte) = 8 bit ++// BU = 8 bit unsigned ++// F (float) = 32 bit float ++// D (double) = 64 bit float ++ ++// Note: registers not used in regalloc are not included in this list, ++// so that regmask stays within int64 ++// Be careful when hand coding regmasks. ++var regNamesLOONG64 = []string{ ++ "R0", // constant 0 ++ "R1", ++ "SP", // aka R3 ++ "R4", ++ "R5", ++ "R6", ++ "R7", ++ "R8", ++ "R9", ++ "R10", ++ "R11", ++ "R12", ++ "R13", ++ "R14", ++ "R15", ++ "R16", ++ "R17", ++ "R18", ++ "R19", ++ "R20", ++ "R21", ++ "g", // aka R22 ++ "R23", ++ "R24", ++ "R25", ++ "R26", ++ "R27", ++ "R28", ++ "R29", ++ // R30 is REGTMP not used in regalloc ++ "R31", ++ ++ "F0", ++ "F1", ++ "F2", ++ "F3", ++ "F4", ++ "F5", ++ "F6", ++ "F7", ++ "F8", ++ "F9", ++ "F10", ++ "F11", ++ "F12", ++ "F13", ++ "F14", ++ "F15", ++ "F16", ++ "F17", ++ "F18", ++ "F19", ++ "F20", ++ "F21", ++ "F22", ++ "F23", ++ "F24", ++ "F25", ++ "F26", ++ "F27", ++ "F28", ++ "F29", ++ "F30", ++ "F31", ++ ++ // If you add registers, update asyncPreempt in runtime. ++ ++ // pseudo-registers ++ "SB", ++} ++ ++func init() { ++ // Make map from reg names to reg integers. ++ if len(regNamesLOONG64) > 64 { ++ panic("too many registers") ++ } ++ num := map[string]int{} ++ for i, name := range regNamesLOONG64 { ++ num[name] = i ++ } ++ buildReg := func(s string) regMask { ++ m := regMask(0) ++ for _, r := range strings.Split(s, " ") { ++ if n, ok := num[r]; ok { ++ m |= regMask(1) << uint(n) ++ continue ++ } ++ panic("register " + r + " not found") ++ } ++ return m ++ } ++ ++ // Common individual register masks ++ var ( ++ gp = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R21-unused, R22 is g, R30 is REGTMP ++ gps = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31") | buildReg("g") ++ gpg = gp | buildReg("g") ++ gpsp = gp | buildReg("SP") ++ gpspg = gpg | buildReg("SP") ++ gpspsbg = gpspg | buildReg("SB") ++ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") ++ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g ++ r1 = buildReg("R19") ++ r2 = buildReg("R18") ++ r3 = buildReg("R17") ++ r4 = buildReg("R4") ++ ) ++ // Common regInfo ++ var ( ++ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} ++ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} ++ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} ++ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} ++ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} ++ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} ++ gpstore0 = regInfo{inputs: []regMask{gpspsbg}} ++ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} ++ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} ++ fp01 = regInfo{inputs: 
nil, outputs: []regMask{fp}} ++ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} ++ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} ++ fp2flags = regInfo{inputs: []regMask{fp, fp}} ++ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} ++ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} ++ readflags = regInfo{inputs: nil, outputs: []regMask{gp}} ++ ) ++ ops := []opData{ ++ // binary ops ++ {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1 ++ {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops. ++ {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1 ++ {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt ++ ++ { ++ name: "MULV", ++ //aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gps, gps}, ++ outputs: []regMask{buildReg("R17"), buildReg("R18")}, ++ clobbers: buildReg("R17 R18"), ++ }, ++ clobberFlags: true, ++ typ: "(Int64,Int64)", ++ }, ++ ++ { ++ name: "MULVU", ++ //aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gps, gps}, ++ outputs: []regMask{buildReg("R17"), buildReg("R18")}, ++ clobbers: buildReg("R17 R18"), ++ }, ++ clobberFlags: true, ++ typ: "(UInt64,UInt64)", ++ }, ++ ++ { ++ name: "DIVV", ++ //aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gps, gps}, ++ outputs: []regMask{buildReg("R17"), buildReg("R18")}, ++ clobbers: buildReg("R17 R18"), ++ }, ++ clobberFlags: true, ++ typ: "(Int64,Int64)", ++ }, ++ ++ { ++ name: "DIVVU", ++ //aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gps, gps}, ++ outputs: []regMask{buildReg("R17"), buildReg("R18")}, ++ clobbers: buildReg("R17 R18"), ++ }, ++ clobberFlags: true, ++ typ: "(UInt64,UInt64)", ++ }, ++ ++ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1 ++ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1 ++ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1 ++ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1 ++ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1 ++ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1 ++ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1 ++ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1 ++ ++ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 ++ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt ++ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1 ++ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt ++ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1 ++ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt ++ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1) ++ {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt) ++ ++ {name: "NEGV", argLength: 1, reg: gp11}, // -arg0 ++ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 ++ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 ++ {name: "SQRTD", argLength: 1, reg: fp11, asm: 
"SQRTD"}, // sqrt(arg0), float64 ++ {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32 ++ ++ // shifts ++ {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64 ++ {name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt ++ {name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64 ++ {name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned ++ {name: "SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64 ++ {name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed ++ ++ // comparisons ++ {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise ++ {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise ++ {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise ++ {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise ++ ++ {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32 ++ {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64 ++ {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32 ++ {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64 ++ {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32 ++ {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64 ++ ++ // moves ++ {name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint ++ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float ++ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float ++ ++ {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB ++ ++ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. 
++ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. ++ ++ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. ++ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. ++ ++ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem. ++ {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem. 
++ ++ // conversions ++ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte ++ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte ++ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half ++ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half ++ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word ++ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word ++ {name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0 ++ ++ {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register ++ ++ {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32 ++ {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64 ++ {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32 ++ {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64 ++ {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32 ++ {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32 ++ {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64 ++ {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64 ++ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64 ++ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 ++ ++ // function calls ++ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem ++ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem ++ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem ++ ++ // duffzero ++ // arg0 = address of memory to zero ++ // arg1 = mem ++ // auxint = offset into duffzero code to start executing ++ // returns mem ++ // R1 aka loong64.REGRT1 changed as side effect ++ { ++ name: "DUFFZERO", ++ aux: "Int64", ++ argLength: 2, ++ reg: regInfo{ ++ inputs: []regMask{gp}, ++ clobbers: buildReg("R19 R1"), ++ }, ++ faultOnNilArg0: true, ++ }, ++ ++ // duffcopy ++ // arg0 = address of dst memory (in R20, changed as side effect) REGRT2 ++ // arg1 = address of src memory (in R19, changed as side effect) REGRT1 ++ // arg2 = mem ++ // auxint = offset into duffcopy code to start executing ++ // returns mem ++ { ++ name: "DUFFCOPY", ++ aux: "Int64", ++ argLength: 3, ++ reg: regInfo{ ++ inputs: []regMask{buildReg("R20"), buildReg("R19")}, ++ clobbers: buildReg("R19 R20 R1"), ++ }, ++ faultOnNilArg0: true, ++ faultOnNilArg1: true, ++ }, ++ ++ // large or unaligned zeroing ++ // arg0 = address of memory to zero (in R19, changed as side effect) ++ // arg1 = address of the last element to zero ++ // arg2 = mem ++ // auxint = alignment ++ // returns mem ++ // SUBV $8, R19 ++ // MOVV R0, 8(R19) ++ // ADDV $8, R19 ++ // BNE Rarg1, R19, -2(PC) ++ { ++ name: "LoweredZero", ++ aux: "Int64", ++ argLength: 3, ++ reg: regInfo{ ++ inputs: []regMask{buildReg("R19"), gp}, ++ clobbers: buildReg("R19"), ++ }, ++ clobberFlags: true, ++ faultOnNilArg0: true, ++ }, ++ ++ // large or unaligned move ++ // arg0 = address of dst memory (in R4, changed as side effect) ++ // arg1 = address of src memory (in R19, changed as side effect) ++ // arg2 = address of the last element of src ++ // arg3 = mem ++ // auxint = alignment ++ // returns mem ++ // SUBV $8, R19 ++ // MOVV 8(R19), Rtmp ++ // MOVV Rtmp, (R4) ++ // ADDV $8, R19 ++ // ADDV $8, R4 ++ // BNE Rarg2, R19, -4(PC) ++ { ++ name: "LoweredMove", ++ aux: "Int64", ++ argLength: 4, ++ reg: regInfo{ ++ inputs: []regMask{buildReg("R4"), buildReg("R19"), gp}, ++ clobbers: buildReg("R19 R4"), ++ }, ++ clobberFlags: true, ++ faultOnNilArg0: true, ++ faultOnNilArg1: true, ++ }, ++ ++ // atomic loads. ++ // load from arg0. arg1=mem. ++ // returns so they can be properly ordered with other loads. ++ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true}, ++ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true}, ++ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true}, ++ ++ // atomic stores. ++ // store arg1 to arg0. arg2=mem. returns memory. ++ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, ++ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, ++ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, ++ // store zero to arg0. arg1=mem. returns memory. ++ {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, ++ {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, ++ ++ // atomic exchange. ++ // store arg1 to arg0. arg2=mem. returns . 
++ // DBAR ++ // LL (Rarg0), Rout ++ // MOVV Rarg1, Rtmp ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ ++ // atomic add. ++ // *arg0 += arg1. arg2=mem. returns . ++ // DBAR ++ // LL (Rarg0), Rout ++ // ADDV Rarg1, Rout, Rtmp ++ // SC Rtmp, (Rarg0) ++ // BEQ Rtmp, -3(PC) ++ // DBAR ++ // ADDV Rarg1, Rout ++ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ // *arg0 += auxint. arg1=mem. returns . auxint is 32-bit. ++ {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ ++ // atomic compare and swap. ++ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. ++ // if *arg0 == arg1 { ++ // *arg0 = arg2 ++ // return (true, memory) ++ // } else { ++ // return (false, memory) ++ // } ++ // DBAR ++ // MOVV $0, Rout ++ // LL (Rarg0), Rtmp ++ // BNE Rtmp, Rarg1, 4(PC) ++ // MOVV Rarg2, Rout ++ // SC Rout, (Rarg0) ++ // BEQ Rout, -4(PC) ++ // DBAR ++ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, ++ ++ // pseudo-ops ++ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. ++ ++ {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true ++ {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false ++ ++ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, ++ // and sorts it to the very beginning of the block to prevent other ++ // use of R22 (loong64.REGCTXT, the closure pointer) ++ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R29")}}, zeroWidth: true}, ++ ++ // LoweredGetCallerSP returns the SP of the caller of the current function. ++ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, ++ ++ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. ++ // I.e., if f calls g "calls" getcallerpc, ++ // the result should be the PC within f that g will return to. ++ // See runtime/stubs.go for a more detailed discussion. ++ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, ++ ++ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier ++ // It saves all GP registers if necessary, ++ // but clobbers R1 (LR) because it's a call ++ // and R30 (REGTMP). 
++ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R27"), buildReg("R28")}, clobbers: (callerSave &^ gpg) | buildReg("R1")}, clobberFlags: true, aux: "Sym", symEffect: "None"}, ++ ++ // There are three of these functions so that they can have three different register inputs. ++ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the ++ // default registers to match so we don't need to copy registers around unnecessarily. ++ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). ++ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). ++ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). ++ } ++ ++ blocks := []blockData{ ++ {name: "EQ", controls: 1}, ++ {name: "NE", controls: 1}, ++ {name: "LTZ", controls: 1}, // < 0 ++ {name: "LEZ", controls: 1}, // <= 0 ++ {name: "GTZ", controls: 1}, // > 0 ++ {name: "GEZ", controls: 1}, // >= 0 ++ {name: "FPT", controls: 1}, // FP flag is true ++ {name: "FPF", controls: 1}, // FP flag is false ++ } ++ ++ archs = append(archs, arch{ ++ name: "LOONG64", ++ pkg: "cmd/internal/obj/loong64", ++ genfile: "../../loong64/ssa.go", ++ ops: ops, ++ blocks: blocks, ++ regnames: regNamesLOONG64, ++ ParamIntRegNames: "R4 R5 R6 R7 R8 R9 R10 R11", ++ ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7", ++ gpregmask: gp, ++ fpregmask: fp, ++ framepointerreg: -1, // not used ++ linkreg: int8(num["R1"]), ++ }) ++} +diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go +index 1c37fbe..5aa0f52 100644 +--- a/src/cmd/compile/internal/ssa/opGen.go ++++ b/src/cmd/compile/internal/ssa/opGen.go +@@ -6,6 +6,7 @@ import ( + "cmd/internal/obj" + "cmd/internal/obj/arm" + "cmd/internal/obj/arm64" ++ "cmd/internal/obj/loong64" + "cmd/internal/obj/mips" + "cmd/internal/obj/ppc64" + "cmd/internal/obj/riscv" +@@ -91,6 +92,15 @@ const ( + BlockARM64GTnoov + BlockARM64GEnoov + ++ BlockLOONG64EQ ++ BlockLOONG64NE ++ BlockLOONG64LTZ ++ BlockLOONG64LEZ ++ BlockLOONG64GTZ ++ BlockLOONG64GEZ ++ BlockLOONG64FPT ++ BlockLOONG64FPF ++ + BlockMIPSEQ + BlockMIPSNE + BlockMIPSLTZ +@@ -229,6 +239,15 @@ var blockString = [...]string{ + BlockARM64GTnoov: "GTnoov", + BlockARM64GEnoov: "GEnoov", + ++ BlockLOONG64EQ: "EQ", ++ BlockLOONG64NE: "NE", ++ BlockLOONG64LTZ: "LTZ", ++ BlockLOONG64LEZ: "LEZ", ++ BlockLOONG64GTZ: "GTZ", ++ BlockLOONG64GEZ: "GEZ", ++ BlockLOONG64FPT: "FPT", ++ BlockLOONG64FPF: "FPF", ++ + BlockMIPSEQ: "EQ", + BlockMIPSNE: "NE", + BlockMIPSLTZ: "LTZ", +@@ -1611,6 +1630,126 @@ const ( + OpARM64LoweredPanicBoundsB + OpARM64LoweredPanicBoundsC + ++ OpLOONG64ADDV ++ OpLOONG64ADDVconst ++ OpLOONG64SUBV ++ OpLOONG64SUBVconst ++ OpLOONG64MULV ++ OpLOONG64MULVU ++ OpLOONG64DIVV ++ OpLOONG64DIVVU ++ OpLOONG64ADDF ++ OpLOONG64ADDD ++ OpLOONG64SUBF ++ OpLOONG64SUBD ++ OpLOONG64MULF ++ OpLOONG64MULD ++ OpLOONG64DIVF ++ OpLOONG64DIVD ++ OpLOONG64AND ++ OpLOONG64ANDconst ++ OpLOONG64OR ++ OpLOONG64ORconst ++ OpLOONG64XOR ++ OpLOONG64XORconst ++ OpLOONG64NOR ++ 
OpLOONG64NORconst ++ OpLOONG64NEGV ++ OpLOONG64NEGF ++ OpLOONG64NEGD ++ OpLOONG64SQRTD ++ OpLOONG64SQRTF ++ OpLOONG64SLLV ++ OpLOONG64SLLVconst ++ OpLOONG64SRLV ++ OpLOONG64SRLVconst ++ OpLOONG64SRAV ++ OpLOONG64SRAVconst ++ OpLOONG64SGT ++ OpLOONG64SGTconst ++ OpLOONG64SGTU ++ OpLOONG64SGTUconst ++ OpLOONG64CMPEQF ++ OpLOONG64CMPEQD ++ OpLOONG64CMPGEF ++ OpLOONG64CMPGED ++ OpLOONG64CMPGTF ++ OpLOONG64CMPGTD ++ OpLOONG64MOVVconst ++ OpLOONG64MOVFconst ++ OpLOONG64MOVDconst ++ OpLOONG64MOVVaddr ++ OpLOONG64MOVBload ++ OpLOONG64MOVBUload ++ OpLOONG64MOVHload ++ OpLOONG64MOVHUload ++ OpLOONG64MOVWload ++ OpLOONG64MOVWUload ++ OpLOONG64MOVVload ++ OpLOONG64MOVFload ++ OpLOONG64MOVDload ++ OpLOONG64MOVBstore ++ OpLOONG64MOVHstore ++ OpLOONG64MOVWstore ++ OpLOONG64MOVVstore ++ OpLOONG64MOVFstore ++ OpLOONG64MOVDstore ++ OpLOONG64MOVBstorezero ++ OpLOONG64MOVHstorezero ++ OpLOONG64MOVWstorezero ++ OpLOONG64MOVVstorezero ++ OpLOONG64MOVBreg ++ OpLOONG64MOVBUreg ++ OpLOONG64MOVHreg ++ OpLOONG64MOVHUreg ++ OpLOONG64MOVWreg ++ OpLOONG64MOVWUreg ++ OpLOONG64MOVVreg ++ OpLOONG64MOVVnop ++ OpLOONG64MOVWF ++ OpLOONG64MOVWD ++ OpLOONG64MOVVF ++ OpLOONG64MOVVD ++ OpLOONG64TRUNCFW ++ OpLOONG64TRUNCDW ++ OpLOONG64TRUNCFV ++ OpLOONG64TRUNCDV ++ OpLOONG64MOVFD ++ OpLOONG64MOVDF ++ OpLOONG64CALLstatic ++ OpLOONG64CALLclosure ++ OpLOONG64CALLinter ++ OpLOONG64DUFFZERO ++ OpLOONG64DUFFCOPY ++ OpLOONG64LoweredZero ++ OpLOONG64LoweredMove ++ OpLOONG64LoweredAtomicLoad8 ++ OpLOONG64LoweredAtomicLoad32 ++ OpLOONG64LoweredAtomicLoad64 ++ OpLOONG64LoweredAtomicStore8 ++ OpLOONG64LoweredAtomicStore32 ++ OpLOONG64LoweredAtomicStore64 ++ OpLOONG64LoweredAtomicStorezero32 ++ OpLOONG64LoweredAtomicStorezero64 ++ OpLOONG64LoweredAtomicExchange32 ++ OpLOONG64LoweredAtomicExchange64 ++ OpLOONG64LoweredAtomicAdd32 ++ OpLOONG64LoweredAtomicAdd64 ++ OpLOONG64LoweredAtomicAddconst32 ++ OpLOONG64LoweredAtomicAddconst64 ++ OpLOONG64LoweredAtomicCas32 ++ OpLOONG64LoweredAtomicCas64 ++ OpLOONG64LoweredNilCheck ++ OpLOONG64FPFlagTrue ++ OpLOONG64FPFlagFalse ++ OpLOONG64LoweredGetClosurePtr ++ OpLOONG64LoweredGetCallerSP ++ OpLOONG64LoweredGetCallerPC ++ OpLOONG64LoweredWB ++ OpLOONG64LoweredPanicBoundsA ++ OpLOONG64LoweredPanicBoundsB ++ OpLOONG64LoweredPanicBoundsC ++ + OpMIPSADD + OpMIPSADDconst + OpMIPSSUB +@@ -21447,137 +21586,123 @@ var opcodeTable = [...]opInfo{ + }, + + { +- name: "ADD", ++ name: "ADDV", + argLen: 2, + commutative: true, +- asm: mips.AADDU, ++ asm: loong64.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "ADDconst", +- auxType: auxInt32, ++ name: "ADDVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.AADDU, ++ asm: loong64.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R24 R25 R28 SP g R31 ++ {0, 1072693244}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SUB", ++ name: "SUBV", + argLen: 2, +- asm: mips.ASUBU, ++ asm: loong64.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SUBconst", +- auxType: auxInt32, ++ name: "SUBVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASUBU, +- reg: regInfo{ +- inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- }, +- }, +- }, +- { +- name: "MUL", +- argLen: 2, +- commutative: true, +- asm: mips.AMUL, ++ asm: loong64.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, +- clobbers: 105553116266496, // HI LO + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "MULT", +- argLen: 2, +- commutative: true, +- asm: mips.AMUL, ++ name: "MULV", ++ argLen: 2, ++ clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 196608, // R17 R18 + outputs: []outputInfo{ +- {0, 35184372088832}, // HI +- {1, 70368744177664}, // LO ++ {0, 65536}, // R17 ++ {1, 131072}, // R18 + }, + }, + }, + { +- name: "MULTU", +- argLen: 2, +- commutative: true, +- asm: mips.AMULU, ++ name: "MULVU", ++ argLen: 2, ++ clobberFlags: true, + reg: regInfo{ + 
inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 196608, // R17 R18 + outputs: []outputInfo{ +- {0, 35184372088832}, // HI +- {1, 70368744177664}, // LO ++ {0, 65536}, // R17 ++ {1, 131072}, // R18 + }, + }, + }, + { +- name: "DIV", +- argLen: 2, +- asm: mips.ADIV, ++ name: "DIVV", ++ argLen: 2, ++ clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 196608, // R17 R18 + outputs: []outputInfo{ +- {0, 35184372088832}, // HI +- {1, 70368744177664}, // LO ++ {0, 65536}, // R17 ++ {1, 131072}, // R18 + }, + }, + }, + { +- name: "DIVU", +- argLen: 2, +- asm: mips.ADIVU, ++ name: "DIVVU", ++ argLen: 2, ++ clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 196608, // R17 R18 + outputs: []outputInfo{ +- {0, 35184372088832}, // HI +- {1, 70368744177664}, // LO ++ {0, 65536}, // R17 ++ {1, 131072}, // R18 + }, + }, + }, +@@ -21585,14 +21710,14 @@ var opcodeTable = [...]opInfo{ + name: "ADDF", + argLen: 2, + commutative: true, +- asm: mips.AADDF, ++ asm: loong64.AADDF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -21600,42 +21725,42 @@ var opcodeTable = [...]opInfo{ + name: "ADDD", + argLen: 2, + commutative: true, +- asm: mips.AADDD, ++ asm: loong64.AADDD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SUBF", + argLen: 2, +- asm: mips.ASUBF, ++ asm: loong64.ASUBF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SUBD", + argLen: 2, +- asm: mips.ASUBD, ++ asm: loong64.ASUBD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -21643,14 +21768,14 @@ var opcodeTable = [...]opInfo{ + name: "MULF", + argLen: 2, + commutative: true, +- asm: mips.AMULF, ++ asm: loong64.AMULF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -21658,42 +21783,42 @@ var opcodeTable = [...]opInfo{ + name: "MULD", + argLen: 2, + commutative: true, +- asm: mips.AMULD, ++ asm: loong64.AMULD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, 
// F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVF", + argLen: 2, +- asm: mips.ADIVF, ++ asm: loong64.ADIVF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVD", + argLen: 2, +- asm: mips.ADIVD, ++ asm: loong64.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -21701,28 +21826,28 @@ var opcodeTable = [...]opInfo{ + name: "AND", + argLen: 2, + commutative: true, +- asm: mips.AAND, ++ asm: loong64.AAND, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ANDconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.AAND, ++ asm: loong64.AAND, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 
R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -21730,28 +21855,28 @@ var opcodeTable = [...]opInfo{ + name: "OR", + argLen: 2, + commutative: true, +- asm: mips.AOR, ++ asm: loong64.AOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ORconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.AOR, ++ asm: loong64.AOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -21759,28 +21884,28 @@ var opcodeTable = [...]opInfo{ + name: "XOR", + argLen: 2, + commutative: true, +- asm: mips.AXOR, ++ asm: loong64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "XORconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.AXOR, ++ asm: loong64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 
1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -21788,361 +21913,322 @@ var opcodeTable = [...]opInfo{ + name: "NOR", + argLen: 2, + commutative: true, +- asm: mips.ANOR, ++ asm: loong64.ANOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NORconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ANOR, ++ asm: loong64.ANOR, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "NEG", ++ name: "NEGV", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NEGF", + argLen: 1, +- asm: mips.ANEGF, ++ asm: loong64.ANEGF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "NEGD", + argLen: 1, +- asm: mips.ANEGD, ++ asm: loong64.ANEGD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTD", + argLen: 1, +- asm: mips.ASQRTD, ++ asm: loong64.ASQRTD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTF", + argLen: 1, +- asm: mips.ASQRTF, ++ asm: loong64.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { +- name: "SLL", ++ name: "SLLV", + argLen: 2, +- asm: mips.ASLL, ++ asm: loong64.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SLLconst", +- auxType: auxInt32, ++ name: "SLLVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASLL, ++ asm: loong64.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SRL", ++ name: "SRLV", + argLen: 2, +- asm: mips.ASRL, ++ asm: loong64.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: 
[]outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SRLconst", +- auxType: auxInt32, ++ name: "SRLVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASRL, ++ asm: loong64.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SRA", ++ name: "SRAV", + argLen: 2, +- asm: mips.ASRA, ++ asm: loong64.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "SRAconst", +- auxType: auxInt32, ++ name: "SRAVconst", ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASRA, +- reg: regInfo{ +- inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- }, +- }, +- }, +- { +- name: "CLZ", +- argLen: 1, +- asm: mips.ACLZ, ++ asm: loong64.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGT", + argLen: 2, +- asm: mips.ASGT, ++ asm: loong64.ASGT, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- 
{0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASGT, +- reg: regInfo{ +- inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- }, +- }, +- }, +- { +- name: "SGTzero", +- argLen: 1, +- asm: mips.ASGT, ++ asm: loong64.ASGT, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTU", + argLen: 2, +- asm: mips.ASGTU, ++ asm: loong64.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTUconst", +- auxType: auxInt32, ++ auxType: auxInt64, + argLen: 1, +- asm: mips.ASGTU, +- reg: regInfo{ +- inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- }, +- }, +- }, +- { +- name: "SGTUzero", +- argLen: 1, +- asm: mips.ASGTU, ++ asm: loong64.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "CMPEQF", + argLen: 2, +- asm: mips.ACMPEQF, ++ asm: loong64.ACMPEQF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 
4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPEQD", + argLen: 2, +- asm: mips.ACMPEQD, ++ asm: loong64.ACMPEQD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGEF", + argLen: 2, +- asm: mips.ACMPGEF, ++ asm: loong64.ACMPGEF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGED", + argLen: 2, +- asm: mips.ACMPGED, ++ asm: loong64.ACMPGED, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTF", + argLen: 2, +- asm: mips.ACMPGTF, ++ asm: loong64.ACMPGTF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTD", + argLen: 2, +- asm: mips.ACMPGTD, ++ asm: loong64.ACMPGTD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { +- name: "MOVWconst", +- auxType: auxInt32, ++ name: "MOVVconst", ++ auxType: auxInt64, + argLen: 0, + rematerializeable: true, +- asm: mips.AMOVW, ++ asm: loong64.AMOVV, + reg: regInfo{ + outputs: 
[]outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVFconst", +- auxType: auxFloat32, ++ auxType: auxFloat64, + argLen: 0, + rematerializeable: true, +- asm: mips.AMOVF, ++ asm: loong64.AMOVF, + reg: regInfo{ + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22151,26 +22237,26 @@ var opcodeTable = [...]opInfo{ + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, +- asm: mips.AMOVD, ++ asm: loong64.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { +- name: "MOVWaddr", ++ name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, +- asm: mips.AMOVW, ++ asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140737555464192}, // SP SB ++ {0, 4611686018427387908}, // SP SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22180,13 +22266,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVB, ++ asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22196,13 +22282,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVBU, ++ asm: loong64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22212,13 +22298,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVH, ++ asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 
4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22228,13 +22314,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVHU, ++ asm: loong64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22244,13 +22330,45 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVW, ++ asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWUload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: loong64.AMOVWU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVVload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: loong64.AMOVV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, +@@ -22260,13 +22378,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVF, ++ asm: loong64.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 
F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22276,13 +22394,13 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, +- asm: mips.AMOVD, ++ asm: loong64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22292,11 +22410,11 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVB, ++ asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22306,11 +22424,11 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVH, ++ asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22320,11 +22438,25 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVW, ++ asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVVstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: loong64.AMOVV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22334,11 +22466,11 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVF, ++ asm: loong64.AMOVF, + reg: 
regInfo{ + inputs: []inputInfo{ +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22348,11 +22480,11 @@ var opcodeTable = [...]opInfo{ + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVD, ++ asm: loong64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22362,10 +22494,10 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVB, ++ asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22375,10 +22507,10 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVH, ++ asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, +@@ -22388,197 +22520,257 @@ var opcodeTable = [...]opInfo{ + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, +- asm: mips.AMOVW, ++ asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVVstorezero", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: loong64.AMOVV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, +- asm: mips.AMOVB, ++ asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 
1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, +- asm: mips.AMOVBU, ++ asm: loong64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, +- asm: mips.AMOVH, ++ asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, +- asm: mips.AMOVHU, ++ asm: loong64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, +- asm: mips.AMOVW, ++ asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "MOVWnop", +- argLen: 1, +- resultInArg0: true, ++ name: "MOVWUreg", ++ argLen: 1, ++ asm: loong64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "CMOVZ", +- argLen: 3, +- resultInArg0: true, +- asm: mips.ACMOVZ, ++ name: "MOVVreg", ++ argLen: 1, ++ asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { +- name: "CMOVZzero", +- argLen: 2, ++ name: "MOVVnop", ++ argLen: 1, + resultInArg0: true, +- asm: mips.ACMOVZ, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 +- {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWF", + argLen: 1, +- asm: mips.AMOVWF, ++ asm: loong64.AMOVWF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVWD", + argLen: 1, +- asm: mips.AMOVWD, ++ asm: loong64.AMOVWD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVVF", ++ argLen: 1, ++ asm: loong64.AMOVVF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ outputs: []outputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVVD", ++ argLen: 1, ++ asm: loong64.AMOVVD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ outputs: []outputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 
F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCFW", + argLen: 1, +- asm: mips.ATRUNCFW, ++ asm: loong64.ATRUNCFW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCDW", + argLen: 1, +- asm: mips.ATRUNCDW, ++ asm: loong64.ATRUNCDW, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ }, ++ { ++ name: "TRUNCFV", ++ argLen: 1, ++ asm: loong64.ATRUNCFV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ outputs: []outputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ }, ++ { ++ name: "TRUNCDV", ++ argLen: 1, ++ asm: loong64.ATRUNCDV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ outputs: []outputInfo{ ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVFD", + argLen: 1, +- asm: mips.AMOVFD, ++ asm: loong64.AMOVFD, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDF", + argLen: 1, +- asm: mips.AMOVDF, ++ asm: loong64.AMOVDF, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ +- {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 
F27 F28 F29 F30 F31 + }, + }, + }, +@@ -22589,7 +22781,7 @@ var opcodeTable = [...]opInfo{ + clobberFlags: true, + call: true, + reg: regInfo{ +- clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { +@@ -22600,10 +22792,10 @@ var opcodeTable = [...]opInfo{ + call: true, + reg: regInfo{ + inputs: []inputInfo{ +- {1, 4194304}, // R22 +- {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 ++ {1, 268435456}, // R29 ++ {0, 1070596092}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, +- clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { +@@ -22614,39 +22806,1613 @@ var opcodeTable = [...]opInfo{ + call: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, +- clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { +- name: "LoweredAtomicLoad8", ++ name: "DUFFZERO", ++ auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, ++ clobbers: 262146, // R1 R19 + }, + }, + { +- name: "LoweredAtomicLoad32", +- argLen: 2, ++ name: "DUFFCOPY", ++ auxType: auxInt64, ++ argLen: 3, + faultOnNilArg0: true, ++ faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ +- {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB +- }, +- outputs: []outputInfo{ +- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {0, 524288}, // R20 ++ {1, 262144}, // R19 + }, ++ clobbers: 786434, // R1 R19 R20 + }, + }, + { +- name: "LoweredAtomicStore8", ++ name: "LoweredZero", ++ auxType: auxInt64, ++ argLen: 3, ++ 
clobberFlags: true, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 262144}, // R19 ++ {1, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ clobbers: 262144, // R19 ++ }, ++ }, ++ { ++ name: "LoweredMove", ++ auxType: auxInt64, ++ argLen: 4, ++ clobberFlags: true, ++ faultOnNilArg0: true, ++ faultOnNilArg1: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 8}, // R4 ++ {1, 262144}, // R19 ++ {2, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ clobbers: 262152, // R4 R19 ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad8", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad32", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad64", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStore8", ++ argLen: 3, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStore32", ++ argLen: 3, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStore64", ++ argLen: 3, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStorezero32", ++ argLen: 2, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStorezero64", ++ argLen: 2, ++ faultOnNilArg0: true, ++ hasSideEffects: 
true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicExchange32", ++ argLen: 3, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicExchange64", ++ argLen: 3, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicAdd32", ++ argLen: 3, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicAdd64", ++ argLen: 3, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicAddconst32", ++ auxType: auxInt32, ++ argLen: 2, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicAddconst64", ++ auxType: auxInt64, ++ argLen: 2, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 
1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicCas32", ++ argLen: 4, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicCas64", ++ argLen: 4, ++ resultNotInArgs: true, ++ faultOnNilArg0: true, ++ hasSideEffects: true, ++ unsafePoint: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredNilCheck", ++ argLen: 2, ++ nilCheck: true, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "FPFlagTrue", ++ argLen: 1, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "FPFlagFalse", ++ argLen: 1, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredGetClosurePtr", ++ argLen: 0, ++ zeroWidth: true, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 268435456}, // R29 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredGetCallerSP", ++ argLen: 0, ++ rematerializeable: true, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredGetCallerPC", ++ argLen: 0, ++ rematerializeable: true, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredWB", ++ auxType: auxSym, ++ argLen: 3, ++ clobberFlags: true, ++ symEffect: SymNone, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 67108864}, // R27 ++ {1, 134217728}, // R28 ++ }, ++ clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 ++ }, ++ }, ++ { ++ name: "LoweredPanicBoundsA", ++ auxType: auxInt64, ++ argLen: 3, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 65536}, // R17 ++ {1, 8}, // R4 ++ }, ++ }, ++ }, ++ { ++ name: 
"LoweredPanicBoundsB", ++ auxType: auxInt64, ++ argLen: 3, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 131072}, // R18 ++ {1, 65536}, // R17 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredPanicBoundsC", ++ auxType: auxInt64, ++ argLen: 3, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 262144}, // R19 ++ {1, 131072}, // R18 ++ }, ++ }, ++ }, ++ ++ { ++ name: "ADD", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AADDU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "ADDconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.AADDU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SUB", ++ argLen: 2, ++ asm: mips.ASUBU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SUBconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASUBU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MUL", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMUL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ clobbers: 105553116266496, // HI LO ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MULT", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMUL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 35184372088832}, // HI ++ {1, 70368744177664}, // LO ++ }, ++ }, ++ }, ++ { ++ name: "MULTU", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMULU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 35184372088832}, // HI ++ {1, 70368744177664}, // LO ++ }, ++ }, ++ }, ++ { ++ name: "DIV", ++ argLen: 2, ++ asm: mips.ADIV, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 35184372088832}, // HI ++ {1, 70368744177664}, // LO ++ }, ++ }, ++ }, ++ { ++ name: "DIVU", ++ argLen: 2, ++ asm: mips.ADIVU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 35184372088832}, // HI ++ {1, 70368744177664}, // LO ++ }, ++ }, ++ }, ++ { ++ name: "ADDF", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AADDF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "ADDD", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AADDD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SUBF", ++ argLen: 2, ++ asm: mips.ASUBF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SUBD", ++ argLen: 2, ++ asm: mips.ASUBD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MULF", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMULF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MULD", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AMULD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, 
++ }, ++ }, ++ { ++ name: "DIVF", ++ argLen: 2, ++ asm: mips.ADIVF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "DIVD", ++ argLen: 2, ++ asm: mips.ADIVD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "AND", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AAND, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "ANDconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.AAND, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "OR", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "ORconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.AOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "XOR", ++ argLen: 2, ++ commutative: true, ++ asm: mips.AXOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "XORconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.AXOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "NOR", ++ argLen: 2, ++ commutative: true, ++ asm: mips.ANOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "NORconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ANOR, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "NEG", ++ argLen: 1, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "NEGF", ++ argLen: 1, ++ asm: mips.ANEGF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "NEGD", ++ argLen: 1, ++ asm: mips.ANEGD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SQRTD", ++ argLen: 1, ++ asm: mips.ASQRTD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SQRTF", ++ argLen: 1, ++ asm: mips.ASQRTF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "SLL", ++ argLen: 2, ++ asm: mips.ASLL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SLLconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASLL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SRL", 
++ argLen: 2, ++ asm: mips.ASRL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SRLconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASRL, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SRA", ++ argLen: 2, ++ asm: mips.ASRA, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SRAconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASRA, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "CLZ", ++ argLen: 1, ++ asm: mips.ACLZ, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGT", ++ argLen: 2, ++ asm: mips.ASGT, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASGT, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTzero", ++ argLen: 1, ++ asm: mips.ASGT, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTU", ++ argLen: 2, ++ asm: mips.ASGTU, ++ reg: regInfo{ ++ inputs: 
[]inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTUconst", ++ auxType: auxInt32, ++ argLen: 1, ++ asm: mips.ASGTU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "SGTUzero", ++ argLen: 1, ++ asm: mips.ASGTU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "CMPEQF", ++ argLen: 2, ++ asm: mips.ACMPEQF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPEQD", ++ argLen: 2, ++ asm: mips.ACMPEQD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPGEF", ++ argLen: 2, ++ asm: mips.ACMPGEF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPGED", ++ argLen: 2, ++ asm: mips.ACMPGED, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPGTF", ++ argLen: 2, ++ asm: mips.ACMPGTF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CMPGTD", ++ argLen: 2, ++ asm: mips.ACMPGTD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWconst", ++ auxType: auxInt32, ++ argLen: 0, ++ rematerializeable: true, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVFconst", ++ auxType: auxFloat32, ++ argLen: 0, ++ rematerializeable: true, ++ asm: mips.AMOVF, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVDconst", ++ auxType: 
auxFloat64, ++ argLen: 0, ++ rematerializeable: true, ++ asm: mips.AMOVD, ++ reg: regInfo{ ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWaddr", ++ auxType: auxSymOff, ++ argLen: 1, ++ rematerializeable: true, ++ symEffect: SymAddr, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140737555464192}, // SP SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVBload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVB, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVBUload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVBU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVHload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVH, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVHUload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVHU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVFload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVDload", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymRead, ++ asm: mips.AMOVD, ++ reg: regInfo{ ++ inputs: 
[]inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVBstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVB, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVHstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVH, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVWstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVFstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVDstore", ++ auxType: auxSymOff, ++ argLen: 3, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVBstorezero", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVB, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVHstorezero", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVH, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVWstorezero", ++ auxType: auxSymOff, ++ argLen: 2, ++ faultOnNilArg0: true, ++ symEffect: SymWrite, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ }, ++ }, ++ { ++ name: "MOVBreg", ++ argLen: 1, ++ asm: mips.AMOVB, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVBUreg", ++ argLen: 1, ++ asm: mips.AMOVBU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVHreg", ++ argLen: 1, ++ asm: mips.AMOVH, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVHUreg", ++ argLen: 1, ++ asm: mips.AMOVHU, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWreg", ++ argLen: 1, ++ asm: mips.AMOVW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWnop", ++ argLen: 1, ++ resultInArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "CMOVZ", ++ argLen: 3, ++ resultInArg0: true, ++ asm: mips.ACMOVZ, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "CMOVZzero", ++ argLen: 2, ++ resultInArg0: true, ++ asm: mips.ACMOVZ, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWF", ++ argLen: 1, ++ asm: mips.AMOVWF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: 
[]outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVWD", ++ argLen: 1, ++ asm: mips.AMOVWD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "TRUNCFW", ++ argLen: 1, ++ asm: mips.ATRUNCFW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "TRUNCDW", ++ argLen: 1, ++ asm: mips.ATRUNCDW, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVFD", ++ argLen: 1, ++ asm: mips.AMOVFD, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "MOVDF", ++ argLen: 1, ++ asm: mips.AMOVDF, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ outputs: []outputInfo{ ++ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 ++ }, ++ }, ++ }, ++ { ++ name: "CALLstatic", ++ auxType: auxCallOff, ++ argLen: 1, ++ clobberFlags: true, ++ call: true, ++ reg: regInfo{ ++ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ }, ++ }, ++ { ++ name: "CALLclosure", ++ auxType: auxCallOff, ++ argLen: 3, ++ clobberFlags: true, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {1, 4194304}, // R22 ++ {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31 ++ }, ++ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ }, ++ }, ++ { ++ name: "CALLinter", ++ auxType: auxCallOff, ++ argLen: 2, ++ clobberFlags: true, ++ call: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad8", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: []inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicLoad32", ++ argLen: 2, ++ faultOnNilArg0: true, ++ reg: regInfo{ ++ inputs: 
[]inputInfo{ ++ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB ++ }, ++ outputs: []outputInfo{ ++ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 ++ }, ++ }, ++ }, ++ { ++ name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, +@@ -36407,6 +38173,78 @@ var fpRegMaskARM64 = regMask(9223372034707292160) + var specialRegMaskARM64 = regMask(0) + var framepointerRegARM64 = int8(-1) + var linkRegARM64 = int8(29) ++var registersLOONG64 = [...]Register{ ++ {0, loong64.REG_R0, -1, "R0"}, ++ {1, loong64.REG_R1, -1, "R1"}, ++ {2, loong64.REGSP, -1, "SP"}, ++ {3, loong64.REG_R4, 0, "R4"}, ++ {4, loong64.REG_R5, 1, "R5"}, ++ {5, loong64.REG_R6, 2, "R6"}, ++ {6, loong64.REG_R7, 3, "R7"}, ++ {7, loong64.REG_R8, 4, "R8"}, ++ {8, loong64.REG_R9, 5, "R9"}, ++ {9, loong64.REG_R10, 6, "R10"}, ++ {10, loong64.REG_R11, 7, "R11"}, ++ {11, loong64.REG_R12, 8, "R12"}, ++ {12, loong64.REG_R13, 9, "R13"}, ++ {13, loong64.REG_R14, 10, "R14"}, ++ {14, loong64.REG_R15, 11, "R15"}, ++ {15, loong64.REG_R16, 12, "R16"}, ++ {16, loong64.REG_R17, 13, "R17"}, ++ {17, loong64.REG_R18, 14, "R18"}, ++ {18, loong64.REG_R19, 15, "R19"}, ++ {19, loong64.REG_R20, 16, "R20"}, ++ {20, loong64.REG_R21, -1, "R21"}, ++ {21, loong64.REGG, -1, "g"}, ++ {22, loong64.REG_R23, 17, "R23"}, ++ {23, loong64.REG_R24, 18, "R24"}, ++ {24, loong64.REG_R25, 19, "R25"}, ++ {25, loong64.REG_R26, 20, "R26"}, ++ {26, loong64.REG_R27, 21, "R27"}, ++ {27, loong64.REG_R28, 22, "R28"}, ++ {28, loong64.REG_R29, 23, "R29"}, ++ {29, loong64.REG_R31, 24, "R31"}, ++ {30, loong64.REG_F0, -1, "F0"}, ++ {31, loong64.REG_F1, -1, "F1"}, ++ {32, loong64.REG_F2, -1, "F2"}, ++ {33, loong64.REG_F3, -1, "F3"}, ++ {34, loong64.REG_F4, -1, "F4"}, ++ {35, loong64.REG_F5, -1, "F5"}, ++ {36, loong64.REG_F6, -1, "F6"}, ++ {37, loong64.REG_F7, -1, "F7"}, ++ {38, loong64.REG_F8, -1, "F8"}, ++ {39, loong64.REG_F9, -1, "F9"}, ++ {40, loong64.REG_F10, -1, "F10"}, ++ {41, loong64.REG_F11, -1, "F11"}, ++ {42, loong64.REG_F12, -1, "F12"}, ++ {43, loong64.REG_F13, -1, "F13"}, ++ {44, loong64.REG_F14, -1, "F14"}, ++ {45, loong64.REG_F15, -1, "F15"}, ++ {46, loong64.REG_F16, -1, "F16"}, ++ {47, loong64.REG_F17, -1, "F17"}, ++ {48, loong64.REG_F18, -1, "F18"}, ++ {49, loong64.REG_F19, -1, "F19"}, ++ {50, loong64.REG_F20, -1, "F20"}, ++ {51, loong64.REG_F21, -1, "F21"}, ++ {52, loong64.REG_F22, -1, "F22"}, ++ {53, loong64.REG_F23, -1, "F23"}, ++ {54, loong64.REG_F24, -1, "F24"}, ++ {55, loong64.REG_F25, -1, "F25"}, ++ {56, loong64.REG_F26, -1, "F26"}, ++ {57, loong64.REG_F27, -1, "F27"}, ++ {58, loong64.REG_F28, -1, "F28"}, ++ {59, loong64.REG_F29, -1, "F29"}, ++ {60, loong64.REG_F30, -1, "F30"}, ++ {61, loong64.REG_F31, -1, "F31"}, ++ {62, 0, -1, "SB"}, ++} ++var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10} ++var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37} ++var gpRegMaskLOONG64 = regMask(1070596088) ++var fpRegMaskLOONG64 = regMask(4611686017353646080) ++var specialRegMaskLOONG64 = regMask(0) ++var framepointerRegLOONG64 = int8(-1) ++var linkRegLOONG64 = int8(1) + var registersMIPS = [...]Register{ + {0, mips.REG_R0, -1, "R0"}, + {1, mips.REG_R1, 0, "R1"}, +diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +new file mode 100644 +index 0000000..463a045 +--- /dev/null ++++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +@@ 
-0,0 +1,7943 @@ ++// Code generated from gen/LOONG64.rules; DO NOT EDIT. ++// generated with: cd gen; go run *.go ++ ++package ssa ++ ++import "cmd/compile/internal/types" ++ ++func rewriteValueLOONG64(v *Value) bool { ++ switch v.Op { ++ case OpAdd16: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAdd32: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAdd32F: ++ v.Op = OpLOONG64ADDF ++ return true ++ case OpAdd64: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAdd64F: ++ v.Op = OpLOONG64ADDD ++ return true ++ case OpAdd8: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAddPtr: ++ v.Op = OpLOONG64ADDV ++ return true ++ case OpAddr: ++ return rewriteValueLOONG64_OpAddr(v) ++ case OpAnd16: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAnd32: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAnd64: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAnd8: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAndB: ++ v.Op = OpLOONG64AND ++ return true ++ case OpAtomicAdd32: ++ v.Op = OpLOONG64LoweredAtomicAdd32 ++ return true ++ case OpAtomicAdd64: ++ v.Op = OpLOONG64LoweredAtomicAdd64 ++ return true ++ case OpAtomicCompareAndSwap32: ++ v.Op = OpLOONG64LoweredAtomicCas32 ++ return true ++ case OpAtomicCompareAndSwap64: ++ v.Op = OpLOONG64LoweredAtomicCas64 ++ return true ++ case OpAtomicExchange32: ++ v.Op = OpLOONG64LoweredAtomicExchange32 ++ return true ++ case OpAtomicExchange64: ++ v.Op = OpLOONG64LoweredAtomicExchange64 ++ return true ++ case OpAtomicLoad32: ++ v.Op = OpLOONG64LoweredAtomicLoad32 ++ return true ++ case OpAtomicLoad64: ++ v.Op = OpLOONG64LoweredAtomicLoad64 ++ return true ++ case OpAtomicLoad8: ++ v.Op = OpLOONG64LoweredAtomicLoad8 ++ return true ++ case OpAtomicLoadPtr: ++ v.Op = OpLOONG64LoweredAtomicLoad64 ++ return true ++ case OpAtomicStore32: ++ v.Op = OpLOONG64LoweredAtomicStore32 ++ return true ++ case OpAtomicStore64: ++ v.Op = OpLOONG64LoweredAtomicStore64 ++ return true ++ case OpAtomicStore8: ++ v.Op = OpLOONG64LoweredAtomicStore8 ++ return true ++ case OpAtomicStorePtrNoWB: ++ v.Op = OpLOONG64LoweredAtomicStore64 ++ return true ++ case OpAvg64u: ++ return rewriteValueLOONG64_OpAvg64u(v) ++ case OpClosureCall: ++ v.Op = OpLOONG64CALLclosure ++ return true ++ case OpCom16: ++ return rewriteValueLOONG64_OpCom16(v) ++ case OpCom32: ++ return rewriteValueLOONG64_OpCom32(v) ++ case OpCom64: ++ return rewriteValueLOONG64_OpCom64(v) ++ case OpCom8: ++ return rewriteValueLOONG64_OpCom8(v) ++ case OpConst16: ++ return rewriteValueLOONG64_OpConst16(v) ++ case OpConst32: ++ return rewriteValueLOONG64_OpConst32(v) ++ case OpConst32F: ++ return rewriteValueLOONG64_OpConst32F(v) ++ case OpConst64: ++ return rewriteValueLOONG64_OpConst64(v) ++ case OpConst64F: ++ return rewriteValueLOONG64_OpConst64F(v) ++ case OpConst8: ++ return rewriteValueLOONG64_OpConst8(v) ++ case OpConstBool: ++ return rewriteValueLOONG64_OpConstBool(v) ++ case OpConstNil: ++ return rewriteValueLOONG64_OpConstNil(v) ++ case OpCvt32Fto32: ++ v.Op = OpLOONG64TRUNCFW ++ return true ++ case OpCvt32Fto64: ++ v.Op = OpLOONG64TRUNCFV ++ return true ++ case OpCvt32Fto64F: ++ v.Op = OpLOONG64MOVFD ++ return true ++ case OpCvt32to32F: ++ v.Op = OpLOONG64MOVWF ++ return true ++ case OpCvt32to64F: ++ v.Op = OpLOONG64MOVWD ++ return true ++ case OpCvt64Fto32: ++ v.Op = OpLOONG64TRUNCDW ++ return true ++ case OpCvt64Fto32F: ++ v.Op = OpLOONG64MOVDF ++ return true ++ case OpCvt64Fto64: ++ v.Op = OpLOONG64TRUNCDV ++ return true ++ case OpCvt64to32F: ++ v.Op = OpLOONG64MOVVF ++ return true ++ case OpCvt64to64F: ++ v.Op = 
OpLOONG64MOVVD ++ return true ++ case OpCvtBoolToUint8: ++ v.Op = OpCopy ++ return true ++ case OpDiv16: ++ return rewriteValueLOONG64_OpDiv16(v) ++ case OpDiv16u: ++ return rewriteValueLOONG64_OpDiv16u(v) ++ case OpDiv32: ++ return rewriteValueLOONG64_OpDiv32(v) ++ case OpDiv32F: ++ v.Op = OpLOONG64DIVF ++ return true ++ case OpDiv32u: ++ return rewriteValueLOONG64_OpDiv32u(v) ++ case OpDiv64: ++ return rewriteValueLOONG64_OpDiv64(v) ++ case OpDiv64F: ++ v.Op = OpLOONG64DIVD ++ return true ++ case OpDiv64u: ++ return rewriteValueLOONG64_OpDiv64u(v) ++ case OpDiv8: ++ return rewriteValueLOONG64_OpDiv8(v) ++ case OpDiv8u: ++ return rewriteValueLOONG64_OpDiv8u(v) ++ case OpEq16: ++ return rewriteValueLOONG64_OpEq16(v) ++ case OpEq32: ++ return rewriteValueLOONG64_OpEq32(v) ++ case OpEq32F: ++ return rewriteValueLOONG64_OpEq32F(v) ++ case OpEq64: ++ return rewriteValueLOONG64_OpEq64(v) ++ case OpEq64F: ++ return rewriteValueLOONG64_OpEq64F(v) ++ case OpEq8: ++ return rewriteValueLOONG64_OpEq8(v) ++ case OpEqB: ++ return rewriteValueLOONG64_OpEqB(v) ++ case OpEqPtr: ++ return rewriteValueLOONG64_OpEqPtr(v) ++ case OpGetCallerPC: ++ v.Op = OpLOONG64LoweredGetCallerPC ++ return true ++ case OpGetCallerSP: ++ v.Op = OpLOONG64LoweredGetCallerSP ++ return true ++ case OpGetClosurePtr: ++ v.Op = OpLOONG64LoweredGetClosurePtr ++ return true ++ case OpHmul32: ++ return rewriteValueLOONG64_OpHmul32(v) ++ case OpHmul32u: ++ return rewriteValueLOONG64_OpHmul32u(v) ++ case OpHmul64: ++ return rewriteValueLOONG64_OpHmul64(v) ++ case OpHmul64u: ++ return rewriteValueLOONG64_OpHmul64u(v) ++ case OpInterCall: ++ v.Op = OpLOONG64CALLinter ++ return true ++ case OpIsInBounds: ++ return rewriteValueLOONG64_OpIsInBounds(v) ++ case OpIsNonNil: ++ return rewriteValueLOONG64_OpIsNonNil(v) ++ case OpIsSliceInBounds: ++ return rewriteValueLOONG64_OpIsSliceInBounds(v) ++ case OpLOONG64ADDV: ++ return rewriteValueLOONG64_OpLOONG64ADDV(v) ++ case OpLOONG64ADDVconst: ++ return rewriteValueLOONG64_OpLOONG64ADDVconst(v) ++ case OpLOONG64AND: ++ return rewriteValueLOONG64_OpLOONG64AND(v) ++ case OpLOONG64ANDconst: ++ return rewriteValueLOONG64_OpLOONG64ANDconst(v) ++ case OpLOONG64LoweredAtomicAdd32: ++ return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v) ++ case OpLOONG64LoweredAtomicAdd64: ++ return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v) ++ case OpLOONG64LoweredAtomicStore32: ++ return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v) ++ case OpLOONG64LoweredAtomicStore64: ++ return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v) ++ case OpLOONG64MOVBUload: ++ return rewriteValueLOONG64_OpLOONG64MOVBUload(v) ++ case OpLOONG64MOVBUreg: ++ return rewriteValueLOONG64_OpLOONG64MOVBUreg(v) ++ case OpLOONG64MOVBload: ++ return rewriteValueLOONG64_OpLOONG64MOVBload(v) ++ case OpLOONG64MOVBreg: ++ return rewriteValueLOONG64_OpLOONG64MOVBreg(v) ++ case OpLOONG64MOVBstore: ++ return rewriteValueLOONG64_OpLOONG64MOVBstore(v) ++ case OpLOONG64MOVBstorezero: ++ return rewriteValueLOONG64_OpLOONG64MOVBstorezero(v) ++ case OpLOONG64MOVDload: ++ return rewriteValueLOONG64_OpLOONG64MOVDload(v) ++ case OpLOONG64MOVDstore: ++ return rewriteValueLOONG64_OpLOONG64MOVDstore(v) ++ case OpLOONG64MOVFload: ++ return rewriteValueLOONG64_OpLOONG64MOVFload(v) ++ case OpLOONG64MOVFstore: ++ return rewriteValueLOONG64_OpLOONG64MOVFstore(v) ++ case OpLOONG64MOVHUload: ++ return rewriteValueLOONG64_OpLOONG64MOVHUload(v) ++ case OpLOONG64MOVHUreg: ++ return rewriteValueLOONG64_OpLOONG64MOVHUreg(v) ++ case OpLOONG64MOVHload: ++ 
return rewriteValueLOONG64_OpLOONG64MOVHload(v) ++ case OpLOONG64MOVHreg: ++ return rewriteValueLOONG64_OpLOONG64MOVHreg(v) ++ case OpLOONG64MOVHstore: ++ return rewriteValueLOONG64_OpLOONG64MOVHstore(v) ++ case OpLOONG64MOVHstorezero: ++ return rewriteValueLOONG64_OpLOONG64MOVHstorezero(v) ++ case OpLOONG64MOVVload: ++ return rewriteValueLOONG64_OpLOONG64MOVVload(v) ++ case OpLOONG64MOVVreg: ++ return rewriteValueLOONG64_OpLOONG64MOVVreg(v) ++ case OpLOONG64MOVVstore: ++ return rewriteValueLOONG64_OpLOONG64MOVVstore(v) ++ case OpLOONG64MOVVstorezero: ++ return rewriteValueLOONG64_OpLOONG64MOVVstorezero(v) ++ case OpLOONG64MOVWUload: ++ return rewriteValueLOONG64_OpLOONG64MOVWUload(v) ++ case OpLOONG64MOVWUreg: ++ return rewriteValueLOONG64_OpLOONG64MOVWUreg(v) ++ case OpLOONG64MOVWload: ++ return rewriteValueLOONG64_OpLOONG64MOVWload(v) ++ case OpLOONG64MOVWreg: ++ return rewriteValueLOONG64_OpLOONG64MOVWreg(v) ++ case OpLOONG64MOVWstore: ++ return rewriteValueLOONG64_OpLOONG64MOVWstore(v) ++ case OpLOONG64MOVWstorezero: ++ return rewriteValueLOONG64_OpLOONG64MOVWstorezero(v) ++ case OpLOONG64NEGV: ++ return rewriteValueLOONG64_OpLOONG64NEGV(v) ++ case OpLOONG64NOR: ++ return rewriteValueLOONG64_OpLOONG64NOR(v) ++ case OpLOONG64NORconst: ++ return rewriteValueLOONG64_OpLOONG64NORconst(v) ++ case OpLOONG64OR: ++ return rewriteValueLOONG64_OpLOONG64OR(v) ++ case OpLOONG64ORconst: ++ return rewriteValueLOONG64_OpLOONG64ORconst(v) ++ case OpLOONG64SGT: ++ return rewriteValueLOONG64_OpLOONG64SGT(v) ++ case OpLOONG64SGTU: ++ return rewriteValueLOONG64_OpLOONG64SGTU(v) ++ case OpLOONG64SGTUconst: ++ return rewriteValueLOONG64_OpLOONG64SGTUconst(v) ++ case OpLOONG64SGTconst: ++ return rewriteValueLOONG64_OpLOONG64SGTconst(v) ++ case OpLOONG64SLLV: ++ return rewriteValueLOONG64_OpLOONG64SLLV(v) ++ case OpLOONG64SLLVconst: ++ return rewriteValueLOONG64_OpLOONG64SLLVconst(v) ++ case OpLOONG64SRAV: ++ return rewriteValueLOONG64_OpLOONG64SRAV(v) ++ case OpLOONG64SRAVconst: ++ return rewriteValueLOONG64_OpLOONG64SRAVconst(v) ++ case OpLOONG64SRLV: ++ return rewriteValueLOONG64_OpLOONG64SRLV(v) ++ case OpLOONG64SRLVconst: ++ return rewriteValueLOONG64_OpLOONG64SRLVconst(v) ++ case OpLOONG64SUBV: ++ return rewriteValueLOONG64_OpLOONG64SUBV(v) ++ case OpLOONG64SUBVconst: ++ return rewriteValueLOONG64_OpLOONG64SUBVconst(v) ++ case OpLOONG64XOR: ++ return rewriteValueLOONG64_OpLOONG64XOR(v) ++ case OpLOONG64XORconst: ++ return rewriteValueLOONG64_OpLOONG64XORconst(v) ++ case OpLeq16: ++ return rewriteValueLOONG64_OpLeq16(v) ++ case OpLeq16U: ++ return rewriteValueLOONG64_OpLeq16U(v) ++ case OpLeq32: ++ return rewriteValueLOONG64_OpLeq32(v) ++ case OpLeq32F: ++ return rewriteValueLOONG64_OpLeq32F(v) ++ case OpLeq32U: ++ return rewriteValueLOONG64_OpLeq32U(v) ++ case OpLeq64: ++ return rewriteValueLOONG64_OpLeq64(v) ++ case OpLeq64F: ++ return rewriteValueLOONG64_OpLeq64F(v) ++ case OpLeq64U: ++ return rewriteValueLOONG64_OpLeq64U(v) ++ case OpLeq8: ++ return rewriteValueLOONG64_OpLeq8(v) ++ case OpLeq8U: ++ return rewriteValueLOONG64_OpLeq8U(v) ++ case OpLess16: ++ return rewriteValueLOONG64_OpLess16(v) ++ case OpLess16U: ++ return rewriteValueLOONG64_OpLess16U(v) ++ case OpLess32: ++ return rewriteValueLOONG64_OpLess32(v) ++ case OpLess32F: ++ return rewriteValueLOONG64_OpLess32F(v) ++ case OpLess32U: ++ return rewriteValueLOONG64_OpLess32U(v) ++ case OpLess64: ++ return rewriteValueLOONG64_OpLess64(v) ++ case OpLess64F: ++ return rewriteValueLOONG64_OpLess64F(v) ++ case OpLess64U: ++ return 
rewriteValueLOONG64_OpLess64U(v) ++ case OpLess8: ++ return rewriteValueLOONG64_OpLess8(v) ++ case OpLess8U: ++ return rewriteValueLOONG64_OpLess8U(v) ++ case OpLoad: ++ return rewriteValueLOONG64_OpLoad(v) ++ case OpLocalAddr: ++ return rewriteValueLOONG64_OpLocalAddr(v) ++ case OpLsh16x16: ++ return rewriteValueLOONG64_OpLsh16x16(v) ++ case OpLsh16x32: ++ return rewriteValueLOONG64_OpLsh16x32(v) ++ case OpLsh16x64: ++ return rewriteValueLOONG64_OpLsh16x64(v) ++ case OpLsh16x8: ++ return rewriteValueLOONG64_OpLsh16x8(v) ++ case OpLsh32x16: ++ return rewriteValueLOONG64_OpLsh32x16(v) ++ case OpLsh32x32: ++ return rewriteValueLOONG64_OpLsh32x32(v) ++ case OpLsh32x64: ++ return rewriteValueLOONG64_OpLsh32x64(v) ++ case OpLsh32x8: ++ return rewriteValueLOONG64_OpLsh32x8(v) ++ case OpLsh64x16: ++ return rewriteValueLOONG64_OpLsh64x16(v) ++ case OpLsh64x32: ++ return rewriteValueLOONG64_OpLsh64x32(v) ++ case OpLsh64x64: ++ return rewriteValueLOONG64_OpLsh64x64(v) ++ case OpLsh64x8: ++ return rewriteValueLOONG64_OpLsh64x8(v) ++ case OpLsh8x16: ++ return rewriteValueLOONG64_OpLsh8x16(v) ++ case OpLsh8x32: ++ return rewriteValueLOONG64_OpLsh8x32(v) ++ case OpLsh8x64: ++ return rewriteValueLOONG64_OpLsh8x64(v) ++ case OpLsh8x8: ++ return rewriteValueLOONG64_OpLsh8x8(v) ++ case OpMod16: ++ return rewriteValueLOONG64_OpMod16(v) ++ case OpMod16u: ++ return rewriteValueLOONG64_OpMod16u(v) ++ case OpMod32: ++ return rewriteValueLOONG64_OpMod32(v) ++ case OpMod32u: ++ return rewriteValueLOONG64_OpMod32u(v) ++ case OpMod64: ++ return rewriteValueLOONG64_OpMod64(v) ++ case OpMod64u: ++ return rewriteValueLOONG64_OpMod64u(v) ++ case OpMod8: ++ return rewriteValueLOONG64_OpMod8(v) ++ case OpMod8u: ++ return rewriteValueLOONG64_OpMod8u(v) ++ case OpMove: ++ return rewriteValueLOONG64_OpMove(v) ++ case OpMul16: ++ return rewriteValueLOONG64_OpMul16(v) ++ case OpMul32: ++ return rewriteValueLOONG64_OpMul32(v) ++ case OpMul32F: ++ v.Op = OpLOONG64MULF ++ return true ++ case OpMul64: ++ return rewriteValueLOONG64_OpMul64(v) ++ case OpMul64F: ++ v.Op = OpLOONG64MULD ++ return true ++ case OpMul64uhilo: ++ v.Op = OpLOONG64MULVU ++ return true ++ case OpMul8: ++ return rewriteValueLOONG64_OpMul8(v) ++ case OpNeg16: ++ v.Op = OpLOONG64NEGV ++ return true ++ case OpNeg32: ++ v.Op = OpLOONG64NEGV ++ return true ++ case OpNeg32F: ++ v.Op = OpLOONG64NEGF ++ return true ++ case OpNeg64: ++ v.Op = OpLOONG64NEGV ++ return true ++ case OpNeg64F: ++ v.Op = OpLOONG64NEGD ++ return true ++ case OpNeg8: ++ v.Op = OpLOONG64NEGV ++ return true ++ case OpNeq16: ++ return rewriteValueLOONG64_OpNeq16(v) ++ case OpNeq32: ++ return rewriteValueLOONG64_OpNeq32(v) ++ case OpNeq32F: ++ return rewriteValueLOONG64_OpNeq32F(v) ++ case OpNeq64: ++ return rewriteValueLOONG64_OpNeq64(v) ++ case OpNeq64F: ++ return rewriteValueLOONG64_OpNeq64F(v) ++ case OpNeq8: ++ return rewriteValueLOONG64_OpNeq8(v) ++ case OpNeqB: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpNeqPtr: ++ return rewriteValueLOONG64_OpNeqPtr(v) ++ case OpNilCheck: ++ v.Op = OpLOONG64LoweredNilCheck ++ return true ++ case OpNot: ++ return rewriteValueLOONG64_OpNot(v) ++ case OpOffPtr: ++ return rewriteValueLOONG64_OpOffPtr(v) ++ case OpOr16: ++ v.Op = OpLOONG64OR ++ return true ++ case OpOr32: ++ v.Op = OpLOONG64OR ++ return true ++ case OpOr64: ++ v.Op = OpLOONG64OR ++ return true ++ case OpOr8: ++ v.Op = OpLOONG64OR ++ return true ++ case OpOrB: ++ v.Op = OpLOONG64OR ++ return true ++ case OpPanicBounds: ++ return rewriteValueLOONG64_OpPanicBounds(v) ++ case OpRotateLeft16: 
++ return rewriteValueLOONG64_OpRotateLeft16(v) ++ case OpRotateLeft32: ++ return rewriteValueLOONG64_OpRotateLeft32(v) ++ case OpRotateLeft64: ++ return rewriteValueLOONG64_OpRotateLeft64(v) ++ case OpRotateLeft8: ++ return rewriteValueLOONG64_OpRotateLeft8(v) ++ case OpRound32F: ++ v.Op = OpCopy ++ return true ++ case OpRound64F: ++ v.Op = OpCopy ++ return true ++ case OpRsh16Ux16: ++ return rewriteValueLOONG64_OpRsh16Ux16(v) ++ case OpRsh16Ux32: ++ return rewriteValueLOONG64_OpRsh16Ux32(v) ++ case OpRsh16Ux64: ++ return rewriteValueLOONG64_OpRsh16Ux64(v) ++ case OpRsh16Ux8: ++ return rewriteValueLOONG64_OpRsh16Ux8(v) ++ case OpRsh16x16: ++ return rewriteValueLOONG64_OpRsh16x16(v) ++ case OpRsh16x32: ++ return rewriteValueLOONG64_OpRsh16x32(v) ++ case OpRsh16x64: ++ return rewriteValueLOONG64_OpRsh16x64(v) ++ case OpRsh16x8: ++ return rewriteValueLOONG64_OpRsh16x8(v) ++ case OpRsh32Ux16: ++ return rewriteValueLOONG64_OpRsh32Ux16(v) ++ case OpRsh32Ux32: ++ return rewriteValueLOONG64_OpRsh32Ux32(v) ++ case OpRsh32Ux64: ++ return rewriteValueLOONG64_OpRsh32Ux64(v) ++ case OpRsh32Ux8: ++ return rewriteValueLOONG64_OpRsh32Ux8(v) ++ case OpRsh32x16: ++ return rewriteValueLOONG64_OpRsh32x16(v) ++ case OpRsh32x32: ++ return rewriteValueLOONG64_OpRsh32x32(v) ++ case OpRsh32x64: ++ return rewriteValueLOONG64_OpRsh32x64(v) ++ case OpRsh32x8: ++ return rewriteValueLOONG64_OpRsh32x8(v) ++ case OpRsh64Ux16: ++ return rewriteValueLOONG64_OpRsh64Ux16(v) ++ case OpRsh64Ux32: ++ return rewriteValueLOONG64_OpRsh64Ux32(v) ++ case OpRsh64Ux64: ++ return rewriteValueLOONG64_OpRsh64Ux64(v) ++ case OpRsh64Ux8: ++ return rewriteValueLOONG64_OpRsh64Ux8(v) ++ case OpRsh64x16: ++ return rewriteValueLOONG64_OpRsh64x16(v) ++ case OpRsh64x32: ++ return rewriteValueLOONG64_OpRsh64x32(v) ++ case OpRsh64x64: ++ return rewriteValueLOONG64_OpRsh64x64(v) ++ case OpRsh64x8: ++ return rewriteValueLOONG64_OpRsh64x8(v) ++ case OpRsh8Ux16: ++ return rewriteValueLOONG64_OpRsh8Ux16(v) ++ case OpRsh8Ux32: ++ return rewriteValueLOONG64_OpRsh8Ux32(v) ++ case OpRsh8Ux64: ++ return rewriteValueLOONG64_OpRsh8Ux64(v) ++ case OpRsh8Ux8: ++ return rewriteValueLOONG64_OpRsh8Ux8(v) ++ case OpRsh8x16: ++ return rewriteValueLOONG64_OpRsh8x16(v) ++ case OpRsh8x32: ++ return rewriteValueLOONG64_OpRsh8x32(v) ++ case OpRsh8x64: ++ return rewriteValueLOONG64_OpRsh8x64(v) ++ case OpRsh8x8: ++ return rewriteValueLOONG64_OpRsh8x8(v) ++ case OpSelect0: ++ return rewriteValueLOONG64_OpSelect0(v) ++ case OpSelect1: ++ return rewriteValueLOONG64_OpSelect1(v) ++ case OpSignExt16to32: ++ v.Op = OpLOONG64MOVHreg ++ return true ++ case OpSignExt16to64: ++ v.Op = OpLOONG64MOVHreg ++ return true ++ case OpSignExt32to64: ++ v.Op = OpLOONG64MOVWreg ++ return true ++ case OpSignExt8to16: ++ v.Op = OpLOONG64MOVBreg ++ return true ++ case OpSignExt8to32: ++ v.Op = OpLOONG64MOVBreg ++ return true ++ case OpSignExt8to64: ++ v.Op = OpLOONG64MOVBreg ++ return true ++ case OpSlicemask: ++ return rewriteValueLOONG64_OpSlicemask(v) ++ case OpSqrt: ++ v.Op = OpLOONG64SQRTD ++ return true ++ case OpSqrt32: ++ v.Op = OpLOONG64SQRTF ++ return true ++ case OpStaticCall: ++ v.Op = OpLOONG64CALLstatic ++ return true ++ case OpStore: ++ return rewriteValueLOONG64_OpStore(v) ++ case OpSub16: ++ v.Op = OpLOONG64SUBV ++ return true ++ case OpSub32: ++ v.Op = OpLOONG64SUBV ++ return true ++ case OpSub32F: ++ v.Op = OpLOONG64SUBF ++ return true ++ case OpSub64: ++ v.Op = OpLOONG64SUBV ++ return true ++ case OpSub64F: ++ v.Op = OpLOONG64SUBD ++ return true ++ case OpSub8: ++ v.Op = 
OpLOONG64SUBV ++ return true ++ case OpSubPtr: ++ v.Op = OpLOONG64SUBV ++ return true ++ case OpTrunc16to8: ++ v.Op = OpCopy ++ return true ++ case OpTrunc32to16: ++ v.Op = OpCopy ++ return true ++ case OpTrunc32to8: ++ v.Op = OpCopy ++ return true ++ case OpTrunc64to16: ++ v.Op = OpCopy ++ return true ++ case OpTrunc64to32: ++ v.Op = OpCopy ++ return true ++ case OpTrunc64to8: ++ v.Op = OpCopy ++ return true ++ case OpWB: ++ v.Op = OpLOONG64LoweredWB ++ return true ++ case OpXor16: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpXor32: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpXor64: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpXor8: ++ v.Op = OpLOONG64XOR ++ return true ++ case OpZero: ++ return rewriteValueLOONG64_OpZero(v) ++ case OpZeroExt16to32: ++ v.Op = OpLOONG64MOVHUreg ++ return true ++ case OpZeroExt16to64: ++ v.Op = OpLOONG64MOVHUreg ++ return true ++ case OpZeroExt32to64: ++ v.Op = OpLOONG64MOVWUreg ++ return true ++ case OpZeroExt8to16: ++ v.Op = OpLOONG64MOVBUreg ++ return true ++ case OpZeroExt8to32: ++ v.Op = OpLOONG64MOVBUreg ++ return true ++ case OpZeroExt8to64: ++ v.Op = OpLOONG64MOVBUreg ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpAddr(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (Addr {sym} base) ++ // result: (MOVVaddr {sym} base) ++ for { ++ sym := auxToSym(v.Aux) ++ base := v_0 ++ v.reset(OpLOONG64MOVVaddr) ++ v.Aux = symToAux(sym) ++ v.AddArg(base) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpAvg64u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Avg64u x y) ++ // result: (ADDV (SRLVconst (SUBV x y) [1]) y) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64ADDV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64SRLVconst, t) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SUBV, t) ++ v1.AddArg2(x, y) ++ v0.AddArg(v1) ++ v.AddArg2(v0, y) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpCom16(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Com16 x) ++ // result: (NOR (MOVVconst [0]) x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64NOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpCom32(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Com32 x) ++ // result: (NOR (MOVVconst [0]) x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64NOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpCom64(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Com64 x) ++ // result: (NOR (MOVVconst [0]) x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64NOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpCom8(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Com8 x) ++ // result: (NOR (MOVVconst [0]) x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64NOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst16(v *Value) bool { ++ // match: (Const16 [val]) ++ // result: (MOVVconst [int64(val)]) ++ for { ++ val := auxIntToInt16(v.AuxInt) ++ 
v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst32(v *Value) bool { ++ // match: (Const32 [val]) ++ // result: (MOVVconst [int64(val)]) ++ for { ++ val := auxIntToInt32(v.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst32F(v *Value) bool { ++ // match: (Const32F [val]) ++ // result: (MOVFconst [float64(val)]) ++ for { ++ val := auxIntToFloat32(v.AuxInt) ++ v.reset(OpLOONG64MOVFconst) ++ v.AuxInt = float64ToAuxInt(float64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst64(v *Value) bool { ++ // match: (Const64 [val]) ++ // result: (MOVVconst [int64(val)]) ++ for { ++ val := auxIntToInt64(v.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst64F(v *Value) bool { ++ // match: (Const64F [val]) ++ // result: (MOVDconst [float64(val)]) ++ for { ++ val := auxIntToFloat64(v.AuxInt) ++ v.reset(OpLOONG64MOVDconst) ++ v.AuxInt = float64ToAuxInt(float64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConst8(v *Value) bool { ++ // match: (Const8 [val]) ++ // result: (MOVVconst [int64(val)]) ++ for { ++ val := auxIntToInt8(v.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(val)) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConstBool(v *Value) bool { ++ // match: (ConstBool [t]) ++ // result: (MOVVconst [int64(b2i(t))]) ++ for { ++ t := auxIntToBool(v.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(b2i(t))) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpConstNil(v *Value) bool { ++ // match: (ConstNil) ++ // result: (MOVVconst [0]) ++ for { ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div16 x y) ++ // result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv16u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div16u x y) ++ // result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div32 x y) ++ // result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v2.AddArg(y) ++ 
v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv32u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div32u x y) ++ // result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div64 x y) ++ // result: (Select1 (DIVV x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv64u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div64u x y) ++ // result: (Select1 (DIVVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div8 x y) ++ // result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpDiv8u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Div8u x y) ++ // result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Eq16 x y) ++ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Eq32 x y) ++ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := 
b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq32F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Eq32F x y) ++ // result: (FPFlagTrue (CMPEQF x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Eq64 x y) ++ // result: (SGTU (MOVVconst [1]) (XOR x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq64F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Eq64F x y) ++ // result: (FPFlagTrue (CMPEQD x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQD, types.TypeFlags) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEq8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Eq8 x y) ++ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEqB(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (EqB x y) ++ // result: (XOR (MOVVconst [1]) (XOR x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.Bool) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpEqPtr(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (EqPtr x y) ++ // result: (SGTU (MOVVconst [1]) (XOR x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpHmul32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Hmul32 x y) ++ // result: (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAVconst) ++ v.AuxInt = int64ToAuxInt(32) ++ v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64) ++ 
v1 := b.NewValue0(v.Pos, OpLOONG64MULV, types.NewTuple(typ.Int64, typ.Int64)) ++ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpHmul32u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Hmul32u x y) ++ // result: (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32]) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRLVconst) ++ v.AuxInt = int64ToAuxInt(32) ++ v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpHmul64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Hmul64 x y) ++ // result: (Select0 (MULV x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULV, types.NewTuple(typ.Int64, typ.Int64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpHmul64u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Hmul64u x y) ++ // result: (Select0 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpIsInBounds(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (IsInBounds idx len) ++ // result: (SGTU len idx) ++ for { ++ idx := v_0 ++ len := v_1 ++ v.reset(OpLOONG64SGTU) ++ v.AddArg2(len, idx) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpIsNonNil(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (IsNonNil ptr) ++ // result: (SGTU ptr (MOVVconst [0])) ++ for { ++ ptr := v_0 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(ptr, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpIsSliceInBounds(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (IsSliceInBounds idx len) ++ // result: (XOR (MOVVconst [1]) (SGTU idx len)) ++ for { ++ idx := v_0 ++ len := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v1.AddArg2(idx, len) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLOONG64ADDV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (ADDV x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (ADDVconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ // match: (ADDV x (NEGV y)) ++ // result: (SUBV x y) ++ for { ++ 
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64NEGV { ++ continue ++ } ++ y := v_1.Args[0] ++ v.reset(OpLOONG64SUBV) ++ v.AddArg2(x, y) ++ return true ++ } ++ break ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64ADDVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) ++ // cond: is32Bit(off1+int64(off2)) ++ // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) ++ for { ++ off1 := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ if !(is32Bit(off1 + int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVVaddr) ++ v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg(ptr) ++ return true ++ } ++ // match: (ADDVconst [0] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // match: (ADDVconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [c+d]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c + d) ++ return true ++ } ++ // match: (ADDVconst [c] (ADDVconst [d] x)) ++ // cond: is32Bit(c+d) ++ // result: (ADDVconst [c+d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(c + d)) { ++ break ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(c + d) ++ v.AddArg(x) ++ return true ++ } ++ // match: (ADDVconst [c] (SUBVconst [d] x)) ++ // cond: is32Bit(c-d) ++ // result: (ADDVconst [c-d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64SUBVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(c - d)) { ++ break ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(c - d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64AND(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (AND x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (ANDconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64ANDconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ // match: (AND x x) ++ // result: x ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.copyOf(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64ANDconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (ANDconst [0] _) ++ // result: (MOVVconst [0]) ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (ANDconst [-1] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != -1 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // match: (ANDconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [c&d]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c & d) ++ return true ++ } ++ // match: (ANDconst [c] (ANDconst [d] x)) ++ // result: 
(ANDconst [c&d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ANDconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ v.reset(OpLOONG64ANDconst) ++ v.AuxInt = int64ToAuxInt(c & d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) ++ // cond: is32Bit(c) ++ // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem) ++ for { ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ mem := v_2 ++ if !(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64LoweredAtomicAddconst32) ++ v.AuxInt = int32ToAuxInt(int32(c)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) ++ // cond: is32Bit(c) ++ // result: (LoweredAtomicAddconst64 [c] ptr mem) ++ for { ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ mem := v_2 ++ if !(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64LoweredAtomicAddconst64) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem) ++ // result: (LoweredAtomicStorezero32 ptr mem) ++ for { ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { ++ break ++ } ++ mem := v_2 ++ v.reset(OpLOONG64LoweredAtomicStorezero32) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem) ++ // result: (LoweredAtomicStorezero64 ptr mem) ++ for { ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { ++ break ++ } ++ mem := v_2 ++ v.reset(OpLOONG64LoweredAtomicStorezero64) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } 
++ v.reset(OpLOONG64MOVBUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVBUreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVBUreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVBUreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(uint8(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint8(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVBload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVBreg x:(MOVBload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVBreg x:(MOVBreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVBreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(int8(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(int8(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := 
v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVBreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVHreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVHUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) ++ // result: (MOVBstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: 
(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVDload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVDload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVDstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVDstore [off1+int32(off2)] 
{mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVDstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVFload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVFload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVFload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVFstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVFstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: 
is32Bit(int64(off1)+off2) ++ // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVHUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHUreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVHUreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHUreg x:(MOVHUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHUreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHUreg x:(MOVHUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHUreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(uint16(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint16(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ 
v.reset(OpLOONG64MOVHload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVHreg x:(MOVBload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVHload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVBreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg x:(MOVHreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVHreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(int16(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(int16(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) ++ // result: (MOVHstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVHreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ 
return true ++ } ++ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) ++ // result: (MOVHstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVHUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) ++ // result: (MOVHstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) ++ // result: (MOVHstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVVload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := 
auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVVload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVVreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVVreg x) ++ // cond: x.Uses == 1 ++ // result: (MOVVnop x) ++ for { ++ x := v_0 ++ if !(x.Uses == 1) { ++ break ++ } ++ v.reset(OpLOONG64MOVVnop) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVVreg (MOVVconst [c])) ++ // result: (MOVVconst [c]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := 
v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVWUload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWUreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVWUreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVHUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVWUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVWUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVHUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg x:(MOVWUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVWUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWUreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(uint32(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint32(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ 
sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVWload) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWreg(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (MOVWreg x:(MOVBload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVBUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVHload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVHUload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHUload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVWload _ _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVWload { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVBreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVBUreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVBUreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVHreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVHreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg x:(MOVWreg _)) ++ // result: (MOVVreg x) ++ for { ++ x := v_0 ++ if x.Op != OpLOONG64MOVWreg { ++ break ++ } ++ v.reset(OpLOONG64MOVVreg) ++ v.AddArg(x) ++ return true ++ } ++ // match: (MOVWreg (MOVVconst [c])) ++ // result: (MOVVconst [int64(int32(c))]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(int32(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := 
auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ val := v_1 ++ mem := v_2 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) ++ // result: (MOVWstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) ++ // result: (MOVWstore [off] {sym} ptr x mem) ++ for { ++ off := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ ptr := v_0 ++ if v_1.Op != OpLOONG64MOVWUreg { ++ break ++ } ++ x := v_1.Args[0] ++ mem := v_2 ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(off) ++ v.Aux = symToAux(sym) ++ v.AddArg3(ptr, x, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) ++ // cond: is32Bit(int64(off1)+off2) ++ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ off2 := auxIntToInt64(v_0.AuxInt) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(is32Bit(int64(off1) + off2)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(sym) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) ++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) ++ // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) ++ for { ++ off1 := auxIntToInt32(v.AuxInt) ++ sym1 := auxToSym(v.Aux) ++ if v_0.Op != OpLOONG64MOVVaddr { ++ break ++ } ++ off2 := auxIntToInt32(v_0.AuxInt) ++ sym2 := auxToSym(v_0.Aux) ++ ptr := v_0.Args[0] ++ mem := v_1 ++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstorezero) ++ v.AuxInt = int32ToAuxInt(off1 + int32(off2)) ++ v.Aux = symToAux(mergeSym(sym1, sym2)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64NEGV(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (NEGV (MOVVconst [c])) ++ // result: (MOVVconst [-c]) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ 
v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(-c) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64NOR(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (NOR x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (NORconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64NORconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64NORconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (NORconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [^(c|d)]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(^(c | d)) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64OR(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (OR x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (ORconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64ORconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ // match: (OR x x) ++ // result: x ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.copyOf(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64ORconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (ORconst [0] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // match: (ORconst [-1] _) ++ // result: (MOVVconst [-1]) ++ for { ++ if auxIntToInt64(v.AuxInt) != -1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(-1) ++ return true ++ } ++ // match: (ORconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [c|d]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c | d) ++ return true ++ } ++ // match: (ORconst [c] (ORconst [d] x)) ++ // cond: is32Bit(c|d) ++ // result: (ORconst [c|d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ORconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(c | d)) { ++ break ++ } ++ v.reset(OpLOONG64ORconst) ++ v.AuxInt = int64ToAuxInt(c | d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SGT(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SGT (MOVVconst [c]) x) ++ // cond: is32Bit(c) ++ // result: (SGTconst [c] x) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ x := v_1 ++ if !(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64SGTconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SGTU(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SGTU (MOVVconst [c]) x) ++ // cond: is32Bit(c) ++ // result: (SGTUconst [c] x) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0.AuxInt) ++ x := v_1 ++ if 
!(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64SGTUconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SGTUconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SGTUconst [c] (MOVVconst [d])) ++ // cond: uint64(c)>uint64(d) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(uint64(c) > uint64(d)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTUconst [c] (MOVVconst [d])) ++ // cond: uint64(c)<=uint64(d) ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(uint64(c) <= uint64(d)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTUconst [c] (MOVBUreg _)) ++ // cond: 0xff < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVBUreg || !(0xff < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTUconst [c] (MOVHUreg _)) ++ // cond: 0xffff < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTUconst [c] (ANDconst [m] _)) ++ // cond: uint64(m) < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ANDconst { ++ break ++ } ++ m := auxIntToInt64(v_0.AuxInt) ++ if !(uint64(m) < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTUconst [c] (SRLVconst _ [d])) ++ // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64SRLVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SGTconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SGTconst [c] (MOVVconst [d])) ++ // cond: c>d ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(c > d) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVVconst [d])) ++ // cond: c<=d ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(c <= d) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVBreg _)) ++ // cond: 0x7f < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVBreg || !(0x7f < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVBreg _)) ++ // cond: c <= -0x80 ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != 
OpLOONG64MOVBreg || !(c <= -0x80) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVBUreg _)) ++ // cond: 0xff < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVBUreg || !(0xff < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVBUreg _)) ++ // cond: c < 0 ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVBUreg || !(c < 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVHreg _)) ++ // cond: 0x7fff < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHreg || !(0x7fff < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVHreg _)) ++ // cond: c <= -0x8000 ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHreg || !(c <= -0x8000) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVHUreg _)) ++ // cond: 0xffff < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (MOVHUreg _)) ++ // cond: c < 0 ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVHUreg || !(c < 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (MOVWUreg _)) ++ // cond: c < 0 ++ // result: (MOVVconst [0]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVWUreg || !(c < 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SGTconst [c] (ANDconst [m] _)) ++ // cond: 0 <= m && m < c ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ANDconst { ++ break ++ } ++ m := auxIntToInt64(v_0.AuxInt) ++ if !(0 <= m && m < c) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ // match: (SGTconst [c] (SRLVconst _ [d])) ++ // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) ++ // result: (MOVVconst [1]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64SRLVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(1) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SLLV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SLLV _ (MOVVconst [c])) ++ // cond: uint64(c)>=64 ++ // result: (MOVVconst [0]) ++ for { ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(uint64(c) >= 64) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SLLV x (MOVVconst [c])) ++ // result: (SLLVconst x [c]) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpLOONG64SLLVconst) ++ v.AuxInt = 
int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SLLVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SLLVconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [d<<uint64(c)]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(d << uint64(c)) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SRAV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SRAV x (MOVVconst [c])) ++ // cond: uint64(c)>=64 ++ // result: (SRAVconst x [63]) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(uint64(c) >= 64) { ++ break ++ } ++ v.reset(OpLOONG64SRAVconst) ++ v.AuxInt = int64ToAuxInt(63) ++ v.AddArg(x) ++ return true ++ } ++ // match: (SRAV x (MOVVconst [c])) ++ // result: (SRAVconst x [c]) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpLOONG64SRAVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SRAVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SRAVconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [d>>uint64(c)]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(d >> uint64(c)) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SRLV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SRLV _ (MOVVconst [c])) ++ // cond: uint64(c)>=64 ++ // result: (MOVVconst [0]) ++ for { ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(uint64(c) >= 64) { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SRLV x (MOVVconst [c])) ++ // result: (SRLVconst x [c]) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpLOONG64SRLVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SRLVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SRLVconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [int64(uint64(d)>>uint64(c))]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SUBV(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (SUBV x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (SUBVconst [c] x) ++ for { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ break ++ } ++ v.reset(OpLOONG64SUBVconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ // match: (SUBV x x) ++ // result: (MOVVconst [0]) ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (SUBV (MOVVconst [0]) x) ++ // result: (NEGV x) ++ for { ++ if v_0.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ x := v_1 ++ v.reset(OpLOONG64NEGV) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64SUBVconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (SUBVconst [0] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // match: (SUBVconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [d-c]) ++ for { ++ c := 
auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(d - c) ++ return true ++ } ++ // match: (SUBVconst [c] (SUBVconst [d] x)) ++ // cond: is32Bit(-c-d) ++ // result: (ADDVconst [-c-d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64SUBVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(-c - d)) { ++ break ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(-c - d) ++ v.AddArg(x) ++ return true ++ } ++ // match: (SUBVconst [c] (ADDVconst [d] x)) ++ // cond: is32Bit(-c+d) ++ // result: (ADDVconst [-c+d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64ADDVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(-c + d)) { ++ break ++ } ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(-c + d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64XOR(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (XOR x (MOVVconst [c])) ++ // cond: is32Bit(c) ++ // result: (XORconst [c] x) ++ for { ++ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ continue ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ if !(is32Bit(c)) { ++ continue ++ } ++ v.reset(OpLOONG64XORconst) ++ v.AuxInt = int64ToAuxInt(c) ++ v.AddArg(x) ++ return true ++ } ++ break ++ } ++ // match: (XOR x x) ++ // result: (MOVVconst [0]) ++ for { ++ x := v_0 ++ if x != v_1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLOONG64XORconst(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (XORconst [0] x) ++ // result: x ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ x := v_0 ++ v.copyOf(x) ++ return true ++ } ++ // match: (XORconst [-1] x) ++ // result: (NORconst [0] x) ++ for { ++ if auxIntToInt64(v.AuxInt) != -1 { ++ break ++ } ++ x := v_0 ++ v.reset(OpLOONG64NORconst) ++ v.AuxInt = int64ToAuxInt(0) ++ v.AddArg(x) ++ return true ++ } ++ // match: (XORconst [c] (MOVVconst [d])) ++ // result: (MOVVconst [c^d]) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c ^ d) ++ return true ++ } ++ // match: (XORconst [c] (XORconst [d] x)) ++ // cond: is32Bit(c^d) ++ // result: (XORconst [c^d] x) ++ for { ++ c := auxIntToInt64(v.AuxInt) ++ if v_0.Op != OpLOONG64XORconst { ++ break ++ } ++ d := auxIntToInt64(v_0.AuxInt) ++ x := v_0.Args[0] ++ if !(is32Bit(c ^ d)) { ++ break ++ } ++ v.reset(OpLOONG64XORconst) ++ v.AuxInt = int64ToAuxInt(c ^ d) ++ v.AddArg(x) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLeq16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq16 x y) ++ // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func 
rewriteValueLOONG64_OpLeq16U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq16U x y) ++ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq32 x y) ++ // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq32F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Leq32F x y) ++ // result: (FPFlagTrue (CMPGEF y x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGEF, types.TypeFlags) ++ v0.AddArg2(y, x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq32U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq32U x y) ++ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq64 x y) ++ // result: (XOR (MOVVconst [1]) (SGT x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq64F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Leq64F x y) ++ // result: (FPFlagTrue (CMPGED y x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGED, types.TypeFlags) ++ v0.AddArg2(y, x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq64U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq64U x y) ++ // result: (XOR (MOVVconst [1]) (SGTU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := 
b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v1.AddArg2(x, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq8 x y) ++ // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLeq8U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Leq8U x y) ++ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64XOR) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(1) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(x) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less16 x y) ++ // result: (SGT (SignExt16to64 y) (SignExt16to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGT) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess16U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less16U x y) ++ // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less32 x y) ++ // result: (SGT (SignExt32to64 y) (SignExt32to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGT) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess32F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Less32F x y) ++ // result: (FPFlagTrue (CMPGTF y x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTF, types.TypeFlags) ++ v0.AddArg2(y, x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess32U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less32U x y) ++ // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v0.AddArg(y) ++ v1 
:= b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (Less64 x y) ++ // result: (SGT y x) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGT) ++ v.AddArg2(y, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess64F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Less64F x y) ++ // result: (FPFlagTrue (CMPGTD y x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagTrue) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTD, types.TypeFlags) ++ v0.AddArg2(y, x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess64U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (Less64U x y) ++ // result: (SGTU y x) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v.AddArg2(y, x) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less8 x y) ++ // result: (SGT (SignExt8to64 y) (SignExt8to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGT) ++ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLess8U(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Less8U x y) ++ // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v0.AddArg(y) ++ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v1.AddArg(x) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLoad(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (Load ptr mem) ++ // cond: t.IsBoolean() ++ // result: (MOVBUload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(t.IsBoolean()) { ++ break ++ } ++ v.reset(OpLOONG64MOVBUload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is8BitInt(t) && isSigned(t)) ++ // result: (MOVBload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is8BitInt(t) && isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is8BitInt(t) && !isSigned(t)) ++ // result: (MOVBUload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is8BitInt(t) && !isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVBUload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is16BitInt(t) && isSigned(t)) ++ // result: (MOVHload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is16BitInt(t) && isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is16BitInt(t) && !isSigned(t)) ++ // result: (MOVHUload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is16BitInt(t) && !isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVHUload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is32BitInt(t) && isSigned(t)) ++ // result: (MOVWload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if 
!(is32BitInt(t) && isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is32BitInt(t) && !isSigned(t)) ++ // result: (MOVWUload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is32BitInt(t) && !isSigned(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWUload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: (is64BitInt(t) || isPtr(t)) ++ // result: (MOVVload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is64BitInt(t) || isPtr(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: is32BitFloat(t) ++ // result: (MOVFload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is32BitFloat(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVFload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Load ptr mem) ++ // cond: is64BitFloat(t) ++ // result: (MOVDload ptr mem) ++ for { ++ t := v.Type ++ ptr := v_0 ++ mem := v_1 ++ if !(is64BitFloat(t)) { ++ break ++ } ++ v.reset(OpLOONG64MOVDload) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpLocalAddr(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (LocalAddr {sym} base _) ++ // result: (MOVVaddr {sym} base) ++ for { ++ sym := auxToSym(v.Aux) ++ base := v_0 ++ v.reset(OpLOONG64MOVVaddr) ++ v.Aux = symToAux(sym) ++ v.AddArg(base) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh16x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh16x16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh16x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh16x32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh16x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh16x64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ 
v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh16x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh16x8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh32x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh32x16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh32x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh32x32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh32x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh32x64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh32x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh32x8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = 
int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh64x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh64x16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh64x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh64x32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh64x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh64x64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh64x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh64x8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh8x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh8x16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := 
b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh8x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh8x32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh8x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh8x64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpLsh8x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Lsh8x8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod16 x y) ++ // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod16u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod16u x y) ++ // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, 
OpZeroExt16to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod32 x y) ++ // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod32u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod32u x y) ++ // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod64 x y) ++ // result: (Select0 (DIVV x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod64u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod64u x y) ++ // result: (Select0 (DIVVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod8 x y) ++ // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) ++ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMod8u(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mod8u x y) ++ // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect0) ++ v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMove(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config ++ typ := &b.Func.Config.Types ++ // match: (Move [0] _ _ mem) ++ // result: mem ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ mem := v_2 ++ v.copyOf(mem) ++ return true ++ } ++ // 
match: (Move [1] dst src mem) ++ // result: (MOVBstore dst (MOVBload src mem) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 1 { ++ break ++ } ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v0.AddArg2(src, mem) ++ v.AddArg3(dst, v0, mem) ++ return true ++ } ++ // match: (Move [2] {t} dst src mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore dst (MOVHload src mem) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 2 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v0.AddArg2(src, mem) ++ v.AddArg3(dst, v0, mem) ++ return true ++ } ++ // match: (Move [2] dst src mem) ++ // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 2 { ++ break ++ } ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v0.AuxInt = int32ToAuxInt(1) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v2.AddArg2(src, mem) ++ v1.AddArg3(dst, v2, mem) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [4] {t} dst src mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore dst (MOVWload src mem) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v0.AddArg2(src, mem) ++ v.AddArg3(dst, v0, mem) ++ return true ++ } ++ // match: (Move [4] {t} dst src mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(2) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v0.AuxInt = int32ToAuxInt(2) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v2.AddArg2(src, mem) ++ v1.AddArg3(dst, v2, mem) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [4] dst src mem) ++ // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(3) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v0.AuxInt = int32ToAuxInt(3) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(2) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v2.AuxInt = int32ToAuxInt(2) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v3.AuxInt = int32ToAuxInt(1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v4.AuxInt = int32ToAuxInt(1) ++ v4.AddArg2(src, mem) ++ v5 := 
b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v6 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v6.AddArg2(src, mem) ++ v5.AddArg3(dst, v6, mem) ++ v3.AddArg3(dst, v4, v5) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [8] {t} dst src mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore dst (MOVVload src mem) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v0.AddArg2(src, mem) ++ v.AddArg3(dst, v0, mem) ++ return true ++ } ++ // match: (Move [8] {t} dst src mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(4) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v0.AuxInt = int32ToAuxInt(4) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v2.AddArg2(src, mem) ++ v1.AddArg3(dst, v2, mem) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [8] {t} dst src mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(6) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v0.AuxInt = int32ToAuxInt(6) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(4) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v2.AuxInt = int32ToAuxInt(4) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v3.AuxInt = int32ToAuxInt(2) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v4.AuxInt = int32ToAuxInt(2) ++ v4.AddArg2(src, mem) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v6 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v6.AddArg2(src, mem) ++ v5.AddArg3(dst, v6, mem) ++ v3.AddArg3(dst, v4, v5) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [3] dst src mem) ++ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 3 { ++ break ++ } ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(2) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v0.AuxInt = int32ToAuxInt(2) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(1) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v2.AuxInt = int32ToAuxInt(1) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) ++ v4.AddArg2(src, 
mem) ++ v3.AddArg3(dst, v4, mem) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [6] {t} dst src mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 6 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(4) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v0.AuxInt = int32ToAuxInt(4) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(2) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v2.AuxInt = int32ToAuxInt(2) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) ++ v4.AddArg2(src, mem) ++ v3.AddArg3(dst, v4, mem) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [12] {t} dst src mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 12 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(8) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v0.AuxInt = int32ToAuxInt(8) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(4) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v2.AuxInt = int32ToAuxInt(4) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) ++ v4.AddArg2(src, mem) ++ v3.AddArg3(dst, v4, mem) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [16] {t} dst src mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 16 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(8) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v0.AuxInt = int32ToAuxInt(8) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v2.AddArg2(src, mem) ++ v1.AddArg3(dst, v2, mem) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [24] {t} dst src mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 24 { ++ break ++ } ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(16) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v0.AuxInt = int32ToAuxInt(16) ++ v0.AddArg2(src, mem) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(8) ++ v2 := 
b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v2.AuxInt = int32ToAuxInt(8) ++ v2.AddArg2(src, mem) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) ++ v4.AddArg2(src, mem) ++ v3.AddArg3(dst, v4, mem) ++ v1.AddArg3(dst, v2, v3) ++ v.AddArg3(dst, v0, v1) ++ return true ++ } ++ // match: (Move [s] {t} dst src mem) ++ // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s) ++ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem) ++ for { ++ s := auxIntToInt64(v.AuxInt) ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { ++ break ++ } ++ v.reset(OpLOONG64DUFFCOPY) ++ v.AuxInt = int64ToAuxInt(16 * (128 - s/8)) ++ v.AddArg3(dst, src, mem) ++ return true ++ } ++ // match: (Move [s] {t} dst src mem) ++ // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 ++ // result: (LoweredMove [t.Alignment()] dst src (ADDVconst src [s-moveSize(t.Alignment(), config)]) mem) ++ for { ++ s := auxIntToInt64(v.AuxInt) ++ t := auxToType(v.Aux) ++ dst := v_0 ++ src := v_1 ++ mem := v_2 ++ if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) { ++ break ++ } ++ v.reset(OpLOONG64LoweredMove) ++ v.AuxInt = int64ToAuxInt(t.Alignment()) ++ v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, src.Type) ++ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) ++ v0.AddArg(src) ++ v.AddArg4(dst, src, v0, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpMul16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mul16 x y) ++ // result: (Select1 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMul32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mul32 x y) ++ // result: (Select1 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMul64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mul64 x y) ++ // result: (Select1 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpMul8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Mul8 x y) ++ // result: (Select1 (MULVU x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpSelect1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Neq16 x y) ++ // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, 
typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Neq32 x y) ++ // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq32F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Neq32F x y) ++ // result: (FPFlagFalse (CMPEQF x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagFalse) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Neq64 x y) ++ // result: (SGTU (XOR x y) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v0.AddArg2(x, y) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq64F(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Neq64F x y) ++ // result: (FPFlagFalse (CMPEQD x y)) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64FPFlagFalse) ++ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQD, types.TypeFlags) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeq8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Neq8 x y) ++ // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v1.AddArg(x) ++ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v2.AddArg(y) ++ v0.AddArg2(v1, v2) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNeqPtr(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (NeqPtr x y) ++ // result: (SGTU (XOR x y) (MOVVconst [0])) ++ for { ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SGTU) ++ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) ++ v0.AddArg2(x, y) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpNot(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (Not x) ++ // result: (XORconst [1] x) ++ for { ++ x := v_0 ++ v.reset(OpLOONG64XORconst) ++ v.AuxInt = int64ToAuxInt(1) ++ v.AddArg(x) ++ return 
true ++ } ++} ++func rewriteValueLOONG64_OpOffPtr(v *Value) bool { ++ v_0 := v.Args[0] ++ // match: (OffPtr [off] ptr:(SP)) ++ // result: (MOVVaddr [int32(off)] ptr) ++ for { ++ off := auxIntToInt64(v.AuxInt) ++ ptr := v_0 ++ if ptr.Op != OpSP { ++ break ++ } ++ v.reset(OpLOONG64MOVVaddr) ++ v.AuxInt = int32ToAuxInt(int32(off)) ++ v.AddArg(ptr) ++ return true ++ } ++ // match: (OffPtr [off] ptr) ++ // result: (ADDVconst [off] ptr) ++ for { ++ off := auxIntToInt64(v.AuxInt) ++ ptr := v_0 ++ v.reset(OpLOONG64ADDVconst) ++ v.AuxInt = int64ToAuxInt(off) ++ v.AddArg(ptr) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpPanicBounds(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (PanicBounds [kind] x y mem) ++ // cond: boundsABI(kind) == 0 ++ // result: (LoweredPanicBoundsA [kind] x y mem) ++ for { ++ kind := auxIntToInt64(v.AuxInt) ++ x := v_0 ++ y := v_1 ++ mem := v_2 ++ if !(boundsABI(kind) == 0) { ++ break ++ } ++ v.reset(OpLOONG64LoweredPanicBoundsA) ++ v.AuxInt = int64ToAuxInt(kind) ++ v.AddArg3(x, y, mem) ++ return true ++ } ++ // match: (PanicBounds [kind] x y mem) ++ // cond: boundsABI(kind) == 1 ++ // result: (LoweredPanicBoundsB [kind] x y mem) ++ for { ++ kind := auxIntToInt64(v.AuxInt) ++ x := v_0 ++ y := v_1 ++ mem := v_2 ++ if !(boundsABI(kind) == 1) { ++ break ++ } ++ v.reset(OpLOONG64LoweredPanicBoundsB) ++ v.AuxInt = int64ToAuxInt(kind) ++ v.AddArg3(x, y, mem) ++ return true ++ } ++ // match: (PanicBounds [kind] x y mem) ++ // cond: boundsABI(kind) == 2 ++ // result: (LoweredPanicBoundsC [kind] x y mem) ++ for { ++ kind := auxIntToInt64(v.AuxInt) ++ x := v_0 ++ y := v_1 ++ mem := v_2 ++ if !(boundsABI(kind) == 2) { ++ break ++ } ++ v.reset(OpLOONG64LoweredPanicBoundsC) ++ v.AuxInt = int64ToAuxInt(kind) ++ v.AddArg3(x, y, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRotateLeft16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (RotateLeft16 x (MOVVconst [c])) ++ // result: (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) ++ for { ++ t := v.Type ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpOr16) ++ v0 := b.NewValue0(v.Pos, OpLsh16x64, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(c & 15) ++ v0.AddArg2(x, v1) ++ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(-c & 15) ++ v2.AddArg2(x, v3) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRotateLeft32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (RotateLeft32 x (MOVVconst [c])) ++ // result: (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) ++ for { ++ t := v.Type ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpOr32) ++ v0 := b.NewValue0(v.Pos, OpLsh32x64, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(c & 31) ++ v0.AddArg2(x, v1) ++ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(-c & 31) ++ v2.AddArg2(x, v3) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRotateLeft64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := 
v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (RotateLeft64 x (MOVVconst [c])) ++ // result: (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) ++ for { ++ t := v.Type ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpOr64) ++ v0 := b.NewValue0(v.Pos, OpLsh64x64, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(c & 63) ++ v0.AddArg2(x, v1) ++ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(-c & 63) ++ v2.AddArg2(x, v3) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRotateLeft8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (RotateLeft8 x (MOVVconst [c])) ++ // result: (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) ++ for { ++ t := v.Type ++ x := v_0 ++ if v_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_1.AuxInt) ++ v.reset(OpOr8) ++ v0 := b.NewValue0(v.Pos, OpLsh8x64, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v1.AuxInt = int64ToAuxInt(c & 7) ++ v0.AddArg2(x, v1) ++ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(-c & 7) ++ v2.AddArg2(x, v3) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpRsh16Ux16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16Ux16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16Ux32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16Ux32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16Ux64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16Ux64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, 
OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v4.AddArg(x) ++ v3.AddArg2(v4, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16Ux8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16Ux8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16x16 x y) ++ // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16x32 x y) ++ // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16x64 x y) ++ // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ 
v3.AddArg2(y, v4) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh16x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh16x8 x y) ++ // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32Ux16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32Ux16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32Ux32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32Ux32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32Ux64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32Ux64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v4.AddArg(x) ++ v3.AddArg2(v4, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32Ux8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32Ux8 x y) ++ // 
result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32x16 x y) ++ // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32x32 x y) ++ // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32x64 x y) ++ // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(y, v4) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh32x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh32x8 x y) ++ // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ 
v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64Ux16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64Ux16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64Ux32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64Ux32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64Ux64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64Ux64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v3.AddArg2(x, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64Ux8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64Ux8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4.AddArg2(x, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64x16 x y) ++ // result: (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++ for 
{ ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v2.AddArg2(v3, v4) ++ v1.AddArg(v2) ++ v0.AddArg2(v1, v3) ++ v.AddArg2(x, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64x32 x y) ++ // result: (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v2.AddArg2(v3, v4) ++ v1.AddArg(v2) ++ v0.AddArg2(v1, v3) ++ v.AddArg2(x, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64x64 x y) ++ // result: (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v3.AuxInt = int64ToAuxInt(63) ++ v2.AddArg2(y, v3) ++ v1.AddArg(v2) ++ v0.AddArg2(v1, y) ++ v.AddArg2(x, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh64x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh64x8 x y) ++ // result: (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v2.AddArg2(v3, v4) ++ v1.AddArg(v2) ++ v0.AddArg2(v1, v3) ++ v.AddArg2(x, v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8Ux16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8Ux16 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8Ux32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 
:= v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8Ux32 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8Ux64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8Ux64 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v1.AddArg2(v2, y) ++ v0.AddArg(v1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v4.AddArg(x) ++ v3.AddArg2(v4, y) ++ v.AddArg2(v0, v3) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8Ux8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8Ux8 x y) ++ // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64AND) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(64) ++ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v3.AddArg(y) ++ v1.AddArg2(v2, v3) ++ v0.AddArg(v1) ++ v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) ++ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v5.AddArg(x) ++ v4.AddArg2(v5, v3) ++ v.AddArg2(v0, v4) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8x16(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8x16 x y) ++ // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8x32(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8x32 x y) ++ // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, 
OpSignExt8to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8x64(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8x64 x y) ++ // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v4.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(y, v4) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, y) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpRsh8x8(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Rsh8x8 x y) ++ // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) ++ for { ++ t := v.Type ++ x := v_0 ++ y := v_1 ++ v.reset(OpLOONG64SRAV) ++ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) ++ v0.AddArg(x) ++ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) ++ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) ++ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) ++ v4.AddArg(y) ++ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v5.AuxInt = int64ToAuxInt(63) ++ v3.AddArg2(v4, v5) ++ v2.AddArg(v3) ++ v1.AddArg2(v2, v4) ++ v.AddArg2(v0, v1) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpSelect0(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Select0 (Mul64uover x y)) ++ // result: (Select1 (MULVU x y)) ++ for { ++ if v_0.Op != OpMul64uover { ++ break ++ } ++ y := v_0.Args[1] ++ x := v_0.Args[0] ++ v.reset(OpSelect1) ++ v.Type = typ.UInt64 ++ v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v0.AddArg2(x, y) ++ v.AddArg(v0) ++ return true ++ } ++ // match: (Select0 (DIVVU _ (MOVVconst [1]))) ++ // result: (MOVVconst [0]) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (Select0 (DIVVU x (MOVVconst [c]))) ++ // cond: isPowerOfTwo64(c) ++ // result: (ANDconst [c-1] x) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_1.AuxInt) ++ if !(isPowerOfTwo64(c)) { ++ break ++ } ++ v.reset(OpLOONG64ANDconst) ++ v.AuxInt = int64ToAuxInt(c - 1) ++ v.AddArg(x) ++ return true ++ } ++ // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [c%d]) ++ for { ++ if v_0.Op != OpLOONG64DIVV { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } 
++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0_1.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c % d) ++ return true ++ } ++ // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [int64(uint64(c)%uint64(d))]) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0_1.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpSelect1(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ typ := &b.Func.Config.Types ++ // match: (Select1 (Mul64uover x y)) ++ // result: (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) ++ for { ++ if v_0.Op != OpMul64uover { ++ break ++ } ++ y := v_0.Args[1] ++ x := v_0.Args[0] ++ v.reset(OpLOONG64SGTU) ++ v.Type = typ.Bool ++ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) ++ v1.AddArg2(x, y) ++ v0.AddArg(v1) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v2.AuxInt = int64ToAuxInt(0) ++ v.AddArg2(v0, v2) ++ return true ++ } ++ // match: (Select1 (MULVU x (MOVVconst [-1]))) ++ // result: (NEGV x) ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != -1 { ++ break ++ } ++ v.reset(OpLOONG64NEGV) ++ v.AddArg(x) ++ return true ++ } ++ // match: (Select1 (MULVU _ (MOVVconst [0]))) ++ // result: (MOVVconst [0]) ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(0) ++ return true ++ } ++ // match: (Select1 (MULVU x (MOVVconst [1]))) ++ // result: x ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { ++ break ++ } ++ v.copyOf(x) ++ return true ++ } ++ // match: (Select1 (MULVU x (MOVVconst [c]))) ++ // cond: isPowerOfTwo64(c) ++ // result: (SLLVconst [log64(c)] x) ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_1.AuxInt) ++ if !(isPowerOfTwo64(c)) { ++ break ++ } ++ v.reset(OpLOONG64SLLVconst) ++ v.AuxInt = int64ToAuxInt(log64(c)) ++ v.AddArg(x) ++ return true ++ } ++ // match: (Select1 (DIVVU x (MOVVconst [1]))) ++ // result: x ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { ++ break ++ } ++ v.copyOf(x) ++ return true ++ } ++ // match: (Select1 (DIVVU x (MOVVconst [c]))) ++ // cond: isPowerOfTwo64(c) ++ // result: (SRLVconst [log64(c)] x) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := 
auxIntToInt64(v_0_1.AuxInt) ++ if !(isPowerOfTwo64(c)) { ++ break ++ } ++ v.reset(OpLOONG64SRLVconst) ++ v.AuxInt = int64ToAuxInt(log64(c)) ++ v.AddArg(x) ++ return true ++ } ++ // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [c*d]) ++ for { ++ if v_0.Op != OpLOONG64MULVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0_1.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c * d) ++ return true ++ } ++ // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [c/d]) ++ for { ++ if v_0.Op != OpLOONG64DIVV { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0_1.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(c / d) ++ return true ++ } ++ // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) ++ // result: (MOVVconst [int64(uint64(c)/uint64(d))]) ++ for { ++ if v_0.Op != OpLOONG64DIVVU { ++ break ++ } ++ _ = v_0.Args[1] ++ v_0_0 := v_0.Args[0] ++ if v_0_0.Op != OpLOONG64MOVVconst { ++ break ++ } ++ c := auxIntToInt64(v_0_0.AuxInt) ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst { ++ break ++ } ++ d := auxIntToInt64(v_0_1.AuxInt) ++ v.reset(OpLOONG64MOVVconst) ++ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpSlicemask(v *Value) bool { ++ v_0 := v.Args[0] ++ b := v.Block ++ // match: (Slicemask x) ++ // result: (SRAVconst (NEGV x) [63]) ++ for { ++ t := v.Type ++ x := v_0 ++ v.reset(OpLOONG64SRAVconst) ++ v.AuxInt = int64ToAuxInt(63) ++ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) ++ v0.AddArg(x) ++ v.AddArg(v0) ++ return true ++ } ++} ++func rewriteValueLOONG64_OpStore(v *Value) bool { ++ v_2 := v.Args[2] ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 1 ++ // result: (MOVBstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 1) { ++ break ++ } ++ v.reset(OpLOONG64MOVBstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 2 ++ // result: (MOVHstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 2) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 4 && !is32BitFloat(val.Type) ++ // result: (MOVWstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 4 && !is32BitFloat(val.Type)) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 8 && !is64BitFloat(val.Type) ++ // result: (MOVVstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 8 && !is64BitFloat(val.Type)) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 4 && is32BitFloat(val.Type) ++ // result: 
(MOVFstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 4 && is32BitFloat(val.Type)) { ++ break ++ } ++ v.reset(OpLOONG64MOVFstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ // match: (Store {t} ptr val mem) ++ // cond: t.Size() == 8 && is64BitFloat(val.Type) ++ // result: (MOVDstore ptr val mem) ++ for { ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ val := v_1 ++ mem := v_2 ++ if !(t.Size() == 8 && is64BitFloat(val.Type)) { ++ break ++ } ++ v.reset(OpLOONG64MOVDstore) ++ v.AddArg3(ptr, val, mem) ++ return true ++ } ++ return false ++} ++func rewriteValueLOONG64_OpZero(v *Value) bool { ++ v_1 := v.Args[1] ++ v_0 := v.Args[0] ++ b := v.Block ++ config := b.Func.Config ++ typ := &b.Func.Config.Types ++ // match: (Zero [0] _ mem) ++ // result: mem ++ for { ++ if auxIntToInt64(v.AuxInt) != 0 { ++ break ++ } ++ mem := v_1 ++ v.copyOf(mem) ++ return true ++ } ++ // match: (Zero [1] ptr mem) ++ // result: (MOVBstore ptr (MOVVconst [0]) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 1 { ++ break ++ } ++ ptr := v_0 ++ mem := v_1 ++ v.reset(OpLOONG64MOVBstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ // match: (Zero [2] {t} ptr mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore ptr (MOVVconst [0]) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 2 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ // match: (Zero [2] ptr mem) ++ // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 2 { ++ break ++ } ++ ptr := v_0 ++ mem := v_1 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(1) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(0) ++ v1.AddArg3(ptr, v0, mem) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [4] {t} ptr mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore ptr (MOVVconst [0]) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ // match: (Zero [4] {t} ptr mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(2) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(0) ++ v1.AddArg3(ptr, v0, mem) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [4] ptr mem) ++ // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))) ++ for { ++ if 
auxIntToInt64(v.AuxInt) != 4 { ++ break ++ } ++ ptr := v_0 ++ mem := v_1 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(3) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(2) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(1) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v3.AuxInt = int32ToAuxInt(0) ++ v3.AddArg3(ptr, v0, mem) ++ v2.AddArg3(ptr, v0, v3) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [8] {t} ptr mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore ptr (MOVVconst [0]) mem) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ // match: (Zero [8] {t} ptr mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(4) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(0) ++ v1.AddArg3(ptr, v0, mem) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [8] {t} ptr mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 8 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(6) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(4) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(2) ++ v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v3.AuxInt = int32ToAuxInt(0) ++ v3.AddArg3(ptr, v0, mem) ++ v2.AddArg3(ptr, v0, v3) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [3] ptr mem) ++ // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 3 { ++ break ++ } ++ ptr := v_0 ++ mem := v_1 ++ v.reset(OpLOONG64MOVBstore) ++ v.AuxInt = int32ToAuxInt(2) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(1) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(0) ++ v2.AddArg3(ptr, v0, mem) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [6] {t} ptr mem) ++ // cond: t.Alignment()%2 == 0 ++ // result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr 
(MOVVconst [0]) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 6 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%2 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVHstore) ++ v.AuxInt = int32ToAuxInt(4) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(2) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(0) ++ v2.AddArg3(ptr, v0, mem) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [12] {t} ptr mem) ++ // cond: t.Alignment()%4 == 0 ++ // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 12 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%4 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVWstore) ++ v.AuxInt = int32ToAuxInt(8) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(4) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(0) ++ v2.AddArg3(ptr, v0, mem) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [16] {t} ptr mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)) ++ for { ++ if auxIntToInt64(v.AuxInt) != 16 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(8) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(0) ++ v1.AddArg3(ptr, v0, mem) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [24] {t} ptr mem) ++ // cond: t.Alignment()%8 == 0 ++ // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))) ++ for { ++ if auxIntToInt64(v.AuxInt) != 24 { ++ break ++ } ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(t.Alignment()%8 == 0) { ++ break ++ } ++ v.reset(OpLOONG64MOVVstore) ++ v.AuxInt = int32ToAuxInt(16) ++ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) ++ v0.AuxInt = int64ToAuxInt(0) ++ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v1.AuxInt = int32ToAuxInt(8) ++ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) ++ v2.AuxInt = int32ToAuxInt(0) ++ v2.AddArg3(ptr, v0, mem) ++ v1.AddArg3(ptr, v0, v2) ++ v.AddArg3(ptr, v0, v1) ++ return true ++ } ++ // match: (Zero [s] {t} ptr mem) ++ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice ++ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem) ++ for { ++ s := auxIntToInt64(v.AuxInt) ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) { ++ break ++ } ++ v.reset(OpLOONG64DUFFZERO) ++ v.AuxInt = int64ToAuxInt(8 * (128 - s/8)) ++ v.AddArg2(ptr, mem) ++ return true ++ } ++ // match: (Zero [s] {t} ptr mem) ++ // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 ++ // result: (LoweredZero [t.Alignment()] ptr (ADDVconst ptr 
[s-moveSize(t.Alignment(), config)]) mem) ++ for { ++ s := auxIntToInt64(v.AuxInt) ++ t := auxToType(v.Aux) ++ ptr := v_0 ++ mem := v_1 ++ if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) { ++ break ++ } ++ v.reset(OpLOONG64LoweredZero) ++ v.AuxInt = int64ToAuxInt(t.Alignment()) ++ v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, ptr.Type) ++ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) ++ v0.AddArg(ptr) ++ v.AddArg3(ptr, v0, mem) ++ return true ++ } ++ return false ++} ++func rewriteBlockLOONG64(b *Block) bool { ++ switch b.Kind { ++ case BlockLOONG64EQ: ++ // match: (EQ (FPFlagTrue cmp) yes no) ++ // result: (FPF cmp yes no) ++ for b.Controls[0].Op == OpLOONG64FPFlagTrue { ++ v_0 := b.Controls[0] ++ cmp := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64FPF, cmp) ++ return true ++ } ++ // match: (EQ (FPFlagFalse cmp) yes no) ++ // result: (FPT cmp yes no) ++ for b.Controls[0].Op == OpLOONG64FPFlagFalse { ++ v_0 := b.Controls[0] ++ cmp := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64FPT, cmp) ++ return true ++ } ++ // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) ++ // result: (NE cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGT { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, cmp) ++ return true ++ } ++ // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) ++ // result: (NE cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTU { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, cmp) ++ return true ++ } ++ // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) ++ // result: (NE cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTconst { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, cmp) ++ return true ++ } ++ // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) ++ // result: (NE cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTUconst { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, cmp) ++ return true ++ } ++ // match: (EQ (SGTUconst [1] x) yes no) ++ // result: (NE x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTUconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ x := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64NE, x) ++ return true ++ } ++ // match: (EQ (SGTU x (MOVVconst [0])) yes no) ++ // result: (EQ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTU { ++ v_0 := b.Controls[0] ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, x) ++ return true ++ } ++ // match: (EQ (SGTconst [0] x) yes no) ++ // result: (GEZ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ x := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64GEZ, x) ++ return true ++ } ++ // match: (EQ (SGT x (MOVVconst [0])) yes no) ++ // result: (LEZ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGT { ++ v_0 := b.Controls[0] ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if 
v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ b.resetWithControl(BlockLOONG64LEZ, x) ++ return true ++ } ++ // match: (EQ (MOVVconst [0]) yes no) ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (EQ (MOVVconst [c]) yes no) ++ // cond: c != 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c != 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockLOONG64GEZ: ++ // match: (GEZ (MOVVconst [c]) yes no) ++ // cond: c >= 0 ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c >= 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (GEZ (MOVVconst [c]) yes no) ++ // cond: c < 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c < 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockLOONG64GTZ: ++ // match: (GTZ (MOVVconst [c]) yes no) ++ // cond: c > 0 ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c > 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (GTZ (MOVVconst [c]) yes no) ++ // cond: c <= 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c <= 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockIf: ++ // match: (If cond yes no) ++ // result: (NE cond yes no) ++ for { ++ cond := b.Controls[0] ++ b.resetWithControl(BlockLOONG64NE, cond) ++ return true ++ } ++ case BlockLOONG64LEZ: ++ // match: (LEZ (MOVVconst [c]) yes no) ++ // cond: c <= 0 ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c <= 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (LEZ (MOVVconst [c]) yes no) ++ // cond: c > 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c > 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockLOONG64LTZ: ++ // match: (LTZ (MOVVconst [c]) yes no) ++ // cond: c < 0 ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c < 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ // match: (LTZ (MOVVconst [c]) yes no) ++ // cond: c >= 0 ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c >= 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ case BlockLOONG64NE: ++ // match: (NE (FPFlagTrue cmp) yes no) ++ // result: (FPT cmp yes no) ++ for b.Controls[0].Op == OpLOONG64FPFlagTrue { ++ v_0 := b.Controls[0] ++ cmp := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64FPT, cmp) ++ return true ++ } ++ // match: (NE (FPFlagFalse cmp) yes no) ++ // result: (FPF cmp yes no) ++ for b.Controls[0].Op 
== OpLOONG64FPFlagFalse { ++ v_0 := b.Controls[0] ++ cmp := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64FPF, cmp) ++ return true ++ } ++ // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) ++ // result: (EQ cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGT { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, cmp) ++ return true ++ } ++ // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) ++ // result: (EQ cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTU { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, cmp) ++ return true ++ } ++ // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) ++ // result: (EQ cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTconst { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, cmp) ++ return true ++ } ++ // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) ++ // result: (EQ cmp yes no) ++ for b.Controls[0].Op == OpLOONG64XORconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ cmp := v_0.Args[0] ++ if cmp.Op != OpLOONG64SGTUconst { ++ break ++ } ++ b.resetWithControl(BlockLOONG64EQ, cmp) ++ return true ++ } ++ // match: (NE (SGTUconst [1] x) yes no) ++ // result: (EQ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTUconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 1 { ++ break ++ } ++ x := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64EQ, x) ++ return true ++ } ++ // match: (NE (SGTU x (MOVVconst [0])) yes no) ++ // result: (NE x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTU { ++ v_0 := b.Controls[0] ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ b.resetWithControl(BlockLOONG64NE, x) ++ return true ++ } ++ // match: (NE (SGTconst [0] x) yes no) ++ // result: (LTZ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGTconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ x := v_0.Args[0] ++ b.resetWithControl(BlockLOONG64LTZ, x) ++ return true ++ } ++ // match: (NE (SGT x (MOVVconst [0])) yes no) ++ // result: (GTZ x yes no) ++ for b.Controls[0].Op == OpLOONG64SGT { ++ v_0 := b.Controls[0] ++ _ = v_0.Args[1] ++ x := v_0.Args[0] ++ v_0_1 := v_0.Args[1] ++ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { ++ break ++ } ++ b.resetWithControl(BlockLOONG64GTZ, x) ++ return true ++ } ++ // match: (NE (MOVVconst [0]) yes no) ++ // result: (First no yes) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ if auxIntToInt64(v_0.AuxInt) != 0 { ++ break ++ } ++ b.Reset(BlockFirst) ++ b.swapSuccessors() ++ return true ++ } ++ // match: (NE (MOVVconst [c]) yes no) ++ // cond: c != 0 ++ // result: (First yes no) ++ for b.Controls[0].Op == OpLOONG64MOVVconst { ++ v_0 := b.Controls[0] ++ c := auxIntToInt64(v_0.AuxInt) ++ if !(c != 0) { ++ break ++ } ++ b.Reset(BlockFirst) ++ return true ++ } ++ } ++ return false ++} +-- +2.27.0 + diff --git a/0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch b/0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch new file mode 100644 index 0000000..643e43b --- /dev/null +++ 
b/0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch @@ -0,0 +1,27 @@ +From c32d0dbbe840280c4b03377c57a4de2d02677f13 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 25 Nov 2021 10:26:47 +0800 +Subject: [PATCH 10/56] cmd/compile/internal/ssa: inline memmove with known + size + +Change-Id: I1534b66b527efaf2bbaa8e6e6ac0618aac0b5930 +--- + src/cmd/compile/internal/ssa/rewrite.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go +index 375c4d5..8a9cbcd 100644 +--- a/src/cmd/compile/internal/ssa/rewrite.go ++++ b/src/cmd/compile/internal/ssa/rewrite.go +@@ -1345,7 +1345,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool { + return sz <= 8 + case "s390x", "ppc64", "ppc64le": + return sz <= 8 || disjoint(dst, sz, src, sz) +- case "arm", "mips", "mips64", "mipsle", "mips64le": ++ case "arm", "loong64", "mips", "mips64", "mipsle", "mips64le": + return sz <= 4 + } + return false +-- +2.27.0 + diff --git a/0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch b/0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch new file mode 100644 index 0000000..6df30d6 --- /dev/null +++ b/0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch @@ -0,0 +1,37 @@ +From eee49191c82da545ce9a04e054bf35888a0fbfec Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 25 Nov 2021 11:41:03 +0800 +Subject: [PATCH 11/56] cmd/compile/internal/ssa: add support on loong64 for + schedule phase + +Change-Id: Id533912c62d8c4e2aa3c124561772b543d685d7d +--- + src/cmd/compile/internal/ssa/schedule.go | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go +index 4e3e5e7..e6f6cf2 100644 +--- a/src/cmd/compile/internal/ssa/schedule.go ++++ b/src/cmd/compile/internal/ssa/schedule.go +@@ -78,7 +78,7 @@ func (h ValHeap) Less(i, j int) bool { + func (op Op) isLoweredGetClosurePtr() bool { + switch op { + case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr, +- Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr, ++ Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpLOONG64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr, + OpRISCV64LoweredGetClosurePtr, OpWasmLoweredGetClosurePtr: + return true + } +@@ -128,7 +128,8 @@ func schedule(f *Func) { + v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck || + v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck || + v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck || +- v.Op == OpRISCV64LoweredNilCheck || v.Op == OpWasmLoweredNilCheck: ++ v.Op == OpRISCV64LoweredNilCheck || v.Op == OpWasmLoweredNilCheck || ++ v.Op == OpLOONG64LoweredNilCheck: + // Nil checks must come before loads from the same address. 
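As an aside to the generated Zero-lowering rules above (not part of the patch): the DUFFZERO rule computes its AuxInt as 8 * (128 - s/8), i.e. it enters runtime.duffzero late enough that only s/8 of its 128 eight-byte stores execute, with each unrolled step occupying 8 bytes of code as the scale factor implies. A minimal, self-contained sketch of that offset computation, assuming only what the rule's bounds state:

package main

import "fmt"

// duffzeroOffset mirrors the AuxInt expression of the DUFFZERO rule:
// zeroing s bytes needs s/8 of the 128 unrolled eight-byte stores, so
// we jump past the (128 - s/8) steps that are not needed.
func duffzeroOffset(s int64) int64 {
	if s%8 != 0 || s <= 24 || s > 8*128 {
		panic("sizes outside this range are handled by other Zero rules")
	}
	return 8 * (128 - s/8)
}

func main() {
	fmt.Println(duffzeroOffset(1024)) // 0: run all 128 stores
	fmt.Println(duffzeroOffset(32))   // 992: skip 124 steps, run 4 stores
}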
+ score[v.ID] = ScoreNilCheck + case v.Op == OpPhi: +-- +2.27.0 + diff --git a/0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch b/0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch new file mode 100644 index 0000000..8ab87d6 --- /dev/null +++ b/0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch @@ -0,0 +1,172 @@ +From 223a867e609185d6d8d6b00608d6df4f02711939 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 25 Nov 2021 11:59:37 +0800 +Subject: [PATCH 12/56] cmd/compile/internal/ssagen: enable intrinsic operation + on loong64 + +Change-Id: If28fe03297e1de62f348373f2779dce07f54611c +--- + src/cmd/compile/internal/ssagen/ssa.go | 40 +++++++++++++------------- + 1 file changed, 20 insertions(+), 20 deletions(-) + +diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go +index 2ffd491..1b0452d 100644 +--- a/src/cmd/compile/internal/ssagen/ssa.go ++++ b/src/cmd/compile/internal/ssagen/ssa.go +@@ -3810,7 +3810,7 @@ func InitTables() { + } + return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1]) + }, +- sys.AMD64, sys.I386, sys.MIPS64) ++ sys.AMD64, sys.I386, sys.MIPS64, sys.Loong64) + add("runtime", "KeepAlive", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0]) +@@ -3865,21 +3865,21 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Load8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v) + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Load64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, +- sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "LoadAcq", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) +@@ -3900,32 +3900,32 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + addF("runtime/internal/atomic", "Store", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = 
s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Store8", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Store64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, +- sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "StorepNoWB", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) + return nil + }, +- sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "StoreRel", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) +@@ -3945,14 +3945,14 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Xchg64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, +- sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) + +@@ -4010,14 +4010,14 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Xadd64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) + }, +- sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + + addF("runtime/internal/atomic", "Xadd", + makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, 
types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64), +@@ -4032,14 +4032,14 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) + }, +- sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Cas64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) + }, +- sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "CasRel", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) +@@ -4155,7 +4155,7 @@ func InitTables() { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0]) + }, +- sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) ++ sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) + addF("math", "Trunc", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0]) +@@ -4536,9 +4536,9 @@ func InitTables() { + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) + }, +- sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64) +- alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) +- alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE) ++ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.Loong64) ++ alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchLoong64) ++ alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchLoong64) + addF("math/bits", "Add64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2]) +@@ -7450,7 +7450,7 @@ func (s *State) Call(v *ssa.Value) *obj.Prog { + switch Arch.LinkArch.Family { + case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm: + p.To.Type = obj.TYPE_REG +- case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64: ++ case sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64: + p.To.Type = obj.TYPE_MEM + default: + base.Fatalf("unknown indirect call family") +-- +2.27.0 + diff --git a/0013-cmd-compile-internal-fix-test-error-on-loong64.patch 
b/0013-cmd-compile-internal-fix-test-error-on-loong64.patch new file mode 100644 index 0000000..c52e96d --- /dev/null +++ b/0013-cmd-compile-internal-fix-test-error-on-loong64.patch @@ -0,0 +1,44 @@ +From 73a5da5b448d7e5eb3e6131dd4958761d4979b0b Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 25 Nov 2021 14:20:39 +0800 +Subject: [PATCH 13/56] cmd/compile/internal: fix test error on loong64 + +Change-Id: I4ca290bf725425a9a6ac2c6767a5bf4ff2339d0e +--- + src/cmd/compile/internal/logopt/logopt_test.go | 2 +- + src/cmd/compile/internal/test/inl_test.go | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go +index 7197617..8131869 100644 +--- a/src/cmd/compile/internal/logopt/logopt_test.go ++++ b/src/cmd/compile/internal/logopt/logopt_test.go +@@ -155,7 +155,7 @@ func s15a8(x *[15]int64) [15]int64 { + arches := []string{runtime.GOARCH} + goos0 := runtime.GOOS + if runtime.GOARCH == "amd64" { // Test many things with "linux" (wasm will get "js") +- arches = []string{"arm", "arm64", "386", "amd64", "mips", "mips64", "ppc64le", "riscv64", "s390x", "wasm"} ++ arches = []string{"arm", "arm64", "386", "amd64", "mips", "mips64", "loong64", "ppc64le", "riscv64", "s390x", "wasm"} + goos0 = "linux" + } + +diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go +index 6f10003..c919125 100644 +--- a/src/cmd/compile/internal/test/inl_test.go ++++ b/src/cmd/compile/internal/test/inl_test.go +@@ -160,10 +160,10 @@ func TestIntendedInlining(t *testing.T) { + }, + } + +- if runtime.GOARCH != "386" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" { ++ if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" { + // nextFreeFast calls sys.Ctz64, which on 386 is implemented in asm and is not inlinable. + // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386. +- // On mips64x and riscv64, Ctz64 is not intrinsified and causes nextFreeFast too expensive ++ // On loong64, mips64x and riscv64, Ctz64 is not intrinsified and causes nextFreeFast too expensive + // to inline (Issue 22239). 
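The ssagen change above works by adding sys.Loong64 to the architecture sets under which a call such as runtime/internal/atomic.Load is replaced by a dedicated SSA op instead of a normal call. A simplified, illustrative model of that registration scheme follows; intrinsicKey, registry and addIntrinsic are hypothetical names, not the compiler's real addF/alias helpers:

package main

import "fmt"

// intrinsicKey identifies an intrinsic by package path and function name.
type intrinsicKey struct {
	pkg, fn string
}

// registry maps an intrinsic to the set of GOARCH values on which it is
// lowered to an SSA op rather than compiled as an ordinary call.
var registry = map[intrinsicKey]map[string]bool{}

func addIntrinsic(pkg, fn string, arches ...string) {
	set := make(map[string]bool, len(arches))
	for _, a := range arches {
		set[a] = true
	}
	registry[intrinsicKey{pkg, fn}] = set
}

func main() {
	// The patch effectively adds "loong64" to sets like this one.
	addIntrinsic("runtime/internal/atomic", "Load", "amd64", "arm64", "loong64", "mips64")
	k := intrinsicKey{"runtime/internal/atomic", "Load"}
	fmt.Println(registry[k]["loong64"]) // true: lowered to an atomic SSA op
	fmt.Println(registry[k]["386"])     // false: stays an ordinary call
}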
+ want["runtime"] = append(want["runtime"], "nextFreeFast") + } +-- +2.27.0 + diff --git a/0014-cmd-internal-obj-instructions-and-registers-for-loon.patch b/0014-cmd-internal-obj-instructions-and-registers-for-loon.patch new file mode 100644 index 0000000..2841853 --- /dev/null +++ b/0014-cmd-internal-obj-instructions-and-registers-for-loon.patch @@ -0,0 +1,3323 @@ +From f87efce3a177d4a6017200932276f0fc7794e8e8 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:01:31 +0800 +Subject: [PATCH 14/56] cmd/internal/obj: instructions and registers for + loong64 + +Change-Id: I930d2a19246496e3ca36d55539183c0f9f650ad9 +--- + src/cmd/internal/obj/link.go | 1 + + src/cmd/internal/obj/loong64/a.out.go | 414 +++++ + src/cmd/internal/obj/loong64/anames.go | 130 ++ + src/cmd/internal/obj/loong64/asm.go | 1960 ++++++++++++++++++++++++ + src/cmd/internal/obj/loong64/cnames.go | 43 + + src/cmd/internal/obj/loong64/list.go | 46 + + src/cmd/internal/obj/loong64/obj.go | 625 ++++++++ + src/cmd/internal/obj/util.go | 19 +- + 8 files changed, 3229 insertions(+), 9 deletions(-) + create mode 100644 src/cmd/internal/obj/loong64/a.out.go + create mode 100644 src/cmd/internal/obj/loong64/anames.go + create mode 100644 src/cmd/internal/obj/loong64/asm.go + create mode 100644 src/cmd/internal/obj/loong64/cnames.go + create mode 100644 src/cmd/internal/obj/loong64/list.go + create mode 100644 src/cmd/internal/obj/loong64/obj.go + +diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go +index 28626e6..957b206 100644 +--- a/src/cmd/internal/obj/link.go ++++ b/src/cmd/internal/obj/link.go +@@ -435,6 +435,7 @@ const ( + ABasePPC64 + ABaseARM64 + ABaseMIPS ++ ABaseLOONG64 + ABaseRISCV + ABaseS390X + ABaseWasm +diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go +new file mode 100644 +index 0000000..e3857ea +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/a.out.go +@@ -0,0 +1,414 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
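The link.go hunk above reserves ABaseLOONG64 so that loong64 opcodes occupy their own numeric block (a.out.go below defines them as obj.ABaseLOONG64 + obj.A_ARCHSPECIFIC + iota, and tables such as oprange are indexed with ALAST & obj.AMask). A small sketch of that scheme, using stand-in base and mask values rather than the real obj package constants:

package main

import "fmt"

const (
	aMask        = 1<<10 - 1 // hypothetical: low bits index per-arch tables
	aBaseMIPS    = 5 << 10   // hypothetical per-arch opcode bases
	aBaseLOONG64 = 6 << 10
)

// Two architectures can use the same table slot without their opcode
// values ever colliding, because the bases differ.
const (
	loongAADDV = aBaseLOONG64 + 17 // stand-in offsets
	mipsAADDV  = aBaseMIPS + 17
)

func main() {
	fmt.Println(loongAADDV&aMask == mipsAADDV&aMask) // true: same per-arch index
	fmt.Println(loongAADDV == mipsAADDV)             // false: distinct opcodes
}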
++ ++package loong64 ++ ++import ( ++ "cmd/internal/obj" ++) ++ ++//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p loong64 ++ ++const ( ++ NSNAME = 8 ++ NSYM = 50 ++ NREG = 32 // number of general registers ++ NFREG = 32 // number of floating point registers ++) ++ ++const ( ++ REG_R0 = obj.RBaseLOONG64 + iota // must be a multiple of 32 ++ REG_R1 ++ REG_R2 ++ REG_R3 ++ REG_R4 ++ REG_R5 ++ REG_R6 ++ REG_R7 ++ REG_R8 ++ REG_R9 ++ REG_R10 ++ REG_R11 ++ REG_R12 ++ REG_R13 ++ REG_R14 ++ REG_R15 ++ REG_R16 ++ REG_R17 ++ REG_R18 ++ REG_R19 ++ REG_R20 ++ REG_R21 ++ REG_R22 ++ REG_R23 ++ REG_R24 ++ REG_R25 ++ REG_R26 ++ REG_R27 ++ REG_R28 ++ REG_R29 ++ REG_R30 ++ REG_R31 ++ ++ REG_F0 // must be a multiple of 32 ++ REG_F1 ++ REG_F2 ++ REG_F3 ++ REG_F4 ++ REG_F5 ++ REG_F6 ++ REG_F7 ++ REG_F8 ++ REG_F9 ++ REG_F10 ++ REG_F11 ++ REG_F12 ++ REG_F13 ++ REG_F14 ++ REG_F15 ++ REG_F16 ++ REG_F17 ++ REG_F18 ++ REG_F19 ++ REG_F20 ++ REG_F21 ++ REG_F22 ++ REG_F23 ++ REG_F24 ++ REG_F25 ++ REG_F26 ++ REG_F27 ++ REG_F28 ++ REG_F29 ++ REG_F30 ++ REG_F31 ++ ++ REG_FCSR0 // must be a multiple of 32 ++ REG_FCSR1 ++ REG_FCSR2 ++ REG_FCSR3 // only four registers are needed ++ REG_FCSR4 ++ REG_FCSR5 ++ REG_FCSR6 ++ REG_FCSR7 ++ REG_FCSR8 ++ REG_FCSR9 ++ REG_FCSR10 ++ REG_FCSR11 ++ REG_FCSR12 ++ REG_FCSR13 ++ REG_FCSR14 ++ REG_FCSR15 ++ REG_FCSR16 ++ REG_FCSR17 ++ REG_FCSR18 ++ REG_FCSR19 ++ REG_FCSR20 ++ REG_FCSR21 ++ REG_FCSR22 ++ REG_FCSR23 ++ REG_FCSR24 ++ REG_FCSR25 ++ REG_FCSR26 ++ REG_FCSR27 ++ REG_FCSR28 ++ REG_FCSR29 ++ REG_FCSR30 ++ REG_FCSR31 ++ ++ REG_FCC0 // must be a multiple of 32 ++ REG_FCC1 ++ REG_FCC2 ++ REG_FCC3 ++ REG_FCC4 ++ REG_FCC5 ++ REG_FCC6 ++ REG_FCC7 // only eight registers are needed ++ REG_FCC8 ++ REG_FCC9 ++ REG_FCC10 ++ REG_FCC11 ++ REG_FCC12 ++ REG_FCC13 ++ REG_FCC14 ++ REG_FCC15 ++ REG_FCC16 ++ REG_FCC17 ++ REG_FCC18 ++ REG_FCC19 ++ REG_FCC20 ++ REG_FCC21 ++ REG_FCC22 ++ REG_FCC23 ++ REG_FCC24 ++ REG_FCC25 ++ REG_FCC26 ++ REG_FCC27 ++ REG_FCC28 ++ REG_FCC29 ++ REG_FCC30 ++ REG_FCC31 ++ ++ REG_LAST = REG_FCC31 // the last defined register ++ ++ REG_SPECIAL = REG_FCSR0 ++ ++ REGZERO = REG_R0 // set to zero ++ REGLINK = REG_R1 ++ REGSP = REG_R3 ++ REGRET = REG_R19 ++ REGARG = -1 // -1 disables passing the first argument in register ++ REGRT1 = REG_R19 // reserved for runtime, duffzero and duffcopy ++ REGRT2 = REG_R20 // reserved for runtime, duffcopy ++ REGCTXT = REG_R29 // context for closures ++ REGG = REG_R22 // G in loong64 ++ REGTMP = REG_R30 // used by the assembler ++ FREGRET = REG_F0 ++) ++ ++var LOONG64DWARFRegisters = map[int16]int16{} ++ ++func init() { ++ // f assigns dwarfregisters[from:to] = (base):(to-from+base) ++ f := func(from, to, base int16) { ++ for r := int16(from); r <= to; r++ { ++ LOONG64DWARFRegisters[r] = (r - from) + base ++ } ++ } ++ f(REG_R0, REG_R31, 0) ++ f(REG_F0, REG_F31, 32) ++ ++} ++ ++const ( ++ BIG = 2046 ++) ++ ++const ( ++ // mark flags ++ LABEL = 1 << 0 ++ LEAF = 1 << 1 ++ SYNC = 1 << 2 ++ BRANCH = 1 << 3 ++) ++ ++const ( ++ C_NONE = iota ++ C_REG ++ C_FREG ++ C_FCSRREG ++ C_FCCREG ++ C_ZCON ++ C_SCON // 12 bit signed ++ C_UCON // 32 bit signed, low 12 bits 0 ++ C_ADD0CON ++ C_AND0CON ++ C_ADDCON // -0x800 <= v < 0 ++ C_ANDCON // 0 < v <= 0xFFF ++ C_LCON // other 32 ++ C_DCON // other 64 (could subdivide further) ++ C_SACON // $n(REG) where n <= int12 ++ C_SECON ++ C_LACON // $n(REG) where int12 < n <= int32 ++ C_LECON ++ C_DACON // $n(REG) where int32 < n ++ C_STCON // $tlsvar ++ C_SBRA ++ C_LBRA ++ C_SAUTO ++ C_LAUTO ++ C_SEXT ++ 
C_LEXT ++ C_ZOREG ++ C_SOREG ++ C_LOREG ++ C_GOK ++ C_ADDR ++ C_TLS ++ C_TEXTSIZE ++ ++ C_NCLASS // must be the last ++) ++ ++const ( ++ AABSD = obj.ABaseLOONG64 + obj.A_ARCHSPECIFIC + iota ++ AABSF ++ AADD ++ AADDD ++ AADDF ++ AADDU ++ ++ AADDW ++ AAND ++ ABEQ ++ ABGEZ ++ ABLEZ ++ ABGTZ ++ ABLTZ ++ ABFPF ++ ABFPT ++ ++ ABNE ++ ABREAK ++ ACLO ++ ACLZ ++ ++ ACMPEQD ++ ACMPEQF ++ ++ ACMPGED // ACMPGED -> fcmp.sle.d ++ ACMPGEF // ACMPGEF -> fcmp.sle.s ++ ACMPGTD // ACMPGTD -> fcmp.slt.d ++ ACMPGTF // ACMPGTF -> fcmp.slt.s ++ ++ ALU12IW ++ ALU32ID ++ ALU52ID ++ APCADDU12I ++ AJIRL ++ ABGE ++ ABLT ++ ABLTU ++ ABGEU ++ ++ ADIV ++ ADIVD ++ ADIVF ++ ADIVU ++ ADIVW ++ ++ ALL ++ ALLV ++ ++ ALUI ++ ++ AMOVB ++ AMOVBU ++ ++ AMOVD ++ AMOVDF ++ AMOVDW ++ AMOVF ++ AMOVFD ++ AMOVFW ++ ++ AMOVH ++ AMOVHU ++ AMOVW ++ ++ AMOVWD ++ AMOVWF ++ ++ AMOVWL ++ AMOVWR ++ ++ AMUL ++ AMULD ++ AMULF ++ AMULU ++ AMULH ++ AMULHU ++ AMULW ++ ANEGD ++ ANEGF ++ ++ ANEGW ++ ANEGV ++ ++ ANOOP // hardware nop ++ ANOR ++ AOR ++ AREM ++ AREMU ++ ++ ARFE ++ ++ ASC ++ ASCV ++ ++ ASGT ++ ASGTU ++ ++ ASLL ++ ASQRTD ++ ASQRTF ++ ASRA ++ ASRL ++ ASUB ++ ASUBD ++ ASUBF ++ ++ ASUBU ++ ASUBW ++ ADBAR ++ ASYSCALL ++ ++ ATEQ ++ ATNE ++ ++ AWORD ++ ++ AXOR ++ ++ // 64-bit ++ AMOVV ++ AMOVVL ++ AMOVVR ++ ++ ASLLV ++ ASRAV ++ ASRLV ++ ADIVV ++ ADIVVU ++ ++ AREMV ++ AREMVU ++ ++ AMULV ++ AMULVU ++ AMULHV ++ AMULHVU ++ AADDV ++ AADDVU ++ ASUBV ++ ASUBVU ++ ++ // 64-bit FP ++ ATRUNCFV ++ ATRUNCDV ++ ATRUNCFW ++ ATRUNCDW ++ ++ AMOVWU ++ AMOVFV ++ AMOVDV ++ AMOVVF ++ AMOVVD ++ ++ ALAST ++ ++ // aliases ++ AJMP = obj.AJMP ++ AJAL = obj.ACALL ++ ARET = obj.ARET ++) ++ ++func init() { ++ // The asm encoder generally assumes that the lowest 5 bits of the ++ // REG_XX constants match the machine instruction encoding, i.e. ++ // the lowest 5 bits is the register number. ++ // Check this here. ++ if REG_R0%32 != 0 { ++ panic("REG_R0 is not a multiple of 32") ++ } ++ if REG_F0%32 != 0 { ++ panic("REG_F0 is not a multiple of 32") ++ } ++ if REG_FCSR0%32 != 0 { ++ panic("REG_FCSR0 is not a multiple of 32") ++ } ++ if REG_FCC0%32 != 0 { ++ panic("REG_FCC0 is not a multiple of 32") ++ } ++} +diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go +new file mode 100644 +index 0000000..48d8a78 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/anames.go +@@ -0,0 +1,130 @@ ++// Code generated by stringer -i a.out.go -o anames.go -p loong64; DO NOT EDIT. 
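The init() checks at the end of a.out.go above enforce that every register bank (REG_R0, REG_F0, REG_FCSR0, REG_FCC0) starts at a multiple of 32, so the encoder can recover the 5-bit hardware register number by masking. A minimal sketch of that invariant, with rBase standing in for the real obj.RBaseLOONG64 value:

package main

import "fmt"

// rBase is a stand-in base; the invariant only requires it to be a
// multiple of 32.
const rBase = 1 << 10

const (
	regR0 = rBase + iota // general registers $r0, $r1, ...
	regR1
	regR2
	regR3 // $r3 is the stack pointer (REGSP above)
	regR4
)

// machineReg recovers the 5-bit hardware register number that the
// encoder places into an instruction word, relying on the
// multiple-of-32 bank layout.
func machineReg(r int16) uint32 { return uint32(r) & 31 }

func main() {
	fmt.Println(machineReg(regR3)) // 3
	fmt.Println(machineReg(regR4)) // 4
}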
++ ++package loong64 ++ ++import "cmd/internal/obj" ++ ++var Anames = []string{ ++ obj.A_ARCHSPECIFIC: "ABSD", ++ "ABSF", ++ "ADD", ++ "ADDD", ++ "ADDF", ++ "ADDU", ++ "ADDW", ++ "AND", ++ "BEQ", ++ "BGEZ", ++ "BLEZ", ++ "BGTZ", ++ "BLTZ", ++ "BFPF", ++ "BFPT", ++ "BNE", ++ "BREAK", ++ "CLO", ++ "CLZ", ++ "CMPEQD", ++ "CMPEQF", ++ "CMPGED", ++ "CMPGEF", ++ "CMPGTD", ++ "CMPGTF", ++ "LU12IW", ++ "LU32ID", ++ "LU52ID", ++ "PCADDU12I", ++ "JIRL", ++ "BGE", ++ "BLT", ++ "BLTU", ++ "BGEU", ++ "DIV", ++ "DIVD", ++ "DIVF", ++ "DIVU", ++ "DIVW", ++ "LL", ++ "LLV", ++ "LUI", ++ "MOVB", ++ "MOVBU", ++ "MOVD", ++ "MOVDF", ++ "MOVDW", ++ "MOVF", ++ "MOVFD", ++ "MOVFW", ++ "MOVH", ++ "MOVHU", ++ "MOVW", ++ "MOVWD", ++ "MOVWF", ++ "MOVWL", ++ "MOVWR", ++ "MUL", ++ "MULD", ++ "MULF", ++ "MULU", ++ "MULH", ++ "MULHU", ++ "MULW", ++ "NEGD", ++ "NEGF", ++ "NEGW", ++ "NEGV", ++ "NOOP", ++ "NOR", ++ "OR", ++ "REM", ++ "REMU", ++ "RFE", ++ "SC", ++ "SCV", ++ "SGT", ++ "SGTU", ++ "SLL", ++ "SQRTD", ++ "SQRTF", ++ "SRA", ++ "SRL", ++ "SUB", ++ "SUBD", ++ "SUBF", ++ "SUBU", ++ "SUBW", ++ "DBAR", ++ "SYSCALL", ++ "TEQ", ++ "TNE", ++ "WORD", ++ "XOR", ++ "MOVV", ++ "MOVVL", ++ "MOVVR", ++ "SLLV", ++ "SRAV", ++ "SRLV", ++ "DIVV", ++ "DIVVU", ++ "REMV", ++ "REMVU", ++ "MULV", ++ "MULVU", ++ "MULHV", ++ "MULHVU", ++ "ADDV", ++ "ADDVU", ++ "SUBV", ++ "SUBVU", ++ "TRUNCFV", ++ "TRUNCDV", ++ "TRUNCFW", ++ "TRUNCDW", ++ "MOVWU", ++ "MOVFV", ++ "MOVDV", ++ "MOVVF", ++ "MOVVD", ++ "LAST", ++} +diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go +new file mode 100644 +index 0000000..345366f +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/asm.go +@@ -0,0 +1,1960 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/internal/obj" ++ "cmd/internal/objabi" ++ "cmd/internal/sys" ++ "fmt" ++ "log" ++ "sort" ++) ++ ++// ctxt0 holds state while assembling a single function. ++// Each function gets a fresh ctxt0. ++// This allows for multiple functions to be safely concurrently assembled. ++type ctxt0 struct { ++ ctxt *obj.Link ++ newprog obj.ProgAlloc ++ cursym *obj.LSym ++ autosize int32 ++ instoffset int64 ++ pc int64 ++} ++ ++// Instruction layout. 
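The assembler in asm.go below is table driven: each operand of a Prog is classified into one of the C_* classes, and the (opcode, class, class, class) tuple selects an Optab entry that fixes the encoding routine (type_) and the instruction size. A stripped-down, illustrative model of that lookup follows; the types and table entries here are stand-ins, not the real Optab data:

package main

import "fmt"

type operandClass int

const (
	cNone operandClass = iota
	cReg
	cSOReg // small register+offset memory operand
)

type optab struct {
	as         string
	a1, a2, a3 operandClass
	size       int // bytes emitted, always a multiple of 4
}

var table = []optab{
	{"MOVV", cReg, cNone, cSOReg, 4}, // store: one instruction
	{"MOVV", cSOReg, cNone, cReg, 4}, // load: one instruction
}

// lookup models oplook: find the entry whose opcode and operand
// classes match the instruction being assembled.
func lookup(as string, a1, a2, a3 operandClass) (optab, bool) {
	for _, o := range table {
		if o.as == as && o.a1 == a1 && o.a2 == a2 && o.a3 == a3 {
			return o, true
		}
	}
	return optab{}, false
}

func main() {
	o, ok := lookup("MOVV", cReg, cNone, cSOReg)
	fmt.Println(ok, o.size) // true 4
}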
++ ++const ( ++ FuncAlign = 4 ++) ++ ++type Optab struct { ++ as obj.As ++ a1 uint8 ++ a2 uint8 ++ a3 uint8 ++ type_ int8 ++ size int8 ++ param int16 ++ family sys.ArchFamily ++ flag uint8 ++} ++ ++const ( ++ NOTUSETMP = 1 << iota // p expands to multiple instructions, but does NOT use REGTMP ++) ++ ++var optab = []Optab{ ++ {obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0}, ++ ++ {AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, ++ {AMOVV, C_REG, C_NONE, C_REG, 1, 4, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_REG, 12, 8, 0, 0, NOTUSETMP}, ++ {AMOVBU, C_REG, C_NONE, C_REG, 13, 4, 0, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_REG, 14, 8, 0, sys.Loong64, NOTUSETMP}, ++ ++ {ASUB, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, ++ {ASUBV, C_REG, C_REG, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AADD, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, ++ {AADDV, C_REG, C_REG, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AAND, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, ++ {ASUB, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {ASUBV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AADD, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {AADDV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AAND, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {ANEGW, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {ANEGV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ ++ {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, ++ {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0}, ++ {ASLLV, C_REG, C_NONE, C_REG, 9, 4, 0, sys.Loong64, 0}, ++ {ASLLV, C_REG, C_REG, C_REG, 9, 4, 0, sys.Loong64, 0}, ++ {ACLO, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, ++ ++ {AADDF, C_FREG, C_NONE, C_FREG, 32, 4, 0, 0, 0}, ++ {AADDF, C_FREG, C_REG, C_FREG, 32, 4, 0, 0, 0}, ++ {ACMPEQF, C_FREG, C_REG, C_NONE, 32, 4, 0, 0, 0}, ++ {AABSF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, ++ {AMOVVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_FREG, 33, 4, 0, 0, 0}, ++ ++ {AMOVW, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVWU, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVBU, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVWL, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVVL, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, ++ {AMOVWL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, 0, 0}, ++ {AMOVVL, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.Loong64, 0}, ++ {AMOVW, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ {AMOVWL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ {AMOVVL, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ {ASC, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, 0, 0}, ++ {ASCV, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ ++ {AMOVW, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVWU, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVV, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVB, C_SEXT, C_NONE, 
C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVBU, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVWL, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVVL, C_SEXT, C_NONE, C_REG, 8, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, ++ {AMOVWU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.Loong64, 0}, ++ {AMOVV, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.Loong64, 0}, ++ {AMOVB, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, ++ {AMOVBU, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, ++ {AMOVWL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, 0, 0}, ++ {AMOVVL, C_SAUTO, C_NONE, C_REG, 8, 4, REGSP, sys.Loong64, 0}, ++ {AMOVW, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {AMOVWU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVB, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {AMOVBU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {AMOVWL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {AMOVVL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.Loong64, 0}, ++ {ALL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0}, ++ {ALLV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.Loong64, 0}, ++ ++ {AMOVW, C_REG, C_NONE, C_LEXT, 35, 12, 0, sys.Loong64, 0}, ++ {AMOVWU, C_REG, C_NONE, C_LEXT, 35, 12, 0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_LEXT, 35, 12, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_LEXT, 35, 12, 0, sys.Loong64, 0}, ++ {AMOVBU, C_REG, C_NONE, C_LEXT, 35, 12, 0, sys.Loong64, 0}, ++ {AMOVW, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_LAUTO, 35, 12, REGSP, 0, 0}, ++ {AMOVW, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, ++ {ASC, C_REG, C_NONE, C_LOREG, 35, 12, REGZERO, 0, 0}, ++ {AMOVW, C_REG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVW, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVWU, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVB, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVW, C_REG, C_NONE, C_TLS, 53, 16, 0, 0, 0}, ++ {AMOVWU, C_REG, C_NONE, C_TLS, 53, 16, 0, sys.Loong64, 0}, ++ {AMOVV, C_REG, C_NONE, C_TLS, 53, 16, 0, sys.Loong64, 0}, ++ {AMOVB, C_REG, C_NONE, C_TLS, 53, 16, 0, 0, 0}, ++ {AMOVBU, C_REG, C_NONE, C_TLS, 53, 16, 0, 0, 0}, ++ ++ {AMOVW, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVWU, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVV, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVB, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVBU, C_LEXT, C_NONE, C_REG, 36, 12, 0, sys.Loong64, 0}, ++ {AMOVW, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, ++ {AMOVWU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.Loong64, 0}, ++ {AMOVV, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, sys.Loong64, 0}, ++ {AMOVB, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, ++ {AMOVBU, C_LAUTO, C_NONE, C_REG, 36, 12, REGSP, 0, 0}, ++ 
{AMOVW, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, ++ {AMOVWU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVV, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVB, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, ++ {AMOVBU, C_LOREG, C_NONE, C_REG, 36, 12, REGZERO, 0, 0}, ++ {AMOVW, C_ADDR, C_NONE, C_REG, 51, 8, 0, 0, 0}, ++ {AMOVW, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVWU, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVV, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVB, C_ADDR, C_NONE, C_REG, 51, 8, 0, 0, 0}, ++ {AMOVB, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, 0, 0}, ++ {AMOVBU, C_ADDR, C_NONE, C_REG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVW, C_TLS, C_NONE, C_REG, 54, 16, 0, 0, 0}, ++ {AMOVWU, C_TLS, C_NONE, C_REG, 54, 16, 0, sys.Loong64, 0}, ++ {AMOVV, C_TLS, C_NONE, C_REG, 54, 16, 0, sys.Loong64, 0}, ++ {AMOVB, C_TLS, C_NONE, C_REG, 54, 16, 0, 0, 0}, ++ {AMOVBU, C_TLS, C_NONE, C_REG, 54, 16, 0, 0, 0}, ++ ++ {AMOVW, C_SECON, C_NONE, C_REG, 3, 4, 0, sys.Loong64, 0}, ++ {AMOVV, C_SECON, C_NONE, C_REG, 3, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_SACON, C_NONE, C_REG, 3, 4, REGSP, 0, 0}, ++ {AMOVV, C_SACON, C_NONE, C_REG, 3, 4, REGSP, sys.Loong64, 0}, ++ {AMOVW, C_LECON, C_NONE, C_REG, 52, 8, 0, 0, NOTUSETMP}, ++ {AMOVW, C_LECON, C_NONE, C_REG, 52, 8, 0, sys.Loong64, NOTUSETMP}, ++ {AMOVV, C_LECON, C_NONE, C_REG, 52, 8, 0, sys.Loong64, NOTUSETMP}, ++ ++ {AMOVW, C_LACON, C_NONE, C_REG, 26, 12, REGSP, 0, 0}, ++ {AMOVV, C_LACON, C_NONE, C_REG, 26, 12, REGSP, sys.Loong64, 0}, ++ {AMOVW, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0}, ++ {AMOVV, C_ADDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVW, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, 0, 0}, ++ {AMOVV, C_ANDCON, C_NONE, C_REG, 3, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVW, C_STCON, C_NONE, C_REG, 55, 12, 0, 0, 0}, ++ {AMOVV, C_STCON, C_NONE, C_REG, 55, 12, 0, sys.Loong64, 0}, ++ ++ {AMOVW, C_UCON, C_NONE, C_REG, 24, 4, 0, 0, 0}, ++ {AMOVV, C_UCON, C_NONE, C_REG, 24, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_LCON, C_NONE, C_REG, 19, 8, 0, 0, NOTUSETMP}, ++ {AMOVV, C_LCON, C_NONE, C_REG, 19, 8, 0, sys.Loong64, NOTUSETMP}, ++ {AMOVV, C_DCON, C_NONE, C_REG, 59, 16, 0, sys.Loong64, NOTUSETMP}, ++ ++ {AMUL, C_REG, C_NONE, C_REG, 2, 4, 0, 0, 0}, ++ {AMUL, C_REG, C_REG, C_REG, 2, 4, 0, 0, 0}, ++ {AMULV, C_REG, C_NONE, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ {AMULV, C_REG, C_REG, C_REG, 2, 4, 0, sys.Loong64, 0}, ++ ++ {AADD, C_ADD0CON, C_REG, C_REG, 4, 4, 0, 0, 0}, ++ {AADD, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, 0, 0}, ++ {AADD, C_ANDCON, C_REG, C_REG, 10, 8, 0, 0, 0}, ++ {AADD, C_ANDCON, C_NONE, C_REG, 10, 8, 0, 0, 0}, ++ ++ {AADDV, C_ADD0CON, C_REG, C_REG, 4, 4, 0, sys.Loong64, 0}, ++ {AADDV, C_ADD0CON, C_NONE, C_REG, 4, 4, 0, sys.Loong64, 0}, ++ {AADDV, C_ANDCON, C_REG, C_REG, 10, 8, 0, sys.Loong64, 0}, ++ {AADDV, C_ANDCON, C_NONE, C_REG, 10, 8, 0, sys.Loong64, 0}, ++ ++ {AAND, C_AND0CON, C_REG, C_REG, 4, 4, 0, 0, 0}, ++ {AAND, C_AND0CON, C_NONE, C_REG, 4, 4, 0, 0, 0}, ++ {AAND, C_ADDCON, C_REG, C_REG, 10, 8, 0, 0, 0}, ++ {AAND, C_ADDCON, C_NONE, C_REG, 10, 8, 0, 0, 0}, ++ ++ {AADD, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0}, ++ {AADD, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0}, ++ {AADDV, C_UCON, C_REG, C_REG, 25, 8, 0, sys.Loong64, 0}, ++ {AADDV, C_UCON, C_NONE, C_REG, 25, 8, 0, sys.Loong64, 0}, ++ {AAND, C_UCON, C_REG, C_REG, 25, 8, 0, 0, 0}, ++ {AAND, C_UCON, C_NONE, C_REG, 25, 8, 0, 0, 0}, ++ ++ {AADD, C_LCON, C_NONE, C_REG, 23, 
12, 0, 0, 0}, ++ {AADDV, C_LCON, C_NONE, C_REG, 23, 12, 0, sys.Loong64, 0}, ++ {AAND, C_LCON, C_NONE, C_REG, 23, 12, 0, 0, 0}, ++ {AADD, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0}, ++ {AADDV, C_LCON, C_REG, C_REG, 23, 12, 0, sys.Loong64, 0}, ++ {AAND, C_LCON, C_REG, C_REG, 23, 12, 0, 0, 0}, ++ ++ {AADDV, C_DCON, C_NONE, C_REG, 60, 20, 0, sys.Loong64, 0}, ++ {AADDV, C_DCON, C_REG, C_REG, 60, 20, 0, sys.Loong64, 0}, ++ ++ {ASLL, C_SCON, C_REG, C_REG, 16, 4, 0, 0, 0}, ++ {ASLL, C_SCON, C_NONE, C_REG, 16, 4, 0, 0, 0}, ++ ++ {ASLLV, C_SCON, C_REG, C_REG, 16, 4, 0, sys.Loong64, 0}, ++ {ASLLV, C_SCON, C_NONE, C_REG, 16, 4, 0, sys.Loong64, 0}, ++ ++ {ASYSCALL, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0}, ++ ++ {ABEQ, C_REG, C_REG, C_SBRA, 6, 4, 0, 0, 0}, ++ {ABEQ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0}, ++ {ABLEZ, C_REG, C_NONE, C_SBRA, 6, 4, 0, 0, 0}, ++ {ABFPT, C_NONE, C_NONE, C_SBRA, 6, 4, 0, 0, NOTUSETMP}, ++ ++ {AJMP, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // b ++ {AJAL, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // bl ++ ++ {AJMP, C_NONE, C_NONE, C_ZOREG, 18, 4, REGZERO, 0, 0}, // jirl r0, rj, 0 ++ {AJAL, C_NONE, C_NONE, C_ZOREG, 18, 4, REGLINK, 0, 0}, // jirl r1, rj, 0 ++ ++ {AMOVW, C_SEXT, C_NONE, C_FREG, 27, 4, 0, sys.Loong64, 0}, ++ {AMOVF, C_SEXT, C_NONE, C_FREG, 27, 4, 0, sys.Loong64, 0}, ++ {AMOVD, C_SEXT, C_NONE, C_FREG, 27, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, sys.Loong64, 0}, ++ {AMOVF, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0}, ++ {AMOVD, C_SAUTO, C_NONE, C_FREG, 27, 4, REGSP, 0, 0}, ++ {AMOVW, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVF, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0}, ++ {AMOVD, C_SOREG, C_NONE, C_FREG, 27, 4, REGZERO, 0, 0}, ++ ++ {AMOVW, C_LEXT, C_NONE, C_FREG, 27, 12, 0, sys.Loong64, 0}, ++ {AMOVF, C_LEXT, C_NONE, C_FREG, 27, 12, 0, sys.Loong64, 0}, ++ {AMOVD, C_LEXT, C_NONE, C_FREG, 27, 12, 0, sys.Loong64, 0}, ++ {AMOVW, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, sys.Loong64, 0}, ++ {AMOVF, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0}, ++ {AMOVD, C_LAUTO, C_NONE, C_FREG, 27, 12, REGSP, 0, 0}, ++ {AMOVW, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVF, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0}, ++ {AMOVD, C_LOREG, C_NONE, C_FREG, 27, 12, REGZERO, 0, 0}, ++ {AMOVF, C_ADDR, C_NONE, C_FREG, 51, 8, 0, 0, 0}, ++ {AMOVF, C_ADDR, C_NONE, C_FREG, 51, 8, 0, sys.Loong64, 0}, ++ {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 8, 0, 0, 0}, ++ {AMOVD, C_ADDR, C_NONE, C_FREG, 51, 8, 0, sys.Loong64, 0}, ++ ++ {AMOVW, C_FREG, C_NONE, C_SEXT, 28, 4, 0, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_SEXT, 28, 4, 0, sys.Loong64, 0}, ++ {AMOVD, C_FREG, C_NONE, C_SEXT, 28, 4, 0, sys.Loong64, 0}, ++ {AMOVW, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_SAUTO, 28, 4, REGSP, 0, 0}, ++ {AMOVW, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_SOREG, 28, 4, REGZERO, 0, 0}, ++ ++ {AMOVW, C_FREG, C_NONE, C_LEXT, 28, 12, 0, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_LEXT, 28, 12, 0, sys.Loong64, 0}, ++ {AMOVD, C_FREG, C_NONE, C_LEXT, 28, 12, 0, sys.Loong64, 0}, ++ {AMOVW, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, sys.Loong64, 0}, ++ {AMOVF, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_LAUTO, 28, 12, REGSP, 0, 0}, ++ {AMOVW, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, sys.Loong64, 0}, ++ {AMOVF, C_FREG, 
C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_LOREG, 28, 12, REGZERO, 0, 0}, ++ {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVF, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 8, 0, 0, 0}, ++ {AMOVD, C_FREG, C_NONE, C_ADDR, 50, 8, 0, sys.Loong64, 0}, ++ ++ {AMOVW, C_REG, C_NONE, C_FREG, 30, 4, 0, 0, 0}, ++ {AMOVW, C_FREG, C_NONE, C_REG, 31, 4, 0, 0, 0}, ++ {AMOVV, C_REG, C_NONE, C_FREG, 47, 4, 0, sys.Loong64, 0}, ++ {AMOVV, C_FREG, C_NONE, C_REG, 48, 4, 0, sys.Loong64, 0}, ++ ++ {AMOVW, C_ADDCON, C_NONE, C_FREG, 34, 8, 0, sys.Loong64, 0}, ++ {AMOVW, C_ANDCON, C_NONE, C_FREG, 34, 8, 0, sys.Loong64, 0}, ++ ++ {AWORD, C_LCON, C_NONE, C_NONE, 40, 4, 0, 0, 0}, ++ {AWORD, C_DCON, C_NONE, C_NONE, 61, 4, 0, 0, 0}, ++ ++ {ATEQ, C_SCON, C_REG, C_REG, 15, 8, 0, 0, 0}, ++ {ATEQ, C_SCON, C_NONE, C_REG, 15, 8, 0, 0, 0}, ++ ++ {ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, 0, sys.Loong64, 0}, // really CACHE instruction ++ {ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.Loong64, 0}, ++ {ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.Loong64, 0}, ++ {ABREAK, C_NONE, C_NONE, C_NONE, 5, 4, 0, 0, 0}, ++ ++ {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0}, ++ {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0}, ++ {obj.APCDATA, C_DCON, C_NONE, C_DCON, 0, 0, 0, 0, 0}, ++ {obj.AFUNCDATA, C_SCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0}, ++ {obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, ++ {obj.ANOP, C_LCON, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689 ++ {obj.ANOP, C_DCON, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689 ++ {obj.ANOP, C_REG, C_NONE, C_NONE, 0, 0, 0, 0, 0}, ++ {obj.ANOP, C_FREG, C_NONE, C_NONE, 0, 0, 0, 0, 0}, ++ {obj.ADUFFZERO, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP ++ {obj.ADUFFCOPY, C_NONE, C_NONE, C_LBRA, 11, 4, 0, 0, 0}, // same as AJMP ++ ++ {obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0}, ++} ++ ++var oprange [ALAST & obj.AMask][]Optab ++ ++var xcmp [C_NCLASS][C_NCLASS]bool ++ ++func span0(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ++ if ctxt.Retpoline { ++ ctxt.Diag("-spectre=ret not supported on loong64") ++ ctxt.Retpoline = false // don't keep printing ++ } ++ ++ p := cursym.Func().Text ++ if p == nil || p.Link == nil { // handle external functions and ELF section symbols ++ return ++ } ++ ++ c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset + ctxt.FixedFrameSize())} ++ ++ if oprange[AOR&obj.AMask] == nil { ++ c.ctxt.Diag("loong64 ops not initialized, call loong64.buildop first") ++ } ++ ++ pc := int64(0) ++ p.Pc = pc ++ ++ var m int ++ var o *Optab ++ for p = p.Link; p != nil; p = p.Link { ++ p.Pc = pc ++ o = c.oplook(p) ++ m = int(o.size) ++ if m == 0 { ++ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { ++ c.ctxt.Diag("zero-width instruction\n%v", p) ++ } ++ continue ++ } ++ ++ pc += int64(m) ++ } ++ ++ c.cursym.Size = pc ++ ++ /* ++ * if any procedure is large enough to ++ * generate a large SBRA branch, then ++ * generate extra passes putting branches ++ * around jmps to fix. this is rare. 
++ */ ++ bflag := 1 ++ ++ var otxt int64 ++ var q *obj.Prog ++ for bflag != 0 { ++ bflag = 0 ++ pc = 0 ++ for p = c.cursym.Func().Text.Link; p != nil; p = p.Link { ++ p.Pc = pc ++ o = c.oplook(p) ++ ++ // very large conditional branches ++ if o.type_ == 6 && p.To.Target() != nil { ++ otxt = p.To.Target().Pc - pc ++ if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 { ++ q = c.newprog() ++ q.Link = p.Link ++ p.Link = q ++ q.As = AJMP ++ q.Pos = p.Pos ++ q.To.Type = obj.TYPE_BRANCH ++ q.To.SetTarget(p.To.Target()) ++ p.To.SetTarget(q) ++ q = c.newprog() ++ q.Link = p.Link ++ p.Link = q ++ q.As = AJMP ++ q.Pos = p.Pos ++ q.To.Type = obj.TYPE_BRANCH ++ q.To.SetTarget(q.Link.Link) ++ ++ c.addnop(p.Link) ++ c.addnop(p) ++ bflag = 1 ++ } ++ } ++ ++ m = int(o.size) ++ if m == 0 { ++ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { ++ c.ctxt.Diag("zero-width instruction\n%v", p) ++ } ++ continue ++ } ++ ++ pc += int64(m) ++ } ++ ++ c.cursym.Size = pc ++ } ++ pc += -pc & (FuncAlign - 1) ++ c.cursym.Size = pc ++ ++ // lay out the code, emitting code and data relocations. ++ ++ c.cursym.Grow(c.cursym.Size) ++ ++ bp := c.cursym.P ++ var i int32 ++ var out [5]uint32 ++ for p := c.cursym.Func().Text.Link; p != nil; p = p.Link { ++ c.pc = p.Pc ++ o = c.oplook(p) ++ if int(o.size) > 4*len(out) { ++ log.Fatalf("out array in span0 is too small, need at least %d for %v", o.size/4, p) ++ } ++ c.asmout(p, o, out[:]) ++ for i = 0; i < int32(o.size/4); i++ { ++ c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) ++ bp = bp[4:] ++ } ++ } ++ ++ // Mark nonpreemptible instruction sequences. ++ // We use REGTMP as a scratch register during call injection, ++ // so instruction sequences that use REGTMP are unsafe to ++ // preempt asynchronously. ++ obj.MarkUnsafePoints(c.ctxt, c.cursym.Func().Text, c.newprog, c.isUnsafePoint, c.isRestartable) ++} ++ ++// isUnsafePoint returns whether p is an unsafe point. ++func (c *ctxt0) isUnsafePoint(p *obj.Prog) bool { ++ // If p explicitly uses REGTMP, it's unsafe to preempt, because the ++ // preemption sequence clobbers REGTMP. ++ return p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP ++} ++ ++// isRestartable returns whether p is a multi-instruction sequence that, ++// if preempted, can be restarted. ++func (c *ctxt0) isRestartable(p *obj.Prog) bool { ++ if c.isUnsafePoint(p) { ++ return false ++ } ++ // If p is a multi-instruction sequence with uses REGTMP inserted by ++ // the assembler in order to materialize a large constant/offset, we ++ // can restart p (at the start of the instruction sequence), recompute ++ // the content of REGTMP, upon async preemption. Currently, all cases ++ // of assembler-inserted REGTMP fall into this category. ++ // If p doesn't use REGTMP, it can be simply preempted, so we don't ++ // mark it. 
++ o := c.oplook(p) ++ return o.size > 4 && o.flag&NOTUSETMP == 0 ++} ++ ++func isint32(v int64) bool { ++ return int64(int32(v)) == v ++} ++ ++func isuint32(v uint64) bool { ++ return uint64(uint32(v)) == v ++} ++ ++func (c *ctxt0) aclass(a *obj.Addr) int { ++ switch a.Type { ++ case obj.TYPE_NONE: ++ return C_NONE ++ ++ case obj.TYPE_REG: ++ if REG_R0 <= a.Reg && a.Reg <= REG_R31 { ++ return C_REG ++ } ++ if REG_F0 <= a.Reg && a.Reg <= REG_F31 { ++ return C_FREG ++ } ++ if REG_FCSR0 <= a.Reg && a.Reg <= REG_FCSR31 { ++ return C_FCSRREG ++ } ++ if REG_FCC0 <= a.Reg && a.Reg <= REG_FCC31 { ++ return C_FCCREG ++ } ++ return C_GOK ++ ++ case obj.TYPE_MEM: ++ switch a.Name { ++ case obj.NAME_EXTERN, ++ obj.NAME_STATIC: ++ if a.Sym == nil { ++ break ++ } ++ c.instoffset = a.Offset ++ if a.Sym != nil { // use relocation ++ if a.Sym.Type == objabi.STLSBSS { ++ return C_TLS ++ } ++ return C_ADDR ++ } ++ return C_LEXT ++ ++ case obj.NAME_AUTO: ++ if a.Reg == REGSP { ++ // unset base register for better printing, since ++ // a.Offset is still relative to pseudo-SP. ++ a.Reg = obj.REG_NONE ++ } ++ c.instoffset = int64(c.autosize) + a.Offset ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SAUTO ++ } ++ return C_LAUTO ++ ++ case obj.NAME_PARAM: ++ if a.Reg == REGSP { ++ // unset base register for better printing, since ++ // a.Offset is still relative to pseudo-FP. ++ a.Reg = obj.REG_NONE ++ } ++ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SAUTO ++ } ++ return C_LAUTO ++ ++ case obj.NAME_NONE: ++ c.instoffset = a.Offset ++ if c.instoffset == 0 { ++ return C_ZOREG ++ } ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SOREG ++ } ++ return C_LOREG ++ } ++ ++ return C_GOK ++ ++ case obj.TYPE_TEXTSIZE: ++ return C_TEXTSIZE ++ ++ case obj.TYPE_CONST, ++ obj.TYPE_ADDR: ++ switch a.Name { ++ case obj.NAME_NONE: ++ c.instoffset = a.Offset ++ if a.Reg != 0 { ++ if -BIG <= c.instoffset && c.instoffset <= BIG { ++ return C_SACON ++ } ++ if isint32(c.instoffset) { ++ return C_LACON ++ } ++ return C_DACON ++ } ++ ++ case obj.NAME_EXTERN, ++ obj.NAME_STATIC: ++ s := a.Sym ++ if s == nil { ++ return C_GOK ++ } ++ ++ c.instoffset = a.Offset ++ if s.Type == objabi.STLSBSS { ++ return C_STCON // address of TLS variable ++ } ++ return C_LECON ++ ++ case obj.NAME_AUTO: ++ if a.Reg == REGSP { ++ // unset base register for better printing, since ++ // a.Offset is still relative to pseudo-SP. ++ a.Reg = obj.REG_NONE ++ } ++ c.instoffset = int64(c.autosize) + a.Offset ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SACON ++ } ++ return C_LACON ++ ++ case obj.NAME_PARAM: ++ if a.Reg == REGSP { ++ // unset base register for better printing, since ++ // a.Offset is still relative to pseudo-FP. 
++ a.Reg = obj.REG_NONE ++ } ++ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() ++ if c.instoffset >= -BIG && c.instoffset < BIG { ++ return C_SACON ++ } ++ return C_LACON ++ ++ default: ++ return C_GOK ++ } ++ ++ if c.instoffset != int64(int32(c.instoffset)) { ++ return C_DCON ++ } ++ ++ if c.instoffset >= 0 { ++ if c.instoffset == 0 { ++ return C_ZCON ++ } ++ if c.instoffset <= 0x7ff { ++ return C_SCON ++ } ++ if c.instoffset <= 0xfff { ++ return C_ANDCON ++ } ++ if c.instoffset&0xfff == 0 && isuint32(uint64(c.instoffset)) { // && (instoffset & (1<<31)) == 0) ++ return C_UCON ++ } ++ if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) { ++ return C_LCON ++ } ++ return C_LCON ++ } ++ ++ if c.instoffset >= -0x800 { ++ return C_ADDCON ++ } ++ if c.instoffset&0xfff == 0 && isint32(c.instoffset) { ++ return C_UCON ++ } ++ if isint32(c.instoffset) { ++ return C_LCON ++ } ++ return C_LCON ++ ++ case obj.TYPE_BRANCH: ++ return C_SBRA ++ } ++ ++ return C_GOK ++} ++ ++func prasm(p *obj.Prog) { ++ fmt.Printf("%v\n", p) ++} ++ ++func (c *ctxt0) oplook(p *obj.Prog) *Optab { ++ if oprange[AOR&obj.AMask] == nil { ++ c.ctxt.Diag("loong64 ops not initialized, call loong64.buildop first") ++ } ++ ++ a1 := int(p.Optab) ++ if a1 != 0 { ++ return &optab[a1-1] ++ } ++ a1 = int(p.From.Class) ++ if a1 == 0 { ++ a1 = c.aclass(&p.From) + 1 ++ p.From.Class = int8(a1) ++ } ++ ++ a1-- ++ a3 := int(p.To.Class) ++ if a3 == 0 { ++ a3 = c.aclass(&p.To) + 1 ++ p.To.Class = int8(a3) ++ } ++ ++ a3-- ++ a2 := C_NONE ++ if p.Reg != 0 { ++ a2 = C_REG ++ } ++ ++ ops := oprange[p.As&obj.AMask] ++ c1 := &xcmp[a1] ++ c3 := &xcmp[a3] ++ for i := range ops { ++ op := &ops[i] ++ if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && (op.family == 0 || c.ctxt.Arch.Family == op.family) { ++ p.Optab = uint16(cap(optab) - cap(ops) + i + 1) ++ return op ++ } ++ } ++ ++ c.ctxt.Diag("illegal combination %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3)) ++ prasm(p) ++ // Turn illegal instruction into an UNDEF, avoid crashing in asmout. 
++ return &Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, 49, 4, 0, 0, 0} ++} ++ ++func cmp(a int, b int) bool { ++ if a == b { ++ return true ++ } ++ switch a { ++ case C_DCON: ++ if b == C_LCON { ++ return true ++ } ++ fallthrough ++ case C_LCON: ++ if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON { ++ return true ++ } ++ ++ case C_ADD0CON: ++ if b == C_ADDCON { ++ return true ++ } ++ fallthrough ++ ++ case C_ADDCON: ++ if b == C_ZCON || b == C_SCON { ++ return true ++ } ++ ++ case C_AND0CON: ++ if b == C_ANDCON { ++ return true ++ } ++ fallthrough ++ ++ case C_ANDCON: ++ if b == C_ZCON || b == C_SCON { ++ return true ++ } ++ ++ case C_UCON: ++ if b == C_ZCON { ++ return true ++ } ++ ++ case C_SCON: ++ if b == C_ZCON { ++ return true ++ } ++ ++ case C_LACON: ++ if b == C_SACON { ++ return true ++ } ++ ++ case C_LBRA: ++ if b == C_SBRA { ++ return true ++ } ++ ++ case C_LEXT: ++ if b == C_SEXT { ++ return true ++ } ++ ++ case C_LAUTO: ++ if b == C_SAUTO { ++ return true ++ } ++ ++ case C_REG: ++ if b == C_ZCON { ++ return true ++ } ++ ++ case C_LOREG: ++ if b == C_ZOREG || b == C_SOREG { ++ return true ++ } ++ ++ case C_SOREG: ++ if b == C_ZOREG { ++ return true ++ } ++ } ++ ++ return false ++} ++ ++type ocmp []Optab ++ ++func (x ocmp) Len() int { ++ return len(x) ++} ++ ++func (x ocmp) Swap(i, j int) { ++ x[i], x[j] = x[j], x[i] ++} ++ ++func (x ocmp) Less(i, j int) bool { ++ p1 := &x[i] ++ p2 := &x[j] ++ n := int(p1.as) - int(p2.as) ++ if n != 0 { ++ return n < 0 ++ } ++ n = int(p1.a1) - int(p2.a1) ++ if n != 0 { ++ return n < 0 ++ } ++ n = int(p1.a2) - int(p2.a2) ++ if n != 0 { ++ return n < 0 ++ } ++ n = int(p1.a3) - int(p2.a3) ++ if n != 0 { ++ return n < 0 ++ } ++ return false ++} ++ ++func opset(a, b0 obj.As) { ++ oprange[a&obj.AMask] = oprange[b0] ++} ++ ++func buildop(ctxt *obj.Link) { ++ if ctxt.DiagFunc == nil { ++ ctxt.DiagFunc = func(format string, args ...interface{}) { ++ log.Printf(format, args...) ++ } ++ } ++ ++ if oprange[AOR&obj.AMask] != nil { ++ // Already initialized; stop now. ++ // This happens in the cmd/asm tests, ++ // each of which re-initializes the arch. 
++ return ++ } ++ ++ var n int ++ ++ for i := 0; i < C_NCLASS; i++ { ++ for n = 0; n < C_NCLASS; n++ { ++ if cmp(n, i) { ++ xcmp[i][n] = true ++ } ++ } ++ } ++ for n = 0; optab[n].as != obj.AXXX; n++ { ++ } ++ sort.Sort(ocmp(optab[:n])) ++ for i := 0; i < n; i++ { ++ r := optab[i].as ++ r0 := r & obj.AMask ++ start := i ++ for optab[i].as == r { ++ i++ ++ } ++ oprange[r0] = optab[start:i] ++ i-- ++ ++ switch r { ++ default: ++ ctxt.Diag("unknown op in build: %v", r) ++ ctxt.DiagFlush() ++ log.Fatalf("bad code") ++ ++ case AABSF: ++ opset(AMOVFD, r0) ++ opset(AMOVDF, r0) ++ opset(AMOVWF, r0) ++ opset(AMOVFW, r0) ++ opset(AMOVWD, r0) ++ opset(AMOVDW, r0) ++ opset(ANEGF, r0) ++ opset(ANEGD, r0) ++ opset(AABSD, r0) ++ opset(ATRUNCDW, r0) ++ opset(ATRUNCFW, r0) ++ opset(ASQRTF, r0) ++ opset(ASQRTD, r0) ++ ++ case AMOVVF: ++ opset(AMOVVD, r0) ++ opset(AMOVFV, r0) ++ opset(AMOVDV, r0) ++ opset(ATRUNCDV, r0) ++ opset(ATRUNCFV, r0) ++ ++ case AADD: ++ opset(ASGT, r0) ++ opset(ASGTU, r0) ++ opset(AADDU, r0) ++ ++ case AADDV: ++ opset(AADDVU, r0) ++ ++ case AADDF: ++ opset(ADIVF, r0) ++ opset(ADIVD, r0) ++ opset(AMULF, r0) ++ opset(AMULD, r0) ++ opset(ASUBF, r0) ++ opset(ASUBD, r0) ++ opset(AADDD, r0) ++ ++ case AAND: ++ opset(AOR, r0) ++ opset(AXOR, r0) ++ ++ case ABEQ: ++ opset(ABNE, r0) ++ opset(ABLT, r0) ++ opset(ABGE, r0) ++ opset(ABGEU, r0) ++ opset(ABLTU, r0) ++ ++ case ABLEZ: ++ opset(ABGEZ, r0) ++ opset(ABLTZ, r0) ++ opset(ABGTZ, r0) ++ ++ case AMOVB: ++ opset(AMOVH, r0) ++ ++ case AMOVBU: ++ opset(AMOVHU, r0) ++ ++ case AMUL: ++ opset(AMULU, r0) ++ opset(AMULH, r0) ++ opset(AMULHU, r0) ++ opset(AREM, r0) ++ opset(AREMU, r0) ++ opset(ADIV, r0) ++ opset(ADIVU, r0) ++ ++ case AMULV: ++ opset(AMULVU, r0) ++ opset(AMULHV, r0) ++ opset(AMULHVU, r0) ++ opset(AREMV, r0) ++ opset(AREMVU, r0) ++ opset(ADIVV, r0) ++ opset(ADIVVU, r0) ++ ++ case ASLL: ++ opset(ASRL, r0) ++ opset(ASRA, r0) ++ ++ case ASLLV: ++ opset(ASRAV, r0) ++ opset(ASRLV, r0) ++ ++ case ASUB: ++ opset(ASUBU, r0) ++ opset(ANOR, r0) ++ ++ case ASUBV: ++ opset(ASUBVU, r0) ++ ++ case ASYSCALL: ++ opset(ADBAR, r0) ++ opset(ANOOP, r0) ++ ++ case ACMPEQF: ++ opset(ACMPGTF, r0) ++ opset(ACMPGTD, r0) ++ opset(ACMPGEF, r0) ++ opset(ACMPGED, r0) ++ opset(ACMPEQD, r0) ++ ++ case ABFPT: ++ opset(ABFPF, r0) ++ ++ case AMOVWL: ++ opset(AMOVWR, r0) ++ ++ case AMOVVL: ++ opset(AMOVVR, r0) ++ ++ case AMOVW, ++ AMOVD, ++ AMOVF, ++ AMOVV, ++ ABREAK, ++ ARFE, ++ AJAL, ++ AJMP, ++ AMOVWU, ++ ALL, ++ ALLV, ++ ASC, ++ ASCV, ++ ANEGW, ++ ANEGV, ++ AWORD, ++ obj.ANOP, ++ obj.ATEXT, ++ obj.AUNDEF, ++ obj.AFUNCDATA, ++ obj.APCDATA, ++ obj.ADUFFZERO, ++ obj.ADUFFCOPY: ++ break ++ ++ case ACLO: ++ opset(ACLZ, r0) ++ ++ case ATEQ: ++ opset(ATNE, r0) ++ } ++ } ++} ++ ++func OP(x uint32, y uint32) uint32 { ++ return x<<3 | y<<0 ++} ++ ++func SP(x uint32, y uint32) uint32 { ++ return x<<29 | y<<26 ++} ++ ++func OP_TEN(x uint32, y uint32) uint32 { ++ return x<<21 | y<<10 ++} ++ ++// r1 -> rk ++// r2 -> rj ++// r3 -> rd ++func OP_RRR(op uint32, r1 uint32, r2 uint32, r3 uint32) uint32 { ++ return op | (r1&0x1F)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0 ++} ++ ++// r2 -> rj ++// r3 -> rd ++func OP_RR(op uint32, r2 uint32, r3 uint32) uint32 { ++ return op | (r2&0x1F)<<5 | (r3&0x1F)<<0 ++} ++ ++func OP_16IR_5I(op uint32, i uint32, r2 uint32) uint32 { ++ return op | (i&0xFFFF)<<10 | (r2&0x7)<<5 | ((i >> 16) & 0x1F) ++} ++ ++func OP_16IRR(op uint32, i uint32, r2 uint32, r3 uint32) uint32 { ++ return op | (i&0xFFFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0 ++} ++ ++func OP_12IRR(op uint32, 
i uint32, r2 uint32, r3 uint32) uint32 { ++ return op | (i&0xFFF)<<10 | (r2&0x1F)<<5 | (r3&0x1F)<<0 ++} ++ ++func OP_IR(op uint32, i uint32, r2 uint32) uint32 { ++ return op | (i&0xFFFFF)<<5 | (r2&0x1F)<<0 // ui20, rd5 ++} ++ ++// Encoding for the 'b' or 'bl' instruction ++func OP_B_BL(op uint32, i uint32) uint32 { ++ return op | ((i & 0xFFFF) << 10) | ((i >> 16) & 0x3FF) ++} ++ ++func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { ++ o1 := uint32(0) ++ o2 := uint32(0) ++ o3 := uint32(0) ++ o4 := uint32(0) ++ o5 := uint32(0) ++ ++ add := AADDU ++ add = AADDVU ++ ++ switch o.type_ { ++ default: ++ c.ctxt.Diag("unknown type %d %v", o.type_) ++ prasm(p) ++ ++ case 0: // pseudo ops ++ break ++ ++ case 1: // mov r1,r2 ==> OR r1,r0,r2 ++ a := AOR ++ if p.As == AMOVW { ++ a = ASLL ++ } ++ o1 = OP_RRR(c.oprrr(a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 2: // add/sub r1,[r2],r3 ++ r := int(p.Reg) ++ if p.As == ANEGW || p.As == ANEGV { ++ r = REGZERO ++ } ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) ++ ++ case 3: // mov $soreg, r ==> or/add $i,o,r ++ v := c.regoff(&p.From) ++ ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ a := add ++ if o.a1 == C_ANDCON { ++ a = AOR ++ } ++ ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg)) ++ ++ case 4: // add $scon,[r1],r2 ++ v := c.regoff(&p.From) ++ ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ ++ o1 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg)) ++ ++ case 5: // syscall ++ o1 = c.oprrr(p.As) ++ ++ case 6: // beq r1,[r2],sbra ++ v := int32(0) ++ vcmp := int32(0) ++ if p.To.Target() != nil { ++ v = int32(p.To.Target().Pc-p.Pc) >> 2 ++ } ++ if v < 0 { ++ vcmp = -v ++ } ++ if (p.As == ABFPT || p.As == ABFPF) && ((uint32(vcmp))>>21)&0x7FF != 0 { ++ c.ctxt.Diag("21 bit-width, short branch too far\n%v", p) ++ } else if p.As != ABFPT && p.As != ABFPF && (v<<16)>>16 != v { ++ c.ctxt.Diag("16 bit-width, short branch too far\n%v", p) ++ } ++ if p.As == ABGTZ || p.As == ABLEZ { ++ o1 = OP_16IRR(c.opirr(p.As), uint32(v), uint32(p.Reg), uint32(p.From.Reg)) ++ } else if p.As == ABFPT || p.As == ABFPF { ++ // BCNEZ cj offset21 ,cj = fcc0 ++ // BCEQZ cj offset21 ,cj = fcc0 ++ o1 = OP_16IR_5I(c.opirr(p.As), uint32(v), uint32(REG_FCC0)) ++ } else { ++ o1 = OP_16IRR(c.opirr(p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg)) ++ } ++ ++ case 7: // mov r, soreg ++ r := int(p.To.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ v := c.regoff(&p.To) ++ o1 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.From.Reg)) ++ ++ case 8: // mov soreg, r ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ v := c.regoff(&p.From) ++ o1 = OP_12IRR(c.opirr(-p.As), uint32(v), uint32(r), uint32(p.To.Reg)) ++ ++ case 9: // sll r1,[r2],r3 ++ if p.As != ACLO && p.As != ACLZ { ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) ++ } else { // clo r1,r2 ++ o1 = OP_RR(c.oprr(p.As), uint32(p.From.Reg), uint32(p.To.Reg)) ++ } ++ ++ case 10: // add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 ++ v := c.regoff(&p.From) ++ a := AOR ++ if v < 0 { ++ a = AADDU ++ } ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP)) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 11: // jmp lbra ++ v := int32(0) ++ if 
c.aclass(&p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP { ++ // use PC-relative branch for short branches ++ // BEQ R0, R0, sbra ++ if p.To.Target() != nil { ++ v = int32(p.To.Target().Pc-p.Pc) >> 2 ++ } ++ if (v<<16)>>16 == v { ++ o1 = OP_16IRR(c.opirr(ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO)) ++ break ++ } ++ } ++ if p.To.Target() == nil { ++ v = int32(p.Pc) >> 2 ++ } else { ++ v = int32(p.To.Target().Pc) >> 2 ++ } ++ o1 = OP_B_BL(c.opirr(p.As), uint32(v)) ++ if p.To.Sym == nil { ++ p.To.Sym = c.cursym.Func().Text.From.Sym ++ p.To.Offset = p.To.Target().Pc ++ } ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.To.Sym ++ rel.Add = p.To.Offset ++ rel.Type = objabi.R_CALLLOONG64 ++ ++ case 12: // movbs r,r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ v := 16 ++ if p.As == AMOVB { ++ v = 24 ++ } ++ o1 = OP_16IRR(c.opirr(ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg)) ++ o2 = OP_16IRR(c.opirr(ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) ++ ++ case 13: // movbu r,r ++ if p.As == AMOVBU { ++ o1 = OP_12IRR(c.opirr(AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg)) ++ } else { ++ // bstrpick.d (msbd=15, lsbd=0) ++ o1 = (0x33c0 << 10) | ((uint32(p.From.Reg) & 0x1f) << 5) | (uint32(p.To.Reg) & 0x1F) ++ } ++ ++ case 14: // movwu r,r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ o1 = OP_16IRR(c.opirr(-ASLLV), uint32(32)&0x3f, uint32(p.From.Reg), uint32(p.To.Reg)) ++ o2 = OP_16IRR(c.opirr(-ASRLV), uint32(32)&0x3f, uint32(p.To.Reg), uint32(p.To.Reg)) ++ ++ case 15: // teq $c r,r ++ v := c.regoff(&p.From) ++ r := int(p.Reg) ++ if r == 0 { ++ r = REGZERO ++ } ++ /* ++ teq c, r1, r2 ++ fallthrough ++ ==> ++ bne r1, r2, 2 ++ break c ++ fallthrough ++ */ ++ if p.As == ATEQ { ++ o1 = OP_16IRR(c.opirr(ABNE), uint32(2), uint32(r), uint32(p.To.Reg)) ++ } else { // ATNE ++ o1 = OP_16IRR(c.opirr(ABEQ), uint32(2), uint32(r), uint32(p.To.Reg)) ++ } ++ o2 = c.oprrr(ABREAK) | (uint32(v) & 0x7FFF) ++ ++ case 16: // sll $c,[r1],r2 ++ v := c.regoff(&p.From) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ ++ // instruction ending with V:6-digit immediate, others:5-digit immediate ++ if v >= 32 && vshift(p.As) { ++ o1 = OP_16IRR(c.opirr(p.As), uint32(v)&0x3f, uint32(r), uint32(p.To.Reg)) ++ } else { ++ o1 = OP_16IRR(c.opirr(p.As), uint32(v)&0x1f, uint32(r), uint32(p.To.Reg)) ++ } ++ ++ case 17: ++ o1 = OP_RRR(c.oprrr(p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 18: // jmp [r1],0(r2) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ o1 = OP_RRR(c.oprrr(p.As), uint32(0), uint32(p.To.Reg), uint32(r)) ++ if p.As == obj.ACALL { ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 0 ++ rel.Type = objabi.R_CALLIND ++ } ++ ++ case 19: // mov $lcon,r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. 
++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(p.To.Reg)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) ++ ++ case 23: // add $lcon,r1,r2 ++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o3 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 24: // mov $ucon,r ++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(p.To.Reg)) ++ ++ case 25: // add/and $ucon,[r1],r2 ++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o2 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 26: // mov $lsext/auto/oreg,r ++ v := c.regoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ o3 = OP_RRR(c.oprrr(add), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 27: // mov [sl]ext/auto/oreg,fr ++ v := c.regoff(&p.From) ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ a := -AMOVF ++ if p.As == AMOVD { ++ a = -AMOVD ++ } ++ switch o.size { ++ case 12: ++ o1 = OP_IR(c.opir(ALU12IW), uint32((v+1<<11)>>12), uint32(REGTMP)) ++ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) ++ o3 = OP_12IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.To.Reg)) ++ ++ case 4: ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.To.Reg)) ++ } ++ ++ case 28: // mov fr,[sl]ext/auto/oreg ++ v := c.regoff(&p.To) ++ r := int(p.To.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ a := AMOVF ++ if p.As == AMOVD { ++ a = AMOVD ++ } ++ switch o.size { ++ case 12: ++ o1 = OP_IR(c.opir(ALU12IW), uint32((v+1<<11)>>12), uint32(REGTMP)) ++ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) ++ o3 = OP_12IRR(c.opirr(a), uint32(v), uint32(REGTMP), uint32(p.From.Reg)) ++ ++ case 4: ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(r), uint32(p.From.Reg)) ++ } ++ ++ case 30: // movw r,fr ++ a := OP_TEN(8, 1321) // movgr2fr.w ++ o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 31: // movw fr,r ++ a := OP_TEN(8, 1325) // movfr2gr.s ++ o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 32: // fadd fr1,[fr2],fr3 ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o1 = OP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) ++ ++ case 33: // fabs fr1, fr3 ++ o1 = OP_RRR(c.oprrr(p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 34: // mov $con,fr ++ v := c.regoff(&p.From) ++ a := AADDU ++ if o.a1 == C_ANDCON { ++ a = AOR ++ } ++ o1 = OP_12IRR(c.opirr(a), uint32(v), uint32(0), uint32(REGTMP)) ++ o2 = OP_RR(OP_TEN(8, 1321), uint32(REGTMP), uint32(p.To.Reg)) // movgr2fr.w ++ ++ case 35: // mov r,lext/auto/oreg ++ v := c.regoff(&p.To) ++ r := int(p.To.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ o1 = OP_IR(c.opir(ALU12IW), uint32((v+1<<11)>>12), uint32(REGTMP)) ++ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) ++ o3 = OP_12IRR(c.opirr(p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg)) ++ ++ case 36: // mov lext/auto/oreg,r ++ v := c.regoff(&p.From) ++ r := int(p.From.Reg) ++ if r == 0 { ++ r = int(o.param) ++ } ++ o1 = 
OP_IR(c.opir(ALU12IW), uint32((v+1<<11)>>12), uint32(REGTMP)) ++ o2 = OP_RRR(c.oprrr(add), uint32(r), uint32(REGTMP), uint32(REGTMP)) ++ o3 = OP_12IRR(c.opirr(-p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg)) ++ ++ case 40: // word ++ o1 = uint32(c.regoff(&p.From)) ++ ++ case 47: // movv r,fr ++ a := OP_TEN(8, 1322) // movgr2fr.d ++ o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 48: // movv fr,r ++ a := OP_TEN(8, 1326) // movfr2gr.d ++ o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg)) ++ ++ case 49: // undef ++ o1 = c.oprrr(ABREAK) ++ ++ // relocation operations ++ case 50: // mov r,addr ==> pcaddu12i + sw ++ o1 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.To.Sym ++ rel.Add = p.To.Offset ++ rel.Type = objabi.R_ADDRLOONG64U ++ ++ o2 = OP_12IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.To.Sym ++ rel2.Add = p.To.Offset ++ rel2.Type = objabi.R_ADDRLOONG64 ++ ++ case 51: // mov addr,r ==> pcaddu12i + lw ++ o1 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.From.Sym ++ rel.Add = p.From.Offset ++ rel.Type = objabi.R_ADDRLOONG64U ++ o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.From.Sym ++ rel2.Add = p.From.Offset ++ rel2.Type = objabi.R_ADDRLOONG64 ++ ++ case 52: // mov $lext, r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ o1 = OP_IR(c.opir(APCADDU12I), uint32(0), uint32(p.To.Reg)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.From.Sym ++ rel.Add = p.From.Offset ++ rel.Type = objabi.R_ADDRLOONG64U ++ o2 = OP_12IRR(c.opirr(add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.From.Sym ++ rel2.Add = p.From.Offset ++ rel2.Type = objabi.R_ADDRLOONG64 ++ ++ case 53: // mov r, tlsvar ==> lu12i.w + ori + add r2, regtmp + sw o(regtmp) ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ o1 = OP_IR(c.opir(ALU12IW), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.To.Sym ++ rel.Add = p.To.Offset ++ rel.Type = objabi.R_ADDRLOONG64TLSU ++ o2 = OP_12IRR(c.opirr(AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.To.Sym ++ rel2.Add = p.To.Offset ++ rel2.Type = objabi.R_ADDRLOONG64TLS ++ o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(REGTMP)) ++ o4 = OP_12IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) ++ ++ case 54: // lu12i.w + ori + add r2, regtmp + lw o(regtmp) ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. 
++ o1 = OP_IR(c.opir(ALU12IW), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.From.Sym ++ rel.Add = p.From.Offset ++ rel.Type = objabi.R_ADDRLOONG64TLSU ++ o2 = OP_12IRR(c.opirr(AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.From.Sym ++ rel2.Add = p.From.Offset ++ rel2.Type = objabi.R_ADDRLOONG64TLS ++ o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(REGTMP)) ++ o4 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) ++ ++ case 55: // lu12i.w + ori + add r2, regtmp ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ o1 = OP_IR(c.opir(ALU12IW), uint32(0), uint32(REGTMP)) ++ rel := obj.Addrel(c.cursym) ++ rel.Off = int32(c.pc) ++ rel.Siz = 4 ++ rel.Sym = p.From.Sym ++ rel.Add = p.From.Offset ++ rel.Type = objabi.R_ADDRLOONG64TLSU ++ o2 = OP_12IRR(c.opirr(AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) ++ rel2 := obj.Addrel(c.cursym) ++ rel2.Off = int32(c.pc + 4) ++ rel2.Siz = 4 ++ rel2.Sym = p.From.Sym ++ rel2.Add = p.From.Offset ++ rel2.Type = objabi.R_ADDRLOONG64TLS ++ o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(p.To.Reg)) ++ ++ case 59: // mov $dcon,r ++ // NOTE: this case does not use REGTMP. If it ever does, ++ // remove the NOTUSETMP flag in optab. ++ v := c.vregoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(p.To.Reg)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) ++ o3 = OP_IR(c.opir(ALU32ID), uint32(v>>32), uint32(p.To.Reg)) ++ o4 = OP_12IRR(c.opirr(ALU52ID), uint32(v>>52), uint32(p.To.Reg), uint32(p.To.Reg)) ++ ++ case 60: // add $dcon,r1,r2 ++ v := c.vregoff(&p.From) ++ o1 = OP_IR(c.opir(ALU12IW), uint32(v>>12), uint32(REGTMP)) ++ o2 = OP_12IRR(c.opirr(AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) ++ o3 = OP_IR(c.opir(ALU32ID), uint32(v>>32), uint32(REGTMP)) ++ o4 = OP_12IRR(c.opirr(ALU52ID), uint32(v>>52), uint32(REGTMP), uint32(REGTMP)) ++ r := int(p.Reg) ++ if r == 0 { ++ r = int(p.To.Reg) ++ } ++ o5 = OP_RRR(c.oprrr(p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) ++ ++ case 61: // word C_DCON ++ o1 = uint32(c.vregoff(&p.From)) ++ o2 = uint32(c.vregoff(&p.From) >> 32) ++ } ++ ++ out[0] = o1 ++ out[1] = o2 ++ out[2] = o3 ++ out[3] = o4 ++ out[4] = o5 ++} ++ ++func (c *ctxt0) vregoff(a *obj.Addr) int64 { ++ c.instoffset = 0 ++ c.aclass(a) ++ return c.instoffset ++} ++ ++func (c *ctxt0) regoff(a *obj.Addr) int32 { ++ return int32(c.vregoff(a)) ++} ++ ++func (c *ctxt0) oprrr(a obj.As) uint32 { ++ switch a { ++ case AADD: ++ return 0x20 << 15 ++ case AADDU: ++ return 0x20 << 15 ++ case ASGT: ++ return 0x24 << 15 // SLT ++ case ASGTU: ++ return 0x25 << 15 // SLTU ++ case AAND: ++ return 0x29 << 15 ++ case AOR: ++ return 0x2a << 15 ++ case AXOR: ++ return 0x2b << 15 ++ case ASUB: ++ return 0x22 << 15 ++ case ASUBU, ANEGW: ++ return 0x22 << 15 ++ case ANOR: ++ return 0x28 << 15 ++ case ASLL: ++ return 0x2e << 15 ++ case ASRL: ++ return 0x2f << 15 ++ case ASRA: ++ return 0x30 << 15 ++ case ASLLV: ++ return 0x31 << 15 ++ case ASRLV: ++ return 0x32 << 15 ++ case ASRAV: ++ return 0x33 << 15 ++ case AADDV: ++ return 0x21 << 15 ++ case AADDVU: ++ return 0x21 << 15 ++ case ASUBV: ++ return 0x23 << 15 ++ case ASUBVU, ANEGV: ++ return 0x23 << 15 ++ ++ case AMUL: ++ return 0x38 << 15 // mul.w ++ case AMULU: ++ return 0x38 << 15 // mul.w ++ case AMULH: ++ return 0x39 << 
15 // mulh.w ++ case AMULHU: ++ return 0x3a << 15 // mulhu.w ++ case AMULV: ++ return 0x3b << 15 // mul.d ++ case AMULVU: ++ return 0x3b << 15 // mul.d ++ case AMULHV: ++ return 0x3c << 15 // mulh.d ++ case AMULHVU: ++ return 0x3d << 15 // mulhu.d ++ case ADIV: ++ return 0x40 << 15 // div.w ++ case ADIVU: ++ return 0x42 << 15 // div.wu ++ case ADIVV: ++ return 0x44 << 15 // div.d ++ case ADIVVU: ++ return 0x46 << 15 // div.du ++ case AREM: ++ return 0x41 << 15 // mod.w ++ case AREMU: ++ return 0x43 << 15 // mod.wu ++ case AREMV: ++ return 0x45 << 15 // mod.d ++ case AREMVU: ++ return 0x47 << 15 // mod.du ++ ++ case AJMP: ++ return 0x13 << 26 // jirl r0, rj, 0 ++ case AJAL: ++ return (0x13 << 26) | 1 // jirl r1, rj, 0 ++ ++ case ABREAK: ++ return 0x54 << 15 ++ case ASYSCALL: ++ return 0x56 << 15 ++ case ADIVF: ++ return 0x20d << 15 ++ case ADIVD: ++ return 0x20e << 15 ++ case AMULF: ++ return 0x209 << 15 ++ case AMULD: ++ return 0x20a << 15 ++ case ASUBF: ++ return 0x205 << 15 ++ case ASUBD: ++ return 0x206 << 15 ++ case AADDF: ++ return 0x201 << 15 ++ case AADDD: ++ return 0x202 << 15 ++ case ATRUNCFV: ++ return 0x46a9 << 10 ++ case ATRUNCDV: ++ return 0x46aa << 10 ++ case ATRUNCFW: ++ return 0x46a1 << 10 ++ case ATRUNCDW: ++ return 0x46a2 << 10 ++ case AMOVFV: ++ return 0x46c9 << 10 ++ case AMOVDV: ++ return 0x46ca << 10 ++ case AMOVVF: ++ return 0x4746 << 10 ++ case AMOVVD: ++ return 0x474a << 10 ++ case AMOVFW: ++ return 0x46c1 << 10 ++ case AMOVDW: ++ return 0x46c2 << 10 ++ case AMOVWF: ++ return 0x4744 << 10 ++ case AMOVDF: ++ return 0x4646 << 10 ++ case AMOVWD: ++ return 0x4748 << 10 ++ case AMOVFD: ++ return 0x4649 << 10 ++ case AABSF: ++ return 0x4501 << 10 ++ case AABSD: ++ return 0x4502 << 10 ++ case AMOVF: ++ return 0x4525 << 10 ++ case AMOVD: ++ return 0x4526 << 10 ++ case ANEGF: ++ return 0x4505 << 10 ++ case ANEGD: ++ return 0x4506 << 10 ++ case ACMPEQF: ++ return 0x0c1<<20 | 0x4<<15 // FCMP.CEQ.S ++ case ACMPEQD: ++ return 0x0c2<<20 | 0x4<<15 // FCMP.CEQ.D ++ case ACMPGED: ++ return 0x0c2<<20 | 0x7<<15 // FCMP.SLE.D ++ case ACMPGEF: ++ return 0x0c1<<20 | 0x7<<15 // FCMP.SLE.S ++ case ACMPGTD: ++ return 0x0c2<<20 | 0x3<<15 // FCMP.SLT.D ++ case ACMPGTF: ++ return 0x0c1<<20 | 0x3<<15 // FCMP.SLT.S ++ ++ case ASQRTF: ++ return 0x4511 << 10 ++ case ASQRTD: ++ return 0x4512 << 10 ++ ++ case ADBAR: ++ return 0x70e4 << 15 ++ case ANOOP: ++ // andi r0, r0, 0 ++ return 0x03400000 ++ } ++ ++ if a < 0 { ++ c.ctxt.Diag("bad rrr opcode -%v", -a) ++ } else { ++ c.ctxt.Diag("bad rrr opcode %v", a) ++ } ++ return 0 ++} ++ ++func (c *ctxt0) oprr(a obj.As) uint32 { ++ switch a { ++ case ACLO: ++ return 0x4 << 10 ++ case ACLZ: ++ return 0x5 << 10 ++ } ++ ++ c.ctxt.Diag("bad rr opcode %v", a) ++ return 0 ++} ++ ++func (c *ctxt0) opir(a obj.As) uint32 { ++ switch a { ++ case ALU12IW: ++ return 0x0a << 25 ++ case ALU32ID: ++ return 0x0b << 25 ++ case APCADDU12I: ++ return 0x0e << 25 ++ } ++ return 0 ++} ++ ++func (c *ctxt0) opirr(a obj.As) uint32 { ++ switch a { ++ case AADD, AADDU: ++ return 0x00a << 22 ++ case ASGT: ++ return 0x008 << 22 ++ case ASGTU: ++ return 0x009 << 22 ++ case AAND: ++ return 0x00d << 22 ++ case AOR: ++ return 0x00e << 22 ++ case ALU52ID: ++ return 0x00c << 22 ++ case AXOR: ++ return 0x00f << 22 ++ case ASLL: ++ return 0x00081 << 15 ++ case ASRL: ++ return 0x00089 << 15 ++ case ASRA: ++ return 0x00091 << 15 ++ case AADDV: ++ return 0x00b << 22 ++ case AADDVU: ++ return 0x00b << 22 ++ ++ case AJMP: ++ return 0x14 << 26 ++ case AJAL, ++ obj.ADUFFZERO, ++ obj.ADUFFCOPY: ++ 
return 0x15 << 26 ++ ++ case AJIRL: ++ return 0x13 << 26 ++ case ABLTU: ++ return 0x1a << 26 ++ case ABLT, ABLTZ, ABGTZ: ++ return 0x18 << 26 ++ case ABGEU: ++ return 0x1b << 26 ++ case ABGE, ABGEZ, ABLEZ: ++ return 0x19 << 26 ++ case ABEQ: ++ return 0x16 << 26 ++ case ABNE: ++ return 0x17 << 26 ++ case ABFPT: ++ return 0x12<<26 | 0x1<<8 ++ case ABFPF: ++ return 0x12<<26 | 0x0<<8 ++ ++ case AMOVB, ++ AMOVBU: ++ return 0x0a4 << 22 ++ case AMOVH, ++ AMOVHU: ++ return 0x0a5 << 22 ++ case AMOVW, ++ AMOVWU: ++ return 0x0a6 << 22 ++ case AMOVV: ++ return 0x0a7 << 22 ++ case AMOVF: ++ return 0x0ad << 22 ++ case AMOVD: ++ return 0x0af << 22 ++ case AMOVWL: ++ return 0x0bc << 22 ++ case AMOVWR: ++ return 0x0bd << 22 ++ case AMOVVL: ++ return 0x0be << 22 ++ case AMOVVR: ++ return 0x0bf << 22 ++ ++ case ABREAK: ++ return 0x018 << 22 ++ ++ case -AMOVWL: ++ return 0x0b8 << 22 ++ case -AMOVWR: ++ return 0x0b9 << 22 ++ case -AMOVVL: ++ return 0x0ba << 22 ++ case -AMOVVR: ++ return 0x0bb << 22 ++ case -AMOVB: ++ return 0x0a0 << 22 ++ case -AMOVBU: ++ return 0x0a8 << 22 ++ case -AMOVH: ++ return 0x0a1 << 22 ++ case -AMOVHU: ++ return 0x0a9 << 22 ++ case -AMOVW: ++ return 0x0a2 << 22 ++ case -AMOVWU: ++ return 0x0aa << 22 ++ case -AMOVV: ++ return 0x0a3 << 22 ++ case -AMOVF: ++ return 0x0ac << 22 ++ case -AMOVD: ++ return 0x0ae << 22 ++ ++ case ASLLV, ++ -ASLLV: ++ return 0x0041 << 16 ++ case ASRLV, ++ -ASRLV: ++ return 0x0045 << 16 ++ case ASRAV, ++ -ASRAV: ++ return 0x0049 << 16 ++ case -ALL: ++ return 0x020 << 24 ++ case -ALLV: ++ return 0x022 << 24 ++ case ASC: ++ return 0x021 << 24 ++ case ASCV: ++ return 0x023 << 24 ++ } ++ ++ if a < 0 { ++ c.ctxt.Diag("bad irr opcode -%v", -a) ++ } else { ++ c.ctxt.Diag("bad irr opcode %v", a) ++ } ++ return 0 ++} ++ ++func vshift(a obj.As) bool { ++ switch a { ++ case ASLLV, ++ ASRLV, ++ ASRAV: ++ return true ++ } ++ return false ++} +diff --git a/src/cmd/internal/obj/loong64/cnames.go b/src/cmd/internal/obj/loong64/cnames.go +new file mode 100644 +index 0000000..d6d3091 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/cnames.go +@@ -0,0 +1,43 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++var cnames0 = []string{ ++ "NONE", ++ "REG", ++ "FREG", ++ "FCREG", ++ "FCSRREG", ++ "FCCREG", ++ "ZCON", ++ "SCON", ++ "UCON", ++ "ADD0CON", ++ "AND0CON", ++ "ADDCON", ++ "ANDCON", ++ "LCON", ++ "DCON", ++ "SACON", ++ "SECON", ++ "LACON", ++ "LECON", ++ "DACON", ++ "STCON", ++ "SBRA", ++ "LBRA", ++ "SAUTO", ++ "LAUTO", ++ "SEXT", ++ "LEXT", ++ "ZOREG", ++ "SOREG", ++ "LOREG", ++ "GOK", ++ "ADDR", ++ "TLS", ++ "TEXTSIZE", ++ "NCLASS", ++} +diff --git a/src/cmd/internal/obj/loong64/list.go b/src/cmd/internal/obj/loong64/list.go +new file mode 100644 +index 0000000..97ac659 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/list.go +@@ -0,0 +1,46 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/internal/obj" ++ "fmt" ++) ++ ++func init() { ++ obj.RegisterRegister(obj.RBaseLOONG64, REG_LAST+1, rconv) ++ obj.RegisterOpcode(obj.ABaseLOONG64, Anames) ++} ++ ++func rconv(r int) string { ++ if r == 0 { ++ return "NONE" ++ } ++ if r == REGG { ++ // Special case. 
++ return "g" ++ } ++ if REG_R0 <= r && r <= REG_R31 { ++ return fmt.Sprintf("R%d", r-REG_R0) ++ } ++ if REG_F0 <= r && r <= REG_F31 { ++ return fmt.Sprintf("F%d", r-REG_F0) ++ } ++ if REG_FCSR0 <= r && r <= REG_FCSR31 { ++ return fmt.Sprintf("FCSR%d", r-REG_FCSR0) ++ } ++ if REG_FCC0 <= r && r <= REG_FCC31 { ++ return fmt.Sprintf("FCC%d", r-REG_FCC0) ++ } ++ return fmt.Sprintf("Rgok(%d)", r-obj.RBaseLOONG64) ++} ++ ++func DRconv(a int) string { ++ s := "C_??" ++ if a >= C_NONE && a <= C_NCLASS { ++ s = cnames0[a] ++ } ++ return s ++} +diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go +new file mode 100644 +index 0000000..36036e5 +--- /dev/null ++++ b/src/cmd/internal/obj/loong64/obj.go +@@ -0,0 +1,625 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/internal/obj" ++ "cmd/internal/objabi" ++ "cmd/internal/sys" ++ ++ "math" ++) ++ ++func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { ++ // Rewrite JMP/JAL to symbol as TYPE_BRANCH. ++ switch p.As { ++ case AJMP, ++ AJAL, ++ ARET, ++ obj.ADUFFZERO, ++ obj.ADUFFCOPY: ++ if p.To.Sym != nil { ++ p.To.Type = obj.TYPE_BRANCH ++ } ++ } ++ ++ // Rewrite float constants to values stored in memory. ++ switch p.As { ++ case AMOVF: ++ if p.From.Type == obj.TYPE_FCONST { ++ f32 := float32(p.From.Val.(float64)) ++ if math.Float32bits(f32) == 0 { ++ p.As = AMOVW ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGZERO ++ break ++ } ++ p.From.Type = obj.TYPE_MEM ++ p.From.Sym = ctxt.Float32Sym(f32) ++ p.From.Name = obj.NAME_EXTERN ++ p.From.Offset = 0 ++ } ++ ++ case AMOVD: ++ if p.From.Type == obj.TYPE_FCONST { ++ f64 := p.From.Val.(float64) ++ if math.Float64bits(f64) == 0 { ++ p.As = AMOVV ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGZERO ++ break ++ } ++ p.From.Type = obj.TYPE_MEM ++ p.From.Sym = ctxt.Float64Sym(f64) ++ p.From.Name = obj.NAME_EXTERN ++ p.From.Offset = 0 ++ } ++ } ++ ++ // Rewrite SUB constants into ADD. 
++ switch p.As { ++ case ASUB: ++ if p.From.Type == obj.TYPE_CONST { ++ p.From.Offset = -p.From.Offset ++ p.As = AADD ++ } ++ ++ case ASUBU: ++ if p.From.Type == obj.TYPE_CONST { ++ p.From.Offset = -p.From.Offset ++ p.As = AADDU ++ } ++ ++ case ASUBV: ++ if p.From.Type == obj.TYPE_CONST { ++ p.From.Offset = -p.From.Offset ++ p.As = AADDV ++ } ++ ++ case ASUBVU: ++ if p.From.Type == obj.TYPE_CONST { ++ p.From.Offset = -p.From.Offset ++ p.As = AADDVU ++ } ++ } ++} ++ ++func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ++ c := ctxt0{ctxt: ctxt, newprog: newprog, cursym: cursym} ++ ++ p := c.cursym.Func().Text ++ textstksiz := p.To.Offset ++ ++ if textstksiz < 0 { ++ c.ctxt.Diag("negative frame size %d - did you mean NOFRAME?", textstksiz) ++ } ++ if p.From.Sym.NoFrame() { ++ if textstksiz != 0 { ++ c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) ++ } ++ } ++ ++ c.cursym.Func().Args = p.To.Val.(int32) ++ c.cursym.Func().Locals = int32(textstksiz) ++ ++ /* ++ * find leaf subroutines ++ * expand RET ++ */ ++ ++ for p := c.cursym.Func().Text; p != nil; p = p.Link { ++ switch p.As { ++ case obj.ATEXT: ++ p.Mark |= LABEL | LEAF | SYNC ++ if p.Link != nil { ++ p.Link.Mark |= LABEL ++ } ++ ++ case AMOVW, ++ AMOVV: ++ if p.To.Type == obj.TYPE_REG && p.To.Reg >= REG_SPECIAL { ++ p.Mark |= LABEL | SYNC ++ break ++ } ++ if p.From.Type == obj.TYPE_REG && p.From.Reg >= REG_SPECIAL { ++ p.Mark |= LABEL | SYNC ++ } ++ ++ case ASYSCALL, ++ AWORD: ++ p.Mark |= LABEL | SYNC ++ ++ case ANOR: ++ if p.To.Type == obj.TYPE_REG { ++ if p.To.Reg == REGZERO { ++ p.Mark |= LABEL | SYNC ++ } ++ } ++ ++ case AJAL, ++ obj.ADUFFZERO, ++ obj.ADUFFCOPY: ++ c.cursym.Func().Text.Mark &^= LEAF ++ fallthrough ++ ++ case AJMP, ++ ABEQ, ++ ABGEU, ++ ABLTU, ++ ABLTZ, ++ ABNE, ++ ABFPT, ABFPF: ++ p.Mark |= BRANCH ++ q1 := p.To.Target() ++ if q1 != nil { ++ for q1.As == obj.ANOP { ++ q1 = q1.Link ++ p.To.SetTarget(q1) ++ } ++ ++ if q1.Mark&LEAF == 0 { ++ q1.Mark |= LABEL ++ } ++ } ++ q1 = p.Link ++ if q1 != nil { ++ q1.Mark |= LABEL ++ } ++ ++ case ARET: ++ if p.Link != nil { ++ p.Link.Mark |= LABEL ++ } ++ } ++ } ++ ++ var mov, add obj.As ++ ++ add = AADDV ++ mov = AMOVV ++ ++ var q *obj.Prog ++ var q1 *obj.Prog ++ autosize := int32(0) ++ var p1 *obj.Prog ++ var p2 *obj.Prog ++ for p := c.cursym.Func().Text; p != nil; p = p.Link { ++ o := p.As ++ switch o { ++ case obj.ATEXT: ++ autosize = int32(textstksiz) ++ ++ if p.Mark&LEAF != 0 && autosize == 0 { ++ // A leaf function with no locals has no frame. ++ p.From.Sym.Set(obj.AttrNoFrame, true) ++ } ++ ++ if !p.From.Sym.NoFrame() { ++ // If there is a stack frame at all, it includes ++ // space to save the LR. ++ autosize += int32(c.ctxt.FixedFrameSize()) ++ } ++ ++ if autosize&4 != 0 { ++ autosize += 4 ++ } ++ ++ if autosize == 0 && c.cursym.Func().Text.Mark&LEAF == 0 { ++ if c.cursym.Func().Text.From.Sym.NoSplit() { ++ if ctxt.Debugvlog { ++ ctxt.Logf("save suppressed in: %s\n", c.cursym.Name) ++ } ++ ++ c.cursym.Func().Text.Mark |= LEAF ++ } ++ } ++ ++ p.To.Offset = int64(autosize) - ctxt.FixedFrameSize() ++ ++ if c.cursym.Func().Text.Mark&LEAF != 0 { ++ c.cursym.Set(obj.AttrLeaf, true) ++ if p.From.Sym.NoFrame() { ++ break ++ } ++ } ++ ++ if !p.From.Sym.NoSplit() { ++ p = c.stacksplit(p, autosize) // emit split check ++ } ++ ++ q = p ++ ++ if autosize != 0 { ++ // Make sure to save link register for non-empty frame, even if ++ // it is a leaf function, so that traceback works. 
++ // Store link register before decrement SP, so if a signal comes ++ // during the execution of the function prologue, the traceback ++ // code will not see a half-updated stack frame. ++ // This sequence is not async preemptible, as if we open a frame ++ // at the current SP, it will clobber the saved LR. ++ q = c.ctxt.StartUnsafePoint(q, c.newprog) ++ ++ q = obj.Appendp(q, newprog) ++ q.As = mov ++ q.Pos = p.Pos ++ q.From.Type = obj.TYPE_REG ++ q.From.Reg = REGLINK ++ q.To.Type = obj.TYPE_MEM ++ q.To.Offset = int64(-autosize) ++ q.To.Reg = REGSP ++ ++ q = obj.Appendp(q, newprog) ++ q.As = add ++ q.Pos = p.Pos ++ q.From.Type = obj.TYPE_CONST ++ q.From.Offset = int64(-autosize) ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REGSP ++ q.Spadj = +autosize ++ ++ q = c.ctxt.EndUnsafePoint(q, c.newprog, -1) ++ } ++ ++ if c.cursym.Func().Text.From.Sym.Wrapper() && c.cursym.Func().Text.Mark&LEAF == 0 { ++ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame ++ // ++ // MOV g_panic(g), R1 ++ // BEQ R1, end ++ // MOV panic_argp(R1), R2 ++ // ADD $(autosize+FIXED_FRAME), R29, R3 ++ // BNE R2, R3, end ++ // ADD $FIXED_FRAME, R29, R2 ++ // MOV R2, panic_argp(R1) ++ // end: ++ // NOP ++ // ++ // The NOP is needed to give the jumps somewhere to land. ++ // It is a liblink NOP, not an hardware NOP: it encodes to 0 instruction bytes. ++ // ++ // We don't generate this for leafs because that means the wrapped ++ // function was inlined into the wrapper. ++ ++ q = obj.Appendp(q, newprog) ++ ++ q.As = mov ++ q.From.Type = obj.TYPE_MEM ++ q.From.Reg = REGG ++ q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REG_R19 ++ ++ q = obj.Appendp(q, newprog) ++ q.As = ABEQ ++ q.From.Type = obj.TYPE_REG ++ q.From.Reg = REG_R19 ++ q.To.Type = obj.TYPE_BRANCH ++ q.Mark |= BRANCH ++ p1 = q ++ ++ q = obj.Appendp(q, newprog) ++ q.As = mov ++ q.From.Type = obj.TYPE_MEM ++ q.From.Reg = REG_R19 ++ q.From.Offset = 0 // Panic.argp ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REG_R4 ++ ++ q = obj.Appendp(q, newprog) ++ q.As = add ++ q.From.Type = obj.TYPE_CONST ++ q.From.Offset = int64(autosize) + ctxt.FixedFrameSize() ++ q.Reg = REGSP ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REG_R5 ++ ++ q = obj.Appendp(q, newprog) ++ q.As = ABNE ++ q.From.Type = obj.TYPE_REG ++ q.From.Reg = REG_R4 ++ q.Reg = REG_R5 ++ q.To.Type = obj.TYPE_BRANCH ++ q.Mark |= BRANCH ++ p2 = q ++ ++ q = obj.Appendp(q, newprog) ++ q.As = add ++ q.From.Type = obj.TYPE_CONST ++ q.From.Offset = ctxt.FixedFrameSize() ++ q.Reg = REGSP ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REG_R4 ++ ++ q = obj.Appendp(q, newprog) ++ q.As = mov ++ q.From.Type = obj.TYPE_REG ++ q.From.Reg = REG_R4 ++ q.To.Type = obj.TYPE_MEM ++ q.To.Reg = REG_R19 ++ q.To.Offset = 0 // Panic.argp ++ ++ q = obj.Appendp(q, newprog) ++ ++ q.As = obj.ANOP ++ p1.To.SetTarget(q) ++ p2.To.SetTarget(q) ++ } ++ ++ case ARET: ++ if p.From.Type == obj.TYPE_CONST { ++ ctxt.Diag("using BECOME (%v) is not supported!", p) ++ break ++ } ++ ++ retSym := p.To.Sym ++ p.To.Name = obj.NAME_NONE // clear fields as we may modify p to other instruction ++ p.To.Sym = nil ++ ++ if c.cursym.Func().Text.Mark&LEAF != 0 { ++ if autosize == 0 { ++ p.As = AJMP ++ p.From = obj.Addr{} ++ if retSym != nil { // retjmp ++ p.To.Type = obj.TYPE_BRANCH ++ p.To.Name = obj.NAME_EXTERN ++ p.To.Sym = retSym ++ } else { ++ p.To.Type = obj.TYPE_MEM ++ p.To.Reg = REGLINK ++ p.To.Offset = 0 ++ } ++ p.Mark |= BRANCH ++ break ++ } ++ ++ p.As = add ++ p.From.Type = obj.TYPE_CONST ++ 
p.From.Offset = int64(autosize) ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REGSP ++ p.Spadj = -autosize ++ ++ q = c.newprog() ++ q.As = AJMP ++ q.Pos = p.Pos ++ if retSym != nil { // retjmp ++ q.To.Type = obj.TYPE_BRANCH ++ q.To.Name = obj.NAME_EXTERN ++ q.To.Sym = retSym ++ } else { ++ q.To.Type = obj.TYPE_MEM ++ q.To.Offset = 0 ++ q.To.Reg = REGLINK ++ } ++ q.Mark |= BRANCH ++ q.Spadj = +autosize ++ ++ q.Link = p.Link ++ p.Link = q ++ break ++ } ++ ++ p.As = mov ++ p.From.Type = obj.TYPE_MEM ++ p.From.Offset = 0 ++ p.From.Reg = REGSP ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REGLINK ++ ++ if autosize != 0 { ++ q = c.newprog() ++ q.As = add ++ q.Pos = p.Pos ++ q.From.Type = obj.TYPE_CONST ++ q.From.Offset = int64(autosize) ++ q.To.Type = obj.TYPE_REG ++ q.To.Reg = REGSP ++ q.Spadj = -autosize ++ ++ q.Link = p.Link ++ p.Link = q ++ } ++ ++ q1 = c.newprog() ++ q1.As = AJMP ++ q1.Pos = p.Pos ++ if retSym != nil { // retjmp ++ q1.To.Type = obj.TYPE_BRANCH ++ q1.To.Name = obj.NAME_EXTERN ++ q1.To.Sym = retSym ++ } else { ++ q1.To.Type = obj.TYPE_MEM ++ q1.To.Offset = 0 ++ q1.To.Reg = REGLINK ++ } ++ q1.Mark |= BRANCH ++ q1.Spadj = +autosize ++ ++ q1.Link = q.Link ++ q.Link = q1 ++ ++ case AADD, ++ AADDU, ++ AADDV, ++ AADDVU: ++ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { ++ p.Spadj = int32(-p.From.Offset) ++ } ++ ++ case obj.AGETCALLERPC: ++ if cursym.Leaf() { ++ // MOV LR, Rd ++ p.As = mov ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGLINK ++ } else { ++ // MOV (RSP), Rd ++ p.As = mov ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = REGSP ++ } ++ } ++ } ++} ++ ++func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { ++ var mov, add obj.As ++ ++ add = AADDV ++ mov = AMOVV ++ ++ // MOV g_stackguard(g), R19 ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = mov ++ p.From.Type = obj.TYPE_MEM ++ p.From.Reg = REGG ++ p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0 ++ if c.cursym.CFunc() { ++ p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1 ++ } ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R19 ++ ++ // Mark the stack bound check and morestack call async nonpreemptible. ++ // If we get preempted here, when resumed the preemption request is ++ // cleared, but we'll still call morestack, which will double the stack ++ // unnecessarily. See issue #35470. ++ p = c.ctxt.StartUnsafePoint(p, c.newprog) ++ ++ var q *obj.Prog ++ if framesize <= objabi.StackSmall { ++ // small stack: SP < stackguard ++ // AGTU SP, stackguard, R19 ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = ASGTU ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGSP ++ p.Reg = REG_R19 ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R19 ++ } else { ++ // large stack: SP-framesize < stackguard-StackSmall ++ offset := int64(framesize) - objabi.StackSmall ++ if framesize > objabi.StackBig { ++ // Such a large stack we need to protect against underflow. ++ // The runtime guarantees SP > objabi.StackBig, but ++ // framesize is large enough that SP-framesize may ++ // underflow, causing a direct comparison with the ++ // stack guard to incorrectly succeed. We explicitly ++ // guard against underflow. 
++ // ++ // SGTU $(framesize-StackSmall), SP, R4 ++ // BNE R4, label-of-call-to-morestack ++ ++ p = obj.Appendp(p, c.newprog) ++ p.As = ASGTU ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = offset ++ p.Reg = REGSP ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R4 ++ ++ p = obj.Appendp(p, c.newprog) ++ q = p ++ p.As = ABNE ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REG_R4 ++ p.To.Type = obj.TYPE_BRANCH ++ p.Mark |= BRANCH ++ } ++ ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = add ++ p.From.Type = obj.TYPE_CONST ++ p.From.Offset = -offset ++ p.Reg = REGSP ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R4 ++ ++ p = obj.Appendp(p, c.newprog) ++ p.As = ASGTU ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REG_R4 ++ p.Reg = REG_R19 ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R19 ++ } ++ ++ // q1: BNE R19, done ++ p = obj.Appendp(p, c.newprog) ++ q1 := p ++ ++ p.As = ABNE ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REG_R19 ++ p.To.Type = obj.TYPE_BRANCH ++ p.Mark |= BRANCH ++ ++ // MOV LINK, R5 ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = mov ++ p.From.Type = obj.TYPE_REG ++ p.From.Reg = REGLINK ++ p.To.Type = obj.TYPE_REG ++ p.To.Reg = REG_R5 ++ if q != nil { ++ q.To.SetTarget(p) ++ p.Mark |= LABEL ++ } ++ ++ p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog) ++ ++ // JAL runtime.morestack(SB) ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = AJAL ++ p.To.Type = obj.TYPE_BRANCH ++ if c.cursym.CFunc() { ++ p.To.Sym = c.ctxt.Lookup("runtime.morestackc") ++ } else if !c.cursym.Func().Text.From.Sym.NeedCtxt() { ++ p.To.Sym = c.ctxt.Lookup("runtime.morestack_noctxt") ++ } else { ++ p.To.Sym = c.ctxt.Lookup("runtime.morestack") ++ } ++ p.Mark |= BRANCH ++ ++ p = c.ctxt.EndUnsafePoint(p, c.newprog, -1) ++ ++ // JMP start ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = AJMP ++ p.To.Type = obj.TYPE_BRANCH ++ p.To.SetTarget(c.cursym.Func().Text.Link) ++ p.Mark |= BRANCH ++ ++ // placeholder for q1's jump target ++ p = obj.Appendp(p, c.newprog) ++ ++ p.As = obj.ANOP // zero-width place holder ++ q1.To.SetTarget(p) ++ ++ return p ++} ++ ++func (c *ctxt0) addnop(p *obj.Prog) { ++ q := c.newprog() ++ q.As = ANOOP ++ q.Pos = p.Pos ++ q.Link = p.Link ++ p.Link = q ++} ++ ++var Linkloong64 = obj.LinkArch{ ++ Arch: sys.ArchLoong64, ++ Init: buildop, ++ Preprocess: preprocess, ++ Assemble: span0, ++ Progedit: progedit, ++ DWARFRegisters: LOONG64DWARFRegisters, ++} +diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go +index e8441a6..9f8606e 100644 +--- a/src/cmd/internal/obj/util.go ++++ b/src/cmd/internal/obj/util.go +@@ -499,15 +499,16 @@ var regSpace []regSet + const ( + // Because of masking operations in the encodings, each register + // space should start at 0 modulo some power of 2. 
+- RBase386 = 1 * 1024 +- RBaseAMD64 = 2 * 1024 +- RBaseARM = 3 * 1024 +- RBasePPC64 = 4 * 1024 // range [4k, 8k) +- RBaseARM64 = 8 * 1024 // range [8k, 13k) +- RBaseMIPS = 13 * 1024 // range [13k, 14k) +- RBaseS390X = 14 * 1024 // range [14k, 15k) +- RBaseRISCV = 15 * 1024 // range [15k, 16k) +- RBaseWasm = 16 * 1024 ++ RBase386 = 1 * 1024 ++ RBaseAMD64 = 2 * 1024 ++ RBaseARM = 3 * 1024 ++ RBasePPC64 = 4 * 1024 // range [4k, 8k) ++ RBaseARM64 = 8 * 1024 // range [8k, 13k) ++ RBaseMIPS = 13 * 1024 // range [13k, 14k) ++ RBaseS390X = 14 * 1024 // range [14k, 15k) ++ RBaseRISCV = 15 * 1024 // range [15k, 16k) ++ RBaseWasm = 16 * 1024 ++ RBaseLOONG64 = 17 * 1024 + ) + + // RegisterRegister binds a pretty-printer (Rconv) for register +-- +2.27.0 + diff --git a/0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch b/0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch new file mode 100644 index 0000000..b2edb75 --- /dev/null +++ b/0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch @@ -0,0 +1,812 @@ +From 81f4e2576955444558421568a553e60c4e539f65 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 15:25:14 +0800 +Subject: [PATCH 15/56] cmd/asm/internal: helper function and end-to-end test + for assembler + +Change-Id: I93d8be36e44e516df70b25e20d9c0695a05510d1 +--- + src/cmd/asm/internal/arch/arch.go | 56 +++++ + src/cmd/asm/internal/arch/loong64.go | 67 ++++++ + src/cmd/asm/internal/asm/asm.go | 18 ++ + src/cmd/asm/internal/asm/endtoend_test.go | 7 + + src/cmd/asm/internal/asm/operand_test.go | 88 ++++++++ + src/cmd/asm/internal/asm/testdata/loong64.s | 11 + + .../asm/internal/asm/testdata/loong64enc1.s | 209 ++++++++++++++++++ + .../asm/internal/asm/testdata/loong64enc2.s | 82 +++++++ + .../asm/internal/asm/testdata/loong64enc3.s | 131 +++++++++++ + 9 files changed, 669 insertions(+) + create mode 100644 src/cmd/asm/internal/arch/loong64.go + create mode 100644 src/cmd/asm/internal/asm/testdata/loong64.s + create mode 100644 src/cmd/asm/internal/asm/testdata/loong64enc1.s + create mode 100644 src/cmd/asm/internal/asm/testdata/loong64enc2.s + create mode 100644 src/cmd/asm/internal/asm/testdata/loong64enc3.s + +diff --git a/src/cmd/asm/internal/arch/arch.go b/src/cmd/asm/internal/arch/arch.go +index 026d8ab..95afa1d 100644 +--- a/src/cmd/asm/internal/arch/arch.go ++++ b/src/cmd/asm/internal/arch/arch.go +@@ -9,6 +9,7 @@ import ( + "cmd/internal/obj" + "cmd/internal/obj/arm" + "cmd/internal/obj/arm64" ++ "cmd/internal/obj/loong64" + "cmd/internal/obj/mips" + "cmd/internal/obj/ppc64" + "cmd/internal/obj/riscv" +@@ -60,6 +61,8 @@ func Set(GOARCH string) *Arch { + return archArm() + case "arm64": + return archArm64() ++ case "loong64": ++ return archLoong64(&loong64.Linkloong64) + case "mips": + return archMips(&mips.Linkmips) + case "mipsle": +@@ -534,6 +537,59 @@ func archMips64(linkArch *obj.LinkArch) *Arch { + } + } + ++func archLoong64(linkArch *obj.LinkArch) *Arch { ++ register := make(map[string]int16) ++ // Create maps for easy lookup of instruction names etc. ++ // Note that there is no list of names as there is for x86. ++ for i := loong64.REG_R0; i <= loong64.REG_R31; i++ { ++ register[obj.Rconv(i)] = int16(i) ++ } ++ for i := loong64.REG_F0; i <= loong64.REG_F31; i++ { ++ register[obj.Rconv(i)] = int16(i) ++ } ++ for i := loong64.REG_FCSR0; i <= loong64.REG_FCSR31; i++ { ++ register[obj.Rconv(i)] = int16(i) ++ } ++ for i := loong64.REG_FCC0; i <= loong64.REG_FCC31; i++ { ++ register[obj.Rconv(i)] = int16(i) ++ } ++ // Pseudo-registers. 
++ register["SB"] = RSB ++ register["FP"] = RFP ++ register["PC"] = RPC ++ // Avoid unintentionally clobbering g using R22. ++ delete(register, "R22") ++ register["g"] = loong64.REG_R22 ++ register["RSB"] = loong64.REG_R31 ++ registerPrefix := map[string]bool{ ++ "F": true, ++ "FCSR": true, ++ "FCC": true, ++ "R": true, ++ } ++ ++ instructions := make(map[string]obj.As) ++ for i, s := range obj.Anames { ++ instructions[s] = obj.As(i) ++ } ++ for i, s := range loong64.Anames { ++ if obj.As(i) >= obj.A_ARCHSPECIFIC { ++ instructions[s] = obj.As(i) + obj.ABaseLOONG64 ++ } ++ } ++ // Annoying alias. ++ instructions["JAL"] = loong64.AJAL ++ ++ return &Arch{ ++ LinkArch: linkArch, ++ Instructions: instructions, ++ Register: register, ++ RegisterPrefix: registerPrefix, ++ RegisterNumber: loong64RegisterNumber, ++ IsJump: jumpLOONG64, ++ } ++} ++ + func archRISCV64() *Arch { + register := make(map[string]int16) + +diff --git a/src/cmd/asm/internal/arch/loong64.go b/src/cmd/asm/internal/arch/loong64.go +new file mode 100644 +index 0000000..e1fae1f +--- /dev/null ++++ b/src/cmd/asm/internal/arch/loong64.go +@@ -0,0 +1,67 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// This file encapsulates some of the odd characteristics of the ++// LOONG64 (LOONG64) instruction set, to minimize its interaction ++// with the core of the assembler. ++ ++package arch ++ ++import ( ++ "cmd/internal/obj" ++ "cmd/internal/obj/loong64" ++) ++ ++func jumpLOONG64(word string) bool { ++ switch word { ++ case "BEQ", "BFPF", "BFPT", "BLTZ", "BGEZ", "BLEZ", "BGTZ", "BLT", "BLTU", "JIRL", "BNE", "BGE", "BGEU", "JMP", "JAL", "CALL": ++ return true ++ } ++ return false ++} ++ ++// IsLOONG64CMP reports whether the op (as defined by an loong64.A* constant) is ++// one of the CMP instructions that require special handling. ++func IsLOONG64CMP(op obj.As) bool { ++ switch op { ++ case loong64.ACMPEQF, loong64.ACMPEQD, loong64.ACMPGEF, loong64.ACMPGED, ++ loong64.ACMPGTF, loong64.ACMPGTD: ++ return true ++ } ++ return false ++} ++ ++// IsLOONG64MUL reports whether the op (as defined by an loong64.A* constant) is ++// one of the MUL/DIV/REM instructions that require special handling. ++func IsLOONG64MUL(op obj.As) bool { ++ switch op { ++ case loong64.AMUL, loong64.AMULU, loong64.AMULV, loong64.AMULVU, ++ loong64.ADIV, loong64.ADIVU, loong64.ADIVV, loong64.ADIVVU, ++ loong64.AREM, loong64.AREMU, loong64.AREMV, loong64.AREMVU: ++ return true ++ } ++ return false ++} ++ ++func loong64RegisterNumber(name string, n int16) (int16, bool) { ++ switch name { ++ case "F": ++ if 0 <= n && n <= 31 { ++ return loong64.REG_F0 + n, true ++ } ++ case "FCSR": ++ if 0 <= n && n <= 31 { ++ return loong64.REG_FCSR0 + n, true ++ } ++ case "FCC": ++ if 0 <= n && n <= 31 { ++ return loong64.REG_FCC0 + n, true ++ } ++ case "R": ++ if 0 <= n && n <= 31 { ++ return loong64.REG_R0 + n, true ++ } ++ } ++ return 0, false ++} +diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go +index cf0d155..1397fa8 100644 +--- a/src/cmd/asm/internal/asm/asm.go ++++ b/src/cmd/asm/internal/asm/asm.go +@@ -433,6 +433,14 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) { + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } ++ if p.arch.Family == sys.Loong64 { ++ // 3-operand jumps. 
++ // First two must be registers ++ target = &a[2] ++ prog.From = a[0] ++ prog.Reg = p.getRegister(prog, op, &a[1]) ++ break ++ } + if p.arch.Family == sys.S390X { + // 3-operand jumps. + target = &a[2] +@@ -593,6 +601,12 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { + prog.Reg = p.getRegister(prog, op, &a[1]) + break + } ++ } else if p.arch.Family == sys.Loong64 { ++ if arch.IsLOONG64CMP(op) { ++ prog.From = a[0] ++ prog.Reg = p.getRegister(prog, op, &a[1]) ++ break ++ } + } + prog.From = a[0] + prog.To = a[1] +@@ -602,6 +616,10 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.To = a[2] ++ case sys.Loong64: ++ prog.From = a[0] ++ prog.Reg = p.getRegister(prog, op, &a[1]) ++ prog.To = a[2] + case sys.ARM: + // Special cases. + if arch.IsARMSTREX(op) { +diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go +index ead8b27..33a4465 100644 +--- a/src/cmd/asm/internal/asm/endtoend_test.go ++++ b/src/cmd/asm/internal/asm/endtoend_test.go +@@ -447,6 +447,13 @@ func TestMIPSEndToEnd(t *testing.T) { + testEndToEnd(t, "mips64", "mips64") + } + ++func TestLOONG64Encoder(t *testing.T) { ++ testEndToEnd(t, "loong64", "loong64enc1") ++ testEndToEnd(t, "loong64", "loong64enc2") ++ testEndToEnd(t, "loong64", "loong64enc3") ++ testEndToEnd(t, "loong64", "loong64") ++} ++ + func TestPPC64EndToEnd(t *testing.T) { + testEndToEnd(t, "ppc64", "ppc64") + } +diff --git a/src/cmd/asm/internal/asm/operand_test.go b/src/cmd/asm/internal/asm/operand_test.go +index 8ef02b1..f8c9571 100644 +--- a/src/cmd/asm/internal/asm/operand_test.go ++++ b/src/cmd/asm/internal/asm/operand_test.go +@@ -125,6 +125,11 @@ func TestMIPS64OperandParser(t *testing.T) { + testOperandParser(t, parser, mips64OperandTests) + } + ++func TestLOONG64OperandParser(t *testing.T) { ++ parser := newParser("loong64") ++ testOperandParser(t, parser, loong64OperandTests) ++} ++ + func TestS390XOperandParser(t *testing.T) { + parser := newParser("s390x") + testOperandParser(t, parser, s390xOperandTests) +@@ -143,6 +148,7 @@ func TestFuncAddress(t *testing.T) { + {"ppc64", ppc64OperandTests}, + {"mips", mipsOperandTests}, + {"mips64", mips64OperandTests}, ++ {"loong64", loong64OperandTests}, + {"s390x", s390xOperandTests}, + } { + t.Run(sub.arch, func(t *testing.T) { +@@ -845,6 +851,88 @@ var mipsOperandTests = []operandTest{ + {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. 
+ } + ++var loong64OperandTests = []operandTest{ ++ {"$((1<<63)-1)", "$9223372036854775807"}, ++ {"$(-64*1024)", "$-65536"}, ++ {"$(1024 * 8)", "$8192"}, ++ {"$-1", "$-1"}, ++ {"$-24(R4)", "$-24(R4)"}, ++ {"$0", "$0"}, ++ {"$0(R1)", "$(R1)"}, ++ {"$0.5", "$(0.5)"}, ++ {"$0x7000", "$28672"}, ++ {"$0x88888eef", "$2290650863"}, ++ {"$1", "$1"}, ++ {"$_main<>(SB)", "$_main<>(SB)"}, ++ {"$argframe(FP)", "$argframe(FP)"}, ++ {"$~3", "$-4"}, ++ {"(-288-3*8)(R1)", "-312(R1)"}, ++ {"(16)(R7)", "16(R7)"}, ++ {"(8)(g)", "8(g)"}, ++ {"(R0)", "(R0)"}, ++ {"(R3)", "(R3)"}, ++ {"(R4)", "(R4)"}, ++ {"(R5)", "(R5)"}, ++ {"-1(R4)", "-1(R4)"}, ++ {"-1(R5)", "-1(R5)"}, ++ {"6(PC)", "6(PC)"}, ++ {"F14", "F14"}, ++ {"F15", "F15"}, ++ {"F16", "F16"}, ++ {"F17", "F17"}, ++ {"F18", "F18"}, ++ {"F19", "F19"}, ++ {"F20", "F20"}, ++ {"F21", "F21"}, ++ {"F22", "F22"}, ++ {"F23", "F23"}, ++ {"F24", "F24"}, ++ {"F25", "F25"}, ++ {"F26", "F26"}, ++ {"F27", "F27"}, ++ {"F28", "F28"}, ++ {"F29", "F29"}, ++ {"F30", "F30"}, ++ {"F31", "F31"}, ++ {"R0", "R0"}, ++ {"R1", "R1"}, ++ {"R11", "R11"}, ++ {"R12", "R12"}, ++ {"R13", "R13"}, ++ {"R14", "R14"}, ++ {"R15", "R15"}, ++ {"R16", "R16"}, ++ {"R17", "R17"}, ++ {"R18", "R18"}, ++ {"R19", "R19"}, ++ {"R2", "R2"}, ++ {"R20", "R20"}, ++ {"R21", "R21"}, ++ {"R23", "R23"}, ++ {"R24", "R24"}, ++ {"R25", "R25"}, ++ {"R26", "R26"}, ++ {"R27", "R27"}, ++ {"R28", "R28"}, ++ {"R29", "R29"}, ++ {"R30", "R30"}, ++ {"R3", "R3"}, ++ {"R4", "R4"}, ++ {"R5", "R5"}, ++ {"R6", "R6"}, ++ {"R7", "R7"}, ++ {"R8", "R8"}, ++ {"R9", "R9"}, ++ {"a(FP)", "a(FP)"}, ++ {"g", "g"}, ++ {"RSB", "R31"}, ++ {"ret+8(FP)", "ret+8(FP)"}, ++ {"runtime·abort(SB)", "runtime.abort(SB)"}, ++ {"·AddUint32(SB)", "\"\".AddUint32(SB)"}, ++ {"·trunc(SB)", "\"\".trunc(SB)"}, ++ {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms. ++} ++ + var s390xOperandTests = []operandTest{ + {"$((1<<63)-1)", "$9223372036854775807"}, + {"$(-64*1024)", "$-65536"}, +diff --git a/src/cmd/asm/internal/asm/testdata/loong64.s b/src/cmd/asm/internal/asm/testdata/loong64.s +new file mode 100644 +index 0000000..a4bf9ec +--- /dev/null ++++ b/src/cmd/asm/internal/asm/testdata/loong64.s +@@ -0,0 +1,11 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++#include "../../../../../runtime/textflag.h" ++//TODO ++ ++TEXT foo(SB),DUPOK|NOSPLIT,$0 ++ JAL 1(PC) //CALL 1(PC) //000c0054 ++ JAL (R4) //CALL (R4) //8100004c ++ JAL foo(SB) //CALL foo(SB) //00100054 +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +new file mode 100644 +index 0000000..c724cf9 +--- /dev/null ++++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s +@@ -0,0 +1,209 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++#include "../../../../../runtime/textflag.h" ++ ++TEXT asmtest(SB),DUPOK|NOSPLIT,$0 ++lable1: ++ BFPT 1(PC) // 00050048 ++ BFPT lable1 // BFPT 2 //1ffdff4b ++ ++lable2: ++ BFPF 1(PC) // 00040048 ++ BFPF lable2 // BFPF 4 // 1ffcff4b ++ ++ JMP foo(SB) // 00100050 ++ JMP (R4) // 8000004c ++ JMP 1(PC) // 00040058 ++ MOVW $65536, R4 // 04020014 ++ MOVW $4096, R4 // 24000014 ++ MOVV $65536, R4 // 04020014 ++ MOVV $4096, R4 // 24000014 ++ MOVW R4, R5 // 85001700 ++ MOVV R4, R5 // 85001500 ++ MOVBU R4, R5 // 85fc4303 ++ SUB R4, R5, R6 // a6101100 ++ SUBV R4, R5, R6 // a6901100 ++ ADD R4, R5, R6 // a6101000 ++ ADDV R4, R5, R6 // a6901000 ++ AND R4, R5, R6 // a6901400 ++ SUB R4, R5 // a5101100 ++ SUBV R4, R5 // a5901100 ++ ADD R4, R5 // a5101000 ++ ADDV R4, R5 // a5901000 ++ AND R4, R5 // a5901400 ++ NEGW R4, R5 // 05101100 ++ NEGV R4, R5 // 05901100 ++ SLL R4, R5 // a5101700 ++ SLL R4, R5, R6 // a6101700 ++ SRL R4, R5 // a5901700 ++ SRL R4, R5, R6 // a6901700 ++ SRA R4, R5 // a5101800 ++ SRA R4, R5, R6 // a6101800 ++ SLLV R4, R5 // a5901800 ++ SLLV R4, R5, R6 // a6901800 ++ CLO R4, R5 // 85100000 ++ CLZ R4, R5 // 85140000 ++ ADDF F4, F5 // a5900001 ++ ADDF F4, R5, F6 // a6900001 ++ CMPEQF F4, R5 // a010120c ++ ABSF F4, F5 // 85041401 ++ MOVVF F4, F5 // 85181d01 ++ MOVF F4, F5 // 85941401 ++ MOVD F4, F5 // 85981401 ++ MOVW R4, result+16(FP) // 64608029 ++ MOVWU R4, result+16(FP) // 64608029 ++ MOVV R4, result+16(FP) // 6460c029 ++ MOVB R4, result+16(FP) // 64600029 ++ MOVBU R4, result+16(FP) // 64600029 ++ MOVWL R4, result+16(FP) // 6460002f ++ MOVVL R4, result+16(FP) // 6460802f ++ MOVW R4, 1(R5) // a4048029 ++ MOVWU R4, 1(R5) // a4048029 ++ MOVV R4, 1(R5) // a404c029 ++ MOVB R4, 1(R5) // a4040029 ++ MOVBU R4, 1(R5) // a4040029 ++ MOVWL R4, 1(R5) // a404002f ++ MOVVL R4, 1(R5) // a404802f ++ SC R4, 1(R5) // a4040021 ++ SCV R4, 1(R5) // a4040023 ++ MOVW y+8(FP), R4 // 64408028 ++ MOVWU y+8(FP), R4 // 6440802a ++ MOVV y+8(FP), R4 // 6440c028 ++ MOVB y+8(FP), R4 // 64400028 ++ MOVBU y+8(FP), R4 // 6440002a ++ MOVWL y+8(FP), R4 // 6440002e ++ MOVVL y+8(FP), R4 // 6440802e ++ MOVW 1(R5), R4 // a4048028 ++ MOVWU 1(R5), R4 // a404802a ++ MOVV 1(R5), R4 // a404c028 ++ MOVB 1(R5), R4 // a4040028 ++ MOVBU 1(R5), R4 // a404002a ++ MOVWL 1(R5), R4 // a404002e ++ MOVVL 1(R5), R4 // a404802e ++ LL 1(R5), R4 // a4040020 ++ LLV 1(R5), R4 // a4040022 ++ MOVW $4(R4), R5 // 8510c002 ++ MOVV $4(R4), R5 // 8510c002 ++ MOVW $-1, R4 // 04fcff02 ++ MOVV $-1, R4 // 04fcff02 ++ MOVW $1, R4 // 0404c002 ++ MOVV $1, R4 // 0404c002 ++ ADD $-1, R4, R5 // 85fcbf02 ++ ADD $-1, R4 // 84fcbf02 ++ ADDV $-1, R4, R5 // 85fcff02 ++ ADDV $-1, R4 // 84fcff02 ++ AND $1, R4, R5 // 85044003 ++ AND $1, R4 // 84044003 ++ SLL $4, R4, R5 // 85904000 ++ SLL $4, R4 // 84904000 ++ SRL $4, R4, R5 // 85904400 ++ SRL $4, R4 // 84904400 ++ SRA $4, R4, R5 // 85904800 ++ SRA $4, R4 // 84904800 ++ SLLV $4, R4, R5 // 85104100 ++ SLLV $4, R4 // 84104100 ++ SYSCALL // 00002b00 ++ BEQ R4, R5, 1(PC) // 85040058 ++ BEQ R4, 1(PC) // 80040058 ++ BLTU R4, 1(PC) // 80040068 ++ MOVW y+8(FP), F4 // 6440002b ++ MOVF y+8(FP), F4 // 6440002b ++ MOVD y+8(FP), F4 // 6440802b ++ MOVW 1(F5), F4 // a404002b ++ MOVF 1(F5), F4 // a404002b ++ MOVD 1(F5), F4 // a404802b ++ MOVW F4, result+16(FP) // 6460402b ++ MOVF F4, result+16(FP) // 6460402b ++ MOVD F4, result+16(FP) // 6460c02b ++ MOVW F4, 1(F5) // a404402b ++ MOVF F4, 1(F5) // a404402b ++ MOVD F4, 1(F5) // a404c02b ++ MOVW R4, F5 // 85a41401 ++ MOVW F4, R5 // 85b41401 ++ MOVV R4, F5 // 85a81401 ++ MOVV F4, R5 // 
85b81401 ++ WORD $74565 // 45230100 ++ BREAK R4, result+16(FP) // 64600006 ++ BREAK R4, 1(R5) // a4040006 ++ BREAK // 00002a00 ++ UNDEF // 00002a00 ++ ++ // mul ++ MUL R4, R5 // a5101c00 ++ MUL R4, R5, R6 // a6101c00 ++ MULV R4, R5 // a5901d00 ++ MULV R4, R5, R6 // a6901d00 ++ MULVU R4, R5 // a5901d00 ++ MULVU R4, R5, R6 // a6901d00 ++ MULHV R4, R5 // a5101e00 ++ MULHV R4, R5, R6 // a6101e00 ++ MULHVU R4, R5 // a5901e00 ++ MULHVU R4, R5, R6 // a6901e00 ++ REMV R4, R5 // a5902200 ++ REMV R4, R5, R6 // a6902200 ++ REMVU R4, R5 // a5902300 ++ REMVU R4, R5, R6 // a6902300 ++ DIVV R4, R5 // a5102200 ++ DIVV R4, R5, R6 // a6102200 ++ DIVVU R4, R5 // a5102300 ++ DIVVU R4, R5, R6 // a6102300 ++ ++ MOVH R4, result+16(FP) // 64604029 ++ MOVH R4, 1(R5) // a4044029 ++ MOVH y+8(FP), R4 // 64404028 ++ MOVH 1(R5), R4 // a4044028 ++ MOVHU R4, R5 // 8500cf00 ++ MOVHU R4, result+16(FP) // 64604029 ++ MOVHU R4, 1(R5) // a4044029 ++ MOVHU y+8(FP), R4 // 6440402a ++ MOVHU 1(R5), R4 // a404402a ++ MULU R4, R5 // a5101c00 ++ MULU R4, R5, R6 // a6101c00 ++ MULH R4, R5 // a5901c00 ++ MULH R4, R5, R6 // a6901c00 ++ MULHU R4, R5 // a5101d00 ++ MULHU R4, R5, R6 // a6101d00 ++ REM R4, R5 // a5902000 ++ REM R4, R5, R6 // a6902000 ++ REMU R4, R5 // a5902100 ++ REMU R4, R5, R6 // a6902100 ++ DIV R4, R5 // a5102000 ++ DIV R4, R5, R6 // a6102000 ++ DIVU R4, R5 // a5102100 ++ DIVU R4, R5, R6 // a6102100 ++ SRLV R4, R5 // a5101900 ++ SRLV R4, R5, R6 // a6101900 ++ SRLV $4, R4, R5 // 85104500 ++ SRLV $4, R4 // 84104500 ++ SRLV $32, R4, R5 // 85804500 ++ SRLV $32, R4 // 84804500 ++ ++ MOVFD F4, F5 // 85241901 ++ MOVDF F4, F5 // 85181901 ++ MOVWF F4, F5 // 85101d01 ++ MOVFW F4, F5 // 85041b01 ++ MOVWD F4, F5 // 85201d01 ++ MOVDW F4, F5 // 85081b01 ++ NEGF F4, F5 // 85141401 ++ NEGD F4, F5 // 85181401 ++ ABSD F4, F5 // 85081401 ++ TRUNCDW F4, F5 // 85881a01 ++ TRUNCFW F4, F5 // 85841a01 ++ SQRTF F4, F5 // 85441401 ++ SQRTD F4, F5 // 85481401 ++ ++ DBAR // 00007238 ++ NOOP // 00004003 ++ ++ MOVWR R4, result+16(FP) // 6460402f ++ MOVWR R4, 1(R5) // a404402f ++ MOVWR y+8(FP), R4 // 6440402e ++ MOVWR 1(R5), R4 // a404402e ++ ++ CMPGTF F4, R5 // a090110c ++ CMPGTD F4, R5 // a090210c ++ CMPGEF F4, R5 // a090130c ++ CMPGED F4, R5 // a090230c ++ CMPEQD F4, R5 // a010220c +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc2.s b/src/cmd/asm/internal/asm/testdata/loong64enc2.s +new file mode 100644 +index 0000000..675b263 +--- /dev/null ++++ b/src/cmd/asm/internal/asm/testdata/loong64enc2.s +@@ -0,0 +1,82 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++#include "../../../../../runtime/textflag.h" ++ ++TEXT asmtest(SB),DUPOK|NOSPLIT,$0 ++ MOVB R4, R5 // 85e04000a5e04800 ++ MOVWU R4, R5 // 85804100a5804500 ++ MOVW $74565, R4 // 4402001484148d03 ++ MOVW $4097, R4 // 2400001484048003 ++ MOVV $74565, R4 // 4402001484148d03 ++ MOVV $4097, R4 // 2400001484048003 ++ AND $-1, R4, R5 // 1efcbf0285f81400 ++ AND $-1, R4 // 1efcbf0284f81400 ++ MOVW $-1, F4 // 1efcbf02c4a71401 ++ MOVW $1, F4 // 1e048002c4a71401 ++ TEQ $4, R4, R5 // 8508005c04002a00 ++ TEQ $4, R4 // 0408005c04002a00 ++ TNE $4, R4, R5 // 8508005804002a00 ++ TNE $4, R4 // 0408005804002a00 ++ ADD $65536, R4, R5 // 1e02001485781000 ++ ADD $4096, R4, R5 // 3e00001485781000 ++ ADD $65536, R4 // 1e02001484781000 ++ ADD $4096, R4 // 3e00001484781000 ++ ADDV $65536, R4, R5 // 1e02001485f81000 ++ ADDV $4096, R4, R5 // 3e00001485f81000 ++ ADDV $65536, R4 // 1e02001484f81000 ++ ADDV $4096, R4 // 3e00001484f81000 ++ AND $65536, R4, R5 // 1e02001485f81400 ++ AND $4096, R4, R5 // 3e00001485f81400 ++ AND $65536, R4 // 1e02001484f81400 ++ AND $4096, R4 // 3e00001484f81400 ++ SGT $65536, R4, R5 // 1e02001485781200 ++ SGT $4096, R4, R5 // 3e00001485781200 ++ SGT $65536, R4 // 1e02001484781200 ++ SGT $4096, R4 // 3e00001484781200 ++ SGTU $65536, R4, R5 // 1e02001485f81200 ++ SGTU $4096, R4, R5 // 3e00001485f81200 ++ SGTU $65536, R4 // 1e02001484f81200 ++ SGTU $4096, R4 // 3e00001484f81200 ++ ADDU $65536, R4, R5 // 1e02001485781000 ++ ADDU $4096, R4, R5 // 3e00001485781000 ++ ADDU $65536, R4 // 1e02001484781000 ++ ADDU $4096, R4 // 3e00001484781000 ++ ADDVU $65536, R4, R5 // 1e02001485f81000 ++ ADDVU $4096, R4, R5 // 3e00001485f81000 ++ ADDVU $65536, R4 // 1e02001484f81000 ++ ADDVU $4096, R4 // 3e00001484f81000 ++ OR $65536, R4, R5 // 1e02001485781500 ++ OR $4096, R4, R5 // 3e00001485781500 ++ OR $65536, R4 // 1e02001484781500 ++ OR $4096, R4 // 3e00001484781500 ++ OR $-1, R4, R5 // 1efcbf0285781500 ++ OR $-1, R4 // 1efcbf0284781500 ++ XOR $65536, R4, R5 // 1e02001485f81500 ++ XOR $4096, R4, R5 // 3e00001485f81500 ++ XOR $65536, R4 // 1e02001484f81500 ++ XOR $4096, R4 // 3e00001484f81500 ++ XOR $-1, R4, R5 // 1efcbf0285f81500 ++ XOR $-1, R4 // 1efcbf0284f81500 ++ MOVH R4, R5 // 85c04000a5c04800 ++ ++ // relocation instructions ++ MOVW R4, name(SB) // 1e00001cc4038029 ++ MOVWU R4, name(SB) // 1e00001cc4038029 ++ MOVV R4, name(SB) // 1e00001cc403c029 ++ MOVB R4, name(SB) // 1e00001cc4030029 ++ MOVBU R4, name(SB) // 1e00001cc4030029 ++ MOVF F4, name(SB) // 1e00001cc403402b ++ MOVD F4, name(SB) // 1e00001cc403c02b ++ MOVW name(SB), R4 // 1e00001cc4038028 ++ MOVWU name(SB), R4 // 1e00001cc403802a ++ MOVV name(SB), R4 // 1e00001cc403c028 ++ MOVB name(SB), R4 // 1e00001cc4030028 ++ MOVBU name(SB), R4 // 1e00001cc403002a ++ MOVF name(SB), F4 // 1e00001cc403002b ++ MOVD name(SB), F4 // 1e00001cc403802b ++ MOVH R4, name(SB) // 1e00001cc4034029 ++ MOVH name(SB), R4 // 1e00001cc4034028 ++ MOVHU R4, name(SB) // 1e00001cc4034029 ++ MOVHU name(SB), R4 // 1e00001cc403402a +diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc3.s b/src/cmd/asm/internal/asm/testdata/loong64enc3.s +new file mode 100644 +index 0000000..fd6d9fe +--- /dev/null ++++ b/src/cmd/asm/internal/asm/testdata/loong64enc3.s +@@ -0,0 +1,131 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++#include "../../../../../runtime/textflag.h" ++ ++TEXT asmtest(SB),DUPOK|NOSPLIT,$0 ++ MOVW $65536(R4), R5 // 1e020014de03800385f81000 ++ MOVW $4096(R4), R5 // 3e000014de03800385f81000 ++ MOVV $65536(R4), R5 // 1e020014de03800385f81000 ++ MOVV $4096(R4), R5 // 3e000014de03800385f81000 ++ ADD $74565, R4 // 5e020014de178d0384781000 ++ ADD $4097, R4 // 3e000014de07800384781000 ++ ADDV $74565, R4 // 5e020014de178d0384f81000 ++ ADDV $4097, R4 // 3e000014de07800384f81000 ++ AND $74565, R4 // 5e020014de178d0384f81400 ++ AND $4097, R4 // 3e000014de07800384f81400 ++ ADD $74565, R4, R5 // 5e020014de178d0385781000 ++ ADD $4097, R4, R5 // 3e000014de07800385781000 ++ ADDV $74565, R4, R5 // 5e020014de178d0385f81000 ++ ADDV $4097, R4, R5 // 3e000014de07800385f81000 ++ AND $74565, R4, R5 // 5e020014de178d0385f81400 ++ AND $4097, R4, R5 // 3e000014de07800385f81400 ++ ++ MOVW R4, result+65540(FP) // 1e020014de8f1000c4338029 ++ MOVW R4, result+4097(FP) // 3e000014de8f1000c4278029 ++ MOVWU R4, result+65540(FP) // 1e020014de8f1000c4338029 ++ MOVWU R4, result+4097(FP) // 3e000014de8f1000c4278029 ++ MOVV R4, result+65540(FP) // 1e020014de8f1000c433c029 ++ MOVV R4, result+4097(FP) // 3e000014de8f1000c427c029 ++ MOVB R4, result+65540(FP) // 1e020014de8f1000c4330029 ++ MOVB R4, result+4097(FP) // 3e000014de8f1000c4270029 ++ MOVBU R4, result+65540(FP) // 1e020014de8f1000c4330029 ++ MOVBU R4, result+4097(FP) // 3e000014de8f1000c4270029 ++ MOVW R4, 65536(R5) // 1e020014de971000c4038029 ++ MOVW R4, 4096(R5) // 3e000014de971000c4038029 ++ MOVWU R4, 65536(R5) // 1e020014de971000c4038029 ++ MOVWU R4, 4096(R5) // 3e000014de971000c4038029 ++ MOVV R4, 65536(R5) // 1e020014de971000c403c029 ++ MOVV R4, 4096(R5) // 3e000014de971000c403c029 ++ MOVB R4, 65536(R5) // 1e020014de971000c4030029 ++ MOVB R4, 4096(R5) // 3e000014de971000c4030029 ++ MOVBU R4, 65536(R5) // 1e020014de971000c4030029 ++ MOVBU R4, 4096(R5) // 3e000014de971000c4030029 ++ SC R4, 65536(R5) // 1e020014de971000c4030021 ++ SC R4, 4096(R5) // 3e000014de971000c4030021 ++ MOVW y+65540(FP), R4 // 1e020014de8f1000c4338028 ++ MOVWU y+65540(FP), R4 // 1e020014de8f1000c433802a ++ MOVV y+65540(FP), R4 // 1e020014de8f1000c433c028 ++ MOVB y+65540(FP), R4 // 1e020014de8f1000c4330028 ++ MOVBU y+65540(FP), R4 // 1e020014de8f1000c433002a ++ MOVW y+4097(FP), R4 // 3e000014de8f1000c4278028 ++ MOVWU y+4097(FP), R4 // 3e000014de8f1000c427802a ++ MOVV y+4097(FP), R4 // 3e000014de8f1000c427c028 ++ MOVB y+4097(FP), R4 // 3e000014de8f1000c4270028 ++ MOVBU y+4097(FP), R4 // 3e000014de8f1000c427002a ++ MOVW 65536(R5), R4 // 1e020014de971000c4038028 ++ MOVWU 65536(R5), R4 // 1e020014de971000c403802a ++ MOVV 65536(R5), R4 // 1e020014de971000c403c028 ++ MOVB 65536(R5), R4 // 1e020014de971000c4030028 ++ MOVBU 65536(R5), R4 // 1e020014de971000c403002a ++ MOVW 4096(R5), R4 // 3e000014de971000c4038028 ++ MOVWU 4096(R5), R4 // 3e000014de971000c403802a ++ MOVV 4096(R5), R4 // 3e000014de971000c403c028 ++ MOVB 4096(R5), R4 // 3e000014de971000c4030028 ++ MOVBU 4096(R5), R4 // 3e000014de971000c403002a ++ MOVW y+65540(FP), F4 // 1e020014de8f1000c433002b ++ MOVF y+65540(FP), F4 // 1e020014de8f1000c433002b ++ MOVD y+65540(FP), F4 // 1e020014de8f1000c433802b ++ MOVW y+4097(FP), F4 // 3e000014de8f1000c427002b ++ MOVF y+4097(FP), F4 // 3e000014de8f1000c427002b ++ MOVD y+4097(FP), F4 // 3e000014de8f1000c427802b ++ MOVW 65536(R5), F4 // 1e020014de971000c403002b ++ MOVF 65536(R5), F4 // 1e020014de971000c403002b ++ MOVD 65536(R5), F4 // 1e020014de971000c403802b ++ MOVW 4096(R5), F4 // 
3e000014de971000c403002b ++ MOVF 4096(R5), F4 // 3e000014de971000c403002b ++ MOVD 4096(R5), F4 // 3e000014de971000c403802b ++ MOVW F4, result+65540(FP) // 1e020014de8f1000c433402b ++ MOVF F4, result+65540(FP) // 1e020014de8f1000c433402b ++ MOVD F4, result+65540(FP) // 1e020014de8f1000c433c02b ++ MOVW F4, result+4097(FP) // 3e000014de8f1000c427402b ++ MOVF F4, result+4097(FP) // 3e000014de8f1000c427402b ++ MOVD F4, result+4097(FP) // 3e000014de8f1000c427c02b ++ MOVW F4, 65536(R5) // 1e020014de971000c403402b ++ MOVF F4, 65536(R5) // 1e020014de971000c403402b ++ MOVD F4, 65536(R5) // 1e020014de971000c403c02b ++ MOVW F4, 4096(R5) // 3e000014de971000c403402b ++ MOVF F4, 4096(R5) // 3e000014de971000c403402b ++ MOVD F4, 4096(R5) // 3e000014de971000c403c02b ++ ++ MOVH R4, result+65540(FP) // 1e020014de8f1000c4334029 ++ MOVH R4, 65536(R5) // 1e020014de971000c4034029 ++ MOVH y+65540(FP), R4 // 1e020014de8f1000c4334028 ++ MOVH 65536(R5), R4 // 1e020014de971000c4034028 ++ MOVH R4, result+4097(FP) // 3e000014de8f1000c4274029 ++ MOVH R4, 4096(R5) // 3e000014de971000c4034029 ++ MOVH y+4097(FP), R4 // 3e000014de8f1000c4274028 ++ MOVH 4096(R5), R4 // 3e000014de971000c4034028 ++ MOVHU R4, result+65540(FP) // 1e020014de8f1000c4334029 ++ MOVHU R4, 65536(R5) // 1e020014de971000c4034029 ++ MOVHU y+65540(FP), R4 // 1e020014de8f1000c433402a ++ MOVHU 65536(R5), R4 // 1e020014de971000c403402a ++ MOVHU R4, result+4097(FP) // 3e000014de8f1000c4274029 ++ MOVHU R4, 4096(R5) // 3e000014de971000c4034029 ++ MOVHU y+4097(FP), R4 // 3e000014de8f1000c427402a ++ MOVHU 4096(R5), R4 // 3e000014de971000c403402a ++ SGT $74565, R4 // 5e020014de178d0384781200 ++ SGT $74565, R4, R5 // 5e020014de178d0385781200 ++ SGT $4097, R4 // 3e000014de07800384781200 ++ SGT $4097, R4, R5 // 3e000014de07800385781200 ++ SGTU $74565, R4 // 5e020014de178d0384f81200 ++ SGTU $74565, R4, R5 // 5e020014de178d0385f81200 ++ SGTU $4097, R4 // 3e000014de07800384f81200 ++ SGTU $4097, R4, R5 // 3e000014de07800385f81200 ++ ADDU $74565, R4 // 5e020014de178d0384781000 ++ ADDU $74565, R4, R5 // 5e020014de178d0385781000 ++ ADDU $4097, R4 // 3e000014de07800384781000 ++ ADDU $4097, R4, R5 // 3e000014de07800385781000 ++ ADDVU $4097, R4 // 3e000014de07800384f81000 ++ ADDVU $4097, R4, R5 // 3e000014de07800385f81000 ++ ADDVU $74565, R4 // 5e020014de178d0384f81000 ++ ADDVU $74565, R4, R5 // 5e020014de178d0385f81000 ++ OR $74565, R4 // 5e020014de178d0384781500 ++ OR $74565, R4, R5 // 5e020014de178d0385781500 ++ OR $4097, R4 // 3e000014de07800384781500 ++ OR $4097, R4, R5 // 3e000014de07800385781500 ++ XOR $74565, R4 // 5e020014de178d0384f81500 ++ XOR $74565, R4, R5 // 5e020014de178d0385f81500 ++ XOR $4097, R4 // 3e000014de07800384f81500 ++ XOR $4097, R4, R5 // 3e000014de07800385f81500 +-- +2.27.0 + diff --git a/0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch b/0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch new file mode 100644 index 0000000..b69725d --- /dev/null +++ b/0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch @@ -0,0 +1,750 @@ +From 29f08a9ca2354e051b0c9bae4aa5e8e31fcd53bf Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:25:46 +0800 +Subject: [PATCH 16/56] cmd/internal/objabi,cmd/link: support linker for + linux/loong64 + +Change-Id: I4680eb0635dd0fa3d6ea8348a2488da9c7e33d3b +--- + src/cmd/internal/objabi/reloctype.go | 22 +- + src/cmd/internal/objabi/reloctype_string.go | 124 +++++----- + src/cmd/link/internal/ld/config.go | 2 +- + src/cmd/link/internal/ld/elf.go | 9 +- + 
src/cmd/link/internal/ld/lib.go | 2 + + src/cmd/link/internal/ld/pcln.go | 2 +- + src/cmd/link/internal/ld/target.go | 4 + + src/cmd/link/internal/loadelf/ldelf.go | 14 ++ + src/cmd/link/internal/loong64/asm.go | 237 ++++++++++++++++++++ + src/cmd/link/internal/loong64/l.go | 17 ++ + src/cmd/link/internal/loong64/obj.go | 58 +++++ + src/cmd/link/internal/sym/reloc.go | 2 + + src/cmd/link/link_test.go | 2 + + src/cmd/link/main.go | 3 + + 14 files changed, 434 insertions(+), 64 deletions(-) + create mode 100644 src/cmd/link/internal/loong64/asm.go + create mode 100644 src/cmd/link/internal/loong64/l.go + create mode 100644 src/cmd/link/internal/loong64/obj.go + +diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go +index 52827a6..99bc5f7 100644 +--- a/src/cmd/internal/objabi/reloctype.go ++++ b/src/cmd/internal/objabi/reloctype.go +@@ -47,6 +47,9 @@ const ( + // R_ADDRMIPS (only used on mips/mips64) resolves to the low 16 bits of an external + // address, by encoding it into the instruction. + R_ADDRMIPS ++ // R_ADDRLOONG64 (only used on loong64) resolves to the low 12 bits of an external ++ // address, by encoding it into the instruction. ++ R_ADDRLOONG64 + // R_ADDROFF resolves to a 32-bit offset from the beginning of the section + // holding the data being relocated to the referenced symbol. + R_ADDROFF +@@ -61,6 +64,9 @@ const ( + R_CALLMIPS + // R_CALLRISCV marks RISC-V CALLs for stack checking. + R_CALLRISCV ++ // R_CALLLOONG64 (only used on loong64) resolves to non-PC-relative target address ++ // of a CALL (BL/JIRL) instruction, by encoding the address into the instruction. ++ R_CALLLOONG64 + R_CONST + R_PCREL + // R_TLS_LE, used on 386, amd64, and ARM, resolves to the offset of the +@@ -110,6 +116,9 @@ const ( + // of a JMP instruction, by encoding the address into the instruction. + // The stack nosplit check ignores this since it is not a function call. + R_JMPMIPS ++ // R_JMPLOONG64 (only used on loong64) resolves to non-PC-relative target address ++ // of a JMP instruction, by encoding the address into the instruction. ++ R_JMPLOONG64 + + // R_DWARFSECREF resolves to the offset of the symbol from its section. + // Target of relocation must be size 4 (in current implementation). +@@ -241,9 +250,18 @@ const ( + // R_ADDRMIPSU (only used on mips/mips64) resolves to the sign-adjusted "upper" 16 + // bits (bit 16-31) of an external address, by encoding it into the instruction. + R_ADDRMIPSU ++ // R_ADDRLOONG64U (only used on loong64) resolves to the sign-adjusted "upper" 20 ++ // bits (bit 12-31) of an external address, by encoding it into the instruction. ++ R_ADDRLOONG64U + // R_ADDRMIPSTLS (only used on mips64) resolves to the low 16 bits of a TLS + // address (offset from thread pointer), by encoding it into the instruction. + R_ADDRMIPSTLS ++ // R_ADDRLOONG64TLS (only used on loong64) resolves to the low 12 bits of a TLS ++ // address (offset from thread pointer), by encoding it into the instruction. ++ R_ADDRLOONG64TLS ++ // R_ADDRLOONG64TLSU (only used on loong64) resolves to the high 20 bits of a TLS ++ // address (offset from thread pointer), by encoding it into the instruction. ++ R_ADDRLOONG64TLSU + + // R_ADDRCUOFF resolves to a pointer-sized offset from the start of the + // symbol's DWARF compile unit. +@@ -274,7 +292,7 @@ const ( + // the target address in register or memory. 
+ func (r RelocType) IsDirectCall() bool { + switch r { +- case R_CALL, R_CALLARM, R_CALLARM64, R_CALLMIPS, R_CALLPOWER, R_CALLRISCV: ++ case R_CALL, R_CALLARM, R_CALLARM64, R_CALLLOONG64, R_CALLMIPS, R_CALLPOWER, R_CALLRISCV: + return true + } + return false +@@ -289,6 +307,8 @@ func (r RelocType) IsDirectJump() bool { + switch r { + case R_JMPMIPS: + return true ++ case R_JMPLOONG64: ++ return true + } + return false + } +diff --git a/src/cmd/internal/objabi/reloctype_string.go b/src/cmd/internal/objabi/reloctype_string.go +index 4638ef1..9d34a0f 100644 +--- a/src/cmd/internal/objabi/reloctype_string.go ++++ b/src/cmd/internal/objabi/reloctype_string.go +@@ -1,4 +1,4 @@ +-// Code generated by "stringer -type=RelocType"; DO NOT EDIT. ++// Code generated by "stringer -type RelocType reloctype.go"; DO NOT EDIT. + + package objabi + +@@ -12,67 +12,73 @@ func _() { + _ = x[R_ADDRPOWER-2] + _ = x[R_ADDRARM64-3] + _ = x[R_ADDRMIPS-4] +- _ = x[R_ADDROFF-5] +- _ = x[R_SIZE-6] +- _ = x[R_CALL-7] +- _ = x[R_CALLARM-8] +- _ = x[R_CALLARM64-9] +- _ = x[R_CALLIND-10] +- _ = x[R_CALLPOWER-11] +- _ = x[R_CALLMIPS-12] +- _ = x[R_CALLRISCV-13] +- _ = x[R_CONST-14] +- _ = x[R_PCREL-15] +- _ = x[R_TLS_LE-16] +- _ = x[R_TLS_IE-17] +- _ = x[R_GOTOFF-18] +- _ = x[R_PLT0-19] +- _ = x[R_PLT1-20] +- _ = x[R_PLT2-21] +- _ = x[R_USEFIELD-22] +- _ = x[R_USETYPE-23] +- _ = x[R_USEIFACE-24] +- _ = x[R_USEIFACEMETHOD-25] +- _ = x[R_METHODOFF-26] +- _ = x[R_KEEP-27] +- _ = x[R_POWER_TOC-28] +- _ = x[R_GOTPCREL-29] +- _ = x[R_JMPMIPS-30] +- _ = x[R_DWARFSECREF-31] +- _ = x[R_DWARFFILEREF-32] +- _ = x[R_ARM64_TLS_LE-33] +- _ = x[R_ARM64_TLS_IE-34] +- _ = x[R_ARM64_GOTPCREL-35] +- _ = x[R_ARM64_GOT-36] +- _ = x[R_ARM64_PCREL-37] +- _ = x[R_ARM64_LDST8-38] +- _ = x[R_ARM64_LDST16-39] +- _ = x[R_ARM64_LDST32-40] +- _ = x[R_ARM64_LDST64-41] +- _ = x[R_ARM64_LDST128-42] +- _ = x[R_POWER_TLS_LE-43] +- _ = x[R_POWER_TLS_IE-44] +- _ = x[R_POWER_TLS-45] +- _ = x[R_ADDRPOWER_DS-46] +- _ = x[R_ADDRPOWER_GOT-47] +- _ = x[R_ADDRPOWER_PCREL-48] +- _ = x[R_ADDRPOWER_TOCREL-49] +- _ = x[R_ADDRPOWER_TOCREL_DS-50] +- _ = x[R_RISCV_PCREL_ITYPE-51] +- _ = x[R_RISCV_PCREL_STYPE-52] +- _ = x[R_RISCV_TLS_IE_ITYPE-53] +- _ = x[R_RISCV_TLS_IE_STYPE-54] +- _ = x[R_PCRELDBL-55] +- _ = x[R_ADDRMIPSU-56] +- _ = x[R_ADDRMIPSTLS-57] +- _ = x[R_ADDRCUOFF-58] +- _ = x[R_WASMIMPORT-59] +- _ = x[R_XCOFFREF-60] ++ _ = x[R_ADDRLOONG64-5] ++ _ = x[R_ADDROFF-6] ++ _ = x[R_SIZE-7] ++ _ = x[R_CALL-8] ++ _ = x[R_CALLARM-9] ++ _ = x[R_CALLARM64-10] ++ _ = x[R_CALLIND-11] ++ _ = x[R_CALLPOWER-12] ++ _ = x[R_CALLMIPS-13] ++ _ = x[R_CALLRISCV-14] ++ _ = x[R_CALLLOONG64-15] ++ _ = x[R_CONST-16] ++ _ = x[R_PCREL-17] ++ _ = x[R_TLS_LE-18] ++ _ = x[R_TLS_IE-19] ++ _ = x[R_GOTOFF-20] ++ _ = x[R_PLT0-21] ++ _ = x[R_PLT1-22] ++ _ = x[R_PLT2-23] ++ _ = x[R_USEFIELD-24] ++ _ = x[R_USETYPE-25] ++ _ = x[R_USEIFACE-26] ++ _ = x[R_USEIFACEMETHOD-27] ++ _ = x[R_METHODOFF-28] ++ _ = x[R_KEEP-29] ++ _ = x[R_POWER_TOC-30] ++ _ = x[R_GOTPCREL-31] ++ _ = x[R_JMPMIPS-32] ++ _ = x[R_JMPLOONG64-33] ++ _ = x[R_DWARFSECREF-34] ++ _ = x[R_DWARFFILEREF-35] ++ _ = x[R_ARM64_TLS_LE-36] ++ _ = x[R_ARM64_TLS_IE-37] ++ _ = x[R_ARM64_GOTPCREL-38] ++ _ = x[R_ARM64_GOT-39] ++ _ = x[R_ARM64_PCREL-40] ++ _ = x[R_ARM64_LDST8-41] ++ _ = x[R_ARM64_LDST16-42] ++ _ = x[R_ARM64_LDST32-43] ++ _ = x[R_ARM64_LDST64-44] ++ _ = x[R_ARM64_LDST128-45] ++ _ = x[R_POWER_TLS_LE-46] ++ _ = x[R_POWER_TLS_IE-47] ++ _ = x[R_POWER_TLS-48] ++ _ = x[R_ADDRPOWER_DS-49] ++ _ = x[R_ADDRPOWER_GOT-50] ++ _ = 
x[R_ADDRPOWER_PCREL-51] ++ _ = x[R_ADDRPOWER_TOCREL-52] ++ _ = x[R_ADDRPOWER_TOCREL_DS-53] ++ _ = x[R_RISCV_PCREL_ITYPE-54] ++ _ = x[R_RISCV_PCREL_STYPE-55] ++ _ = x[R_RISCV_TLS_IE_ITYPE-56] ++ _ = x[R_RISCV_TLS_IE_STYPE-57] ++ _ = x[R_PCRELDBL-58] ++ _ = x[R_ADDRMIPSU-59] ++ _ = x[R_ADDRLOONG64U-60] ++ _ = x[R_ADDRMIPSTLS-61] ++ _ = x[R_ADDRLOONG64TLS-62] ++ _ = x[R_ADDRLOONG64TLSU-63] ++ _ = x[R_ADDRCUOFF-64] ++ _ = x[R_WASMIMPORT-65] ++ _ = x[R_XCOFFREF-66] + } + +-const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IE_ITYPER_RISCV_TLS_IE_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" ++const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDRLOONG64R_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CALLLOONG64R_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_JMPLOONG64R_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IE_ITYPER_RISCV_TLS_IE_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRLOONG64UR_ADDRMIPSTLSR_ADDRLOONG64TLSR_ADDRLOONG64TLSUR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" + +-var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 53, 59, 68, 79, 88, 99, 109, 120, 127, 134, 142, 150, 158, 164, 170, 176, 186, 195, 205, 221, 232, 238, 249, 259, 268, 281, 295, 309, 323, 339, 350, 363, 376, 390, 404, 418, 433, 447, 461, 472, 486, 501, 518, 536, 557, 576, 595, 615, 635, 645, 656, 669, 680, 692, 702} ++var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 51, 60, 66, 72, 81, 92, 101, 112, 122, 133, 146, 153, 160, 168, 176, 184, 190, 196, 202, 212, 221, 231, 247, 258, 264, 275, 285, 294, 306, 319, 333, 347, 361, 377, 388, 401, 414, 428, 442, 456, 471, 485, 499, 510, 524, 539, 556, 574, 595, 614, 633, 653, 673, 683, 694, 708, 721, 737, 754, 765, 777, 787} + + func (i RelocType) String() string { + i -= 1 +diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go +index 20f1d0b..c15aac8 100644 +--- a/src/cmd/link/internal/ld/config.go ++++ b/src/cmd/link/internal/ld/config.go +@@ -196,7 +196,7 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { + // Internally linking cgo is incomplete on some architectures. 
+ // https://golang.org/issue/14449 + // https://golang.org/issue/21961 +- if iscgo && ctxt.Arch.InFamily(sys.MIPS64, sys.MIPS, sys.PPC64, sys.RISCV64) { ++ if iscgo && ctxt.Arch.InFamily(sys.Loong64, sys.MIPS64, sys.MIPS, sys.PPC64, sys.RISCV64) { + return true, buildcfg.GOARCH + " does not support internal cgo" + } + if iscgo && (buildcfg.GOOS == "android" || buildcfg.GOOS == "dragonfly") { +diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go +index de5d287..5248ad2 100644 +--- a/src/cmd/link/internal/ld/elf.go ++++ b/src/cmd/link/internal/ld/elf.go +@@ -208,7 +208,7 @@ var buildinfo []byte + func Elfinit(ctxt *Link) { + ctxt.IsELF = true + +- if ctxt.Arch.InFamily(sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) { ++ if ctxt.Arch.InFamily(sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) { + elfRelType = ".rela" + } else { + elfRelType = ".rel" +@@ -223,10 +223,13 @@ func Elfinit(ctxt *Link) { + ehdr.Flags = 2 /* Version 2 ABI */ + } + fallthrough +- case sys.AMD64, sys.ARM64, sys.MIPS64, sys.RISCV64: ++ case sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.RISCV64: + if ctxt.Arch.Family == sys.MIPS64 { + ehdr.Flags = 0x20000004 /* MIPS 3 CPIC */ + } ++ if ctxt.Arch.Family == sys.Loong64 { ++ ehdr.Flags = 0x3 /* LoongArch lp64d */ ++ } + if ctxt.Arch.Family == sys.RISCV64 { + ehdr.Flags = 0x4 /* RISCV Float ABI Double */ + } +@@ -1652,6 +1655,8 @@ func asmbElf(ctxt *Link) { + Exitf("unknown architecture in asmbelf: %v", ctxt.Arch.Family) + case sys.MIPS, sys.MIPS64: + eh.Machine = uint16(elf.EM_MIPS) ++ case sys.Loong64: ++ eh.Machine = uint16(elf.EM_LOONGARCH) + case sys.ARM: + eh.Machine = uint16(elf.EM_ARM) + case sys.AMD64: +diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go +index 0c277f5..c9f14f1 100644 +--- a/src/cmd/link/internal/ld/lib.go ++++ b/src/cmd/link/internal/ld/lib.go +@@ -1778,6 +1778,8 @@ func hostlinkArchArgs(arch *sys.Arch) []string { + if buildcfg.GOOS == "darwin" { + return []string{"-arch", "arm64"} + } ++ case sys.Loong64: ++ return []string{"-mabi=lp64d"} + case sys.MIPS64: + return []string{"-mabi=64"} + case sys.MIPS: +diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go +index 05fd302..a3897fb 100644 +--- a/src/cmd/link/internal/ld/pcln.go ++++ b/src/cmd/link/internal/ld/pcln.go +@@ -148,7 +148,7 @@ func computeDeferReturn(ctxt *Link, deferReturnSym, s loader.Sym) uint32 { + switch target.Arch.Family { + case sys.AMD64, sys.I386: + deferreturn-- +- case sys.PPC64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64: ++ case sys.PPC64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64: + // no change + case sys.RISCV64: + // TODO(jsing): The JALR instruction is marked with +diff --git a/src/cmd/link/internal/ld/target.go b/src/cmd/link/internal/ld/target.go +index f68de8f..69ce26a 100644 +--- a/src/cmd/link/internal/ld/target.go ++++ b/src/cmd/link/internal/ld/target.go +@@ -112,6 +112,10 @@ func (t *Target) IsMIPS64() bool { + return t.Arch.Family == sys.MIPS64 + } + ++func (t *Target) IsLOONG64() bool { ++ return t.Arch.Family == sys.Loong64 ++} ++ + func (t *Target) IsPPC64() bool { + return t.Arch.Family == sys.PPC64 + } +diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go +index c695629..d677bff 100644 +--- a/src/cmd/link/internal/loadelf/ldelf.go ++++ b/src/cmd/link/internal/loadelf/ldelf.go +@@ -346,6 +346,10 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f 
*bio.Reader, + if mach != elf.EM_MIPS || class != elf.ELFCLASS64 { + return errorf("elf object but not mips64") + } ++ case sys.Loong64: ++ if mach != elf.EM_LOONGARCH || class != elf.ELFCLASS64 { ++ return errorf("elf object but not loong64") ++ } + + case sys.ARM: + if e != binary.LittleEndian || mach != elf.EM_ARM || class != elf.ELFCLASS32 { +@@ -958,6 +962,7 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, uint8, error) { + ARM = uint32(sys.ARM) + ARM64 = uint32(sys.ARM64) + I386 = uint32(sys.I386) ++ LOONG64 = uint32(sys.Loong64) + MIPS = uint32(sys.MIPS) + MIPS64 = uint32(sys.MIPS64) + PPC64 = uint32(sys.PPC64) +@@ -993,6 +998,15 @@ func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, uint8, error) { + MIPS64 | uint32(elf.R_MIPS_GOT_DISP)<<16: + return 4, 4, nil + ++ case LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_PCREL)<<16, ++ LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_GPREL)<<16, ++ LOONG64 | uint32(elf.R_LARCH_SOP_PUSH_ABSOLUTE)<<16, ++ LOONG64 | uint32(elf.R_LARCH_MARK_LA)<<16, ++ LOONG64 | uint32(elf.R_LARCH_SOP_POP_32_S_0_10_10_16_S2)<<16, ++ LOONG64 | uint32(elf.R_LARCH_64)<<16, ++ LOONG64 | uint32(elf.R_LARCH_MARK_PCREL)<<16: ++ return 4, 4, nil ++ + case S390X | uint32(elf.R_390_8)<<16: + return 1, 1, nil + +diff --git a/src/cmd/link/internal/loong64/asm.go b/src/cmd/link/internal/loong64/asm.go +new file mode 100644 +index 0000000..9c26431 +--- /dev/null ++++ b/src/cmd/link/internal/loong64/asm.go +@@ -0,0 +1,237 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++import ( ++ "cmd/internal/objabi" ++ "cmd/internal/sys" ++ "cmd/link/internal/ld" ++ "cmd/link/internal/loader" ++ "cmd/link/internal/sym" ++ "debug/elf" ++ "log" ++) ++ ++func gentext(ctxt *ld.Link, ldr *loader.Loader) {} ++ ++func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { ++ log.Fatalf("adddynrel not implemented") ++ return false ++} ++ ++func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { ++ // loong64 ELF relocation (endian neutral) ++ // offset uint64 ++ // sym uint64 ++ // addend int64 ++ ++ elfsym := ld.ElfSymForReloc(ctxt, r.Xsym) ++ switch r.Type { ++ default: ++ return false ++ case objabi.R_ADDR, objabi.R_DWARFSECREF: ++ switch r.Size { ++ case 4: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_32) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ case 8: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_64) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ default: ++ return false ++ } ++ case objabi.R_ADDRLOONG64TLS: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_TLS_TPREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xfff)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_AND)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_U_10_12)) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_ADDRLOONG64TLSU: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_TLS_TPREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ ++ out.Write64(uint64(sectoff)) ++ 
out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_5_20) | uint64(0)<<32) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_CALLLOONG64: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PLT_PCREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_0_10_10_16_S2)) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_ADDRLOONG64: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd + 0x4)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd + 0x804)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SL)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SUB)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_10_12)) ++ out.Write64(uint64(0x0)) ++ ++ case objabi.R_ADDRLOONG64U: ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_PCREL) | uint64(elfsym)<<32) ++ out.Write64(uint64(r.Xadd + 0x800)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_PUSH_ABSOLUTE)) ++ out.Write64(uint64(0xc)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_SR)) ++ out.Write64(uint64(0x0)) ++ ++ out.Write64(uint64(sectoff)) ++ out.Write64(uint64(elf.R_LARCH_SOP_POP_32_S_5_20) | uint64(0)<<32) ++ out.Write64(uint64(0x0)) ++ } ++ ++ return true ++} ++func elfsetupplt(ctxt *ld.Link, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) { ++ return ++} ++ ++func machoreloc1(*sys.Arch, *ld.OutBuf, *loader.Loader, loader.Sym, loader.ExtReloc, int64) bool { ++ return false ++} ++ ++func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc, s loader.Sym, val int64) (o int64, nExtReloc int, ok bool) { ++ rs := r.Sym() ++ rs = ldr.ResolveABIAlias(rs) ++ ++ if target.IsExternal() { ++ nExtReloc := 0 ++ switch r.Type() { ++ default: ++ return val, 0, false ++ case objabi.R_ADDRLOONG64, ++ objabi.R_ADDRLOONG64U: ++ // set up addend for eventual relocation via outer symbol. ++ rs, _ := ld.FoldSubSymbolOffset(ldr, rs) ++ rst := ldr.SymType(rs) ++ if rst != sym.SHOSTOBJ && rst != sym.SDYNIMPORT && ldr.SymSect(rs) == nil { ++ ldr.Errorf(s, "missing section for %s", ldr.SymName(rs)) ++ } ++ nExtReloc = 8 // need two ELF relocations. 
see elfreloc1 ++ if r.Type() == objabi.R_ADDRLOONG64U { ++ nExtReloc = 4 ++ } ++ return val, nExtReloc, true ++ case objabi.R_ADDRLOONG64TLS, ++ objabi.R_ADDRLOONG64TLSU, ++ objabi.R_CALLLOONG64, ++ objabi.R_JMPLOONG64: ++ nExtReloc = 4 ++ if r.Type() == objabi.R_CALLLOONG64 || r.Type() == objabi.R_JMPLOONG64 { ++ nExtReloc = 2 ++ } ++ return val, nExtReloc, true ++ } ++ } ++ ++ const isOk = true ++ const noExtReloc = 0 ++ ++ switch r.Type() { ++ case objabi.R_CONST: ++ return r.Add(), noExtReloc, isOk ++ case objabi.R_GOTOFF: ++ return ldr.SymValue(r.Sym()) + r.Add() - ldr.SymValue(syms.GOT), noExtReloc, isOk ++ case objabi.R_ADDRLOONG64, ++ objabi.R_ADDRLOONG64U: ++ pc := ldr.SymValue(s) + int64(r.Off()) ++ t := ldr.SymAddr(rs) + r.Add() - pc ++ if r.Type() == objabi.R_ADDRLOONG64 { ++ return int64(val&0xffc003ff | (((t + 4 - ((t + 4 + 1<<11) >> 12 << 12)) << 10) & 0x3ffc00)), noExtReloc, isOk ++ } ++ return int64(val&0xfe00001f | (((t + 1<<11) >> 12 << 5) & 0x1ffffe0)), noExtReloc, isOk ++ case objabi.R_ADDRLOONG64TLS, ++ objabi.R_ADDRLOONG64TLSU: ++ t := ldr.SymAddr(rs) + r.Add() ++ if r.Type() == objabi.R_ADDRLOONG64TLS { ++ return int64(val&0xffc003ff | ((t & 0xfff) << 10)), noExtReloc, isOk ++ } ++ return int64(val&0xfe00001f | (((t) >> 12 << 5) & 0x1ffffe0)), noExtReloc, isOk ++ case objabi.R_CALLLOONG64, ++ objabi.R_JMPLOONG64: ++ pc := ldr.SymValue(s) + int64(r.Off()) ++ t := ldr.SymAddr(rs) + r.Add() - pc ++ return int64(val&0xfc000000 | (((t >> 2) & 0xffff) << 10) | (((t >> 2) & 0x3ff0000) >> 16)), noExtReloc, isOk ++ } ++ ++ return val, 0, false ++} ++ ++func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant, loader.Sym, int64, []byte) int64 { ++ return -1 ++} ++ ++func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { ++ switch r.Type() { ++ case objabi.R_ADDRLOONG64, ++ objabi.R_ADDRLOONG64U: ++ return ld.ExtrelocViaOuterSym(ldr, r, s), true ++ ++ case objabi.R_ADDRLOONG64TLS, ++ objabi.R_ADDRLOONG64TLSU, ++ objabi.R_CONST, ++ objabi.R_GOTOFF, ++ objabi.R_CALLLOONG64, ++ objabi.R_JMPLOONG64: ++ return ld.ExtrelocSimple(ldr, r), true ++ } ++ return loader.ExtReloc{}, false ++} +diff --git a/src/cmd/link/internal/loong64/l.go b/src/cmd/link/internal/loong64/l.go +new file mode 100644 +index 0000000..a060901 +--- /dev/null ++++ b/src/cmd/link/internal/loong64/l.go +@@ -0,0 +1,17 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package loong64 ++ ++const ( ++ maxAlign = 32 // max data alignment ++ minAlign = 1 // min data alignment ++ funcAlign = 8 ++) ++ ++/* Used by ../../internal/ld/dwarf.go */ ++const ( ++ dwarfRegSP = 3 ++ dwarfRegLR = 1 ++) +diff --git a/src/cmd/link/internal/loong64/obj.go b/src/cmd/link/internal/loong64/obj.go +new file mode 100644 +index 0000000..62014fa +--- /dev/null ++++ b/src/cmd/link/internal/loong64/obj.go +@@ -0,0 +1,58 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package loong64 ++ ++import ( ++ "cmd/internal/objabi" ++ "cmd/internal/sys" ++ "cmd/link/internal/ld" ++) ++ ++func Init() (*sys.Arch, ld.Arch) { ++ arch := sys.ArchLoong64 ++ ++ theArch := ld.Arch{ ++ Funcalign: funcAlign, ++ Maxalign: maxAlign, ++ Minalign: minAlign, ++ Dwarfregsp: dwarfRegSP, ++ Dwarfreglr: dwarfRegLR, ++ Adddynrel: adddynrel, ++ Archinit: archinit, ++ Archreloc: archreloc, ++ Archrelocvariant: archrelocvariant, ++ Extreloc: extreloc, ++ Elfreloc1: elfreloc1, ++ ElfrelocSize: 24, ++ Elfsetupplt: elfsetupplt, ++ Machoreloc1: machoreloc1, ++ Gentext: gentext, ++ ++ Linuxdynld: "/lib64/ld.so.1", ++ Freebsddynld: "XXX", ++ Openbsddynld: "XXX", ++ Netbsddynld: "XXX", ++ Dragonflydynld: "XXX", ++ Solarisdynld: "XXX", ++ } ++ ++ return arch, theArch ++} ++ ++func archinit(ctxt *ld.Link) { ++ switch ctxt.HeadType { ++ default: ++ ld.Exitf("unknown -H option: %v", ctxt.HeadType) ++ case objabi.Hlinux: /* loong64 elf */ ++ ld.Elfinit(ctxt) ++ ld.HEADR = ld.ELFRESERVE ++ if *ld.FlagTextAddr == -1 { ++ *ld.FlagTextAddr = 0x10000 + int64(ld.HEADR) ++ } ++ if *ld.FlagRound == -1 { ++ *ld.FlagRound = 0x10000 ++ } ++ } ++} +diff --git a/src/cmd/link/internal/sym/reloc.go b/src/cmd/link/internal/sym/reloc.go +index a543233..a44dcdd 100644 +--- a/src/cmd/link/internal/sym/reloc.go ++++ b/src/cmd/link/internal/sym/reloc.go +@@ -59,6 +59,8 @@ func RelocName(arch *sys.Arch, r objabi.RelocType) string { + return elf.R_AARCH64(nr).String() + case sys.I386: + return elf.R_386(nr).String() ++ case sys.Loong64: ++ return elf.R_LARCH(nr).String() + case sys.MIPS, sys.MIPS64: + return elf.R_MIPS(nr).String() + case sys.PPC64: +diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go +index a9b597b..466729c 100644 +--- a/src/cmd/link/link_test.go ++++ b/src/cmd/link/link_test.go +@@ -174,6 +174,8 @@ func TestIssue33979(t *testing.T) { + + // Skip test on platforms that do not support cgo internal linking. 
+ switch runtime.GOARCH { ++ case "loong64": ++ t.Skipf("Skipping on %s/%s", runtime.GOOS, runtime.GOARCH) + case "mips", "mipsle", "mips64", "mips64le": + t.Skipf("Skipping on %s/%s", runtime.GOOS, runtime.GOARCH) + } +diff --git a/src/cmd/link/main.go b/src/cmd/link/main.go +index d92478e..16e5a01 100644 +--- a/src/cmd/link/main.go ++++ b/src/cmd/link/main.go +@@ -10,6 +10,7 @@ import ( + "cmd/link/internal/arm" + "cmd/link/internal/arm64" + "cmd/link/internal/ld" ++ "cmd/link/internal/loong64" + "cmd/link/internal/mips" + "cmd/link/internal/mips64" + "cmd/link/internal/ppc64" +@@ -53,6 +54,8 @@ func main() { + arch, theArch = arm.Init() + case "arm64": + arch, theArch = arm64.Init() ++ case "loong64": ++ arch, theArch = loong64.Init() + case "mips", "mipsle": + arch, theArch = mips.Init() + case "mips64", "mips64le": +-- +2.27.0 + diff --git a/0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch b/0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch new file mode 100644 index 0000000..6d32a29 --- /dev/null +++ b/0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch @@ -0,0 +1,989 @@ +From 29aaf263c9aedc4fc711880fa352d1a59c922f4c Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 15:43:08 +0800 +Subject: [PATCH 17/56] runtime: bootstrap for linux/loong64 and implement + runtime core assembly function + +Change-Id: I252ecd0b13580c5e71723715023b1951985045f3 +--- + src/runtime/asm_loong64.s | 823 ++++++++++++++++++++++++++++++++ + src/runtime/atomic_loong64.s | 14 + + src/runtime/cputicks.go | 4 +- + src/runtime/os_linux_loong64.go | 19 + + src/runtime/os_linux_noauxv.go | 4 +- + src/runtime/rt0_linux_loong64.s | 27 ++ + src/runtime/sys_loong64.go | 21 + + 7 files changed, 908 insertions(+), 4 deletions(-) + create mode 100644 src/runtime/asm_loong64.s + create mode 100644 src/runtime/atomic_loong64.s + create mode 100644 src/runtime/os_linux_loong64.go + create mode 100644 src/runtime/rt0_linux_loong64.s + create mode 100644 src/runtime/sys_loong64.go + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +new file mode 100644 +index 0000000..cfc270f +--- /dev/null ++++ b/src/runtime/asm_loong64.s +@@ -0,0 +1,823 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "go_tls.h" ++#include "funcdata.h" ++#include "textflag.h" ++ ++#define REGCTXT R29 ++ ++TEXT runtime·rt0_go(SB),NOSPLIT,$0 ++ // R3 = stack; R4 = argc; R5 = argv ++ ++ ADDV $-24, R3 ++ MOVW R4, 8(R3) // argc ++ MOVV R5, 16(R3) // argv ++ ++ // create istack out of the given (operating system) stack. ++ // _cgo_init may update stackguard. ++ MOVV $runtime·g0(SB), g ++ MOVV $(-64*1024), R30 ++ ADDV R30, R3, R19 ++ MOVV R19, g_stackguard0(g) ++ MOVV R19, g_stackguard1(g) ++ MOVV R19, (g_stack+stack_lo)(g) ++ MOVV R3, (g_stack+stack_hi)(g) ++ ++ // if there is a _cgo_init, call it using the gcc ABI. 
++ MOVV _cgo_init(SB), R25 ++ BEQ R25, nocgo ++ ++ MOVV R0, R7 // arg 3: not used ++ MOVV R0, R6 // arg 2: not used ++ MOVV $setg_gcc<>(SB), R5 // arg 1: setg ++ MOVV g, R4 // arg 0: G ++ JAL (R25) ++ ++nocgo: ++ // update stackguard after _cgo_init ++ MOVV (g_stack+stack_lo)(g), R19 ++ ADDV $const__StackGuard, R19 ++ MOVV R19, g_stackguard0(g) ++ MOVV R19, g_stackguard1(g) ++ ++ // set the per-goroutine and per-mach "registers" ++ MOVV $runtime·m0(SB), R19 ++ ++ // save m->g0 = g0 ++ MOVV g, m_g0(R19) ++ // save m0 to g0->m ++ MOVV R19, g_m(g) ++ ++ JAL runtime·check(SB) ++ ++ // args are already prepared ++ JAL runtime·args(SB) ++ JAL runtime·osinit(SB) ++ JAL runtime·schedinit(SB) ++ ++ // create a new goroutine to start program ++ MOVV $runtime·mainPC(SB), R19 // entry ++ ADDV $-24, R3 ++ MOVV R19, 16(R3) ++ MOVV R0, 8(R3) ++ MOVV R0, 0(R3) ++ JAL runtime·newproc(SB) ++ ADDV $24, R3 ++ ++ // start this M ++ JAL runtime·mstart(SB) ++ ++ MOVV R0, 1(R0) ++ RET ++ ++DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) ++GLOBL runtime·mainPC(SB),RODATA,$8 ++ ++TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVV R0, 2(R0) // TODO: TD ++ RET ++ ++TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 ++ RET ++ ++TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 ++ JAL runtime·mstart0(SB) ++ RET // not reached ++ ++// void jmpdefer(fv, sp); ++// called from deferreturn. ++// 1. grab stored LR for caller ++// 2. sub 4 bytes to get back to JAL deferreturn ++// 3. JMP to fn ++TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 ++ MOVV 0(R3), R1 ++ //-4 because loong doesn't have delay slot ++ ADDV $-4, R1 ++ ++ MOVV fv+0(FP), REGCTXT ++ MOVV argp+8(FP), R3 ++ ADDV $-8, R3 ++ NOR R0, R0 // prevent scheduling ++ MOVV 0(REGCTXT), R4 ++ JMP (R4) ++ ++/* ++ * go-routine ++ */ ++ ++// void gosave(Gobuf*) ++// save state in Gobuf; setjmp ++TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 ++ MOVV buf+0(FP), R19 ++ MOVV R3, gobuf_sp(R19) ++ MOVV R1, gobuf_pc(R19) ++ MOVV g, gobuf_g(R19) ++ MOVV R0, gobuf_lr(R19) ++ MOVV R0, gobuf_ret(R19) ++ // Assert ctxt is zero. See func save. ++ MOVV gobuf_ctxt(R19), R19 ++ BEQ R19, 2(PC) ++ JAL runtime·badctxt(SB) ++ RET ++ ++// void gogo(Gobuf*) ++// restore state from Gobuf; longjmp ++TEXT runtime·gogo(SB), NOSPLIT, $16-8 ++ MOVV buf+0(FP), R4 ++ MOVV gobuf_g(R4), g // make sure g is not nil ++ JAL runtime·save_g(SB) ++ ++ MOVV 0(g), R5 ++ MOVV gobuf_sp(R4), R3 ++ MOVV gobuf_lr(R4), R1 ++ MOVV gobuf_ret(R4), R19 ++ MOVV gobuf_ctxt(R4), REGCTXT ++ MOVV R0, gobuf_sp(R4) ++ MOVV R0, gobuf_ret(R4) ++ MOVV R0, gobuf_lr(R4) ++ MOVV R0, gobuf_ctxt(R4) ++ MOVV gobuf_pc(R4), R6 ++ JMP (R6) ++ ++// void mcall(fn func(*g)) ++// Switch to m->g0's stack, call fn(g). ++// Fn must never return. It should gogo(&g->sched) ++// to keep running g. ++TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 ++ // Save caller state in g->sched ++ MOVV R3, (g_sched+gobuf_sp)(g) ++ MOVV R1, (g_sched+gobuf_pc)(g) ++ MOVV R0, (g_sched+gobuf_lr)(g) ++ MOVV g, (g_sched+gobuf_g)(g) ++ ++ // Switch to m->g0 & its stack, call fn. ++ MOVV g, R19 ++ MOVV g_m(g), R4 ++ MOVV m_g0(R4), g ++ JAL runtime·save_g(SB) ++ BNE g, R19, 2(PC) ++ JMP runtime·badmcall(SB) ++ MOVV fn+0(FP), REGCTXT // context ++ MOVV 0(REGCTXT), R5 // code pointer ++ MOVV (g_sched+gobuf_sp)(g), R3 // sp = m->g0->sched.sp ++ ADDV $-16, R3 ++ MOVV R19, 8(R3) ++ MOVV R0, 0(R3) ++ JAL (R5) ++ JMP runtime·badmcall2(SB) ++ ++// systemstack_switch is a dummy routine that systemstack leaves at the bottom ++// of the G stack. 
We need to distinguish the routine that ++// lives at the bottom of the G stack from the one that lives ++// at the top of the system stack because the one at the top of ++// the system stack terminates the stack walk (see topofstack()). ++TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 ++ UNDEF ++ JAL (R1) // make sure this function is not leaf ++ RET ++ ++// func systemstack(fn func()) ++TEXT runtime·systemstack(SB), NOSPLIT, $0-8 ++ MOVV fn+0(FP), R19 // R19 = fn ++ MOVV R19, REGCTXT // context ++ MOVV g_m(g), R4 // R4 = m ++ ++ MOVV m_gsignal(R4), R5 // R5 = gsignal ++ BEQ g, R5, noswitch ++ ++ MOVV m_g0(R4), R5 // R5 = g0 ++ BEQ g, R5, noswitch ++ ++ MOVV m_curg(R4), R6 ++ BEQ g, R6, switch ++ ++ // Bad: g is not gsignal, not g0, not curg. What is it? ++ // Hide call from linker nosplit analysis. ++ MOVV $runtime·badsystemstack(SB), R7 ++ JAL (R7) ++ JAL runtime·abort(SB) ++ ++switch: ++ // save our state in g->sched. Pretend to ++ // be systemstack_switch if the G stack is scanned. ++ JAL gosave_systemstack_switch<>(SB) ++ ++ // switch to g0 ++ MOVV R5, g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R19 ++ // make it look like mstart called systemstack on g0, to stop traceback ++ ADDV $-8, R19 ++ MOVV $runtime·mstart(SB), R6 ++ MOVV R6, 0(R19) ++ MOVV R19, R3 ++ ++ // call target function ++ MOVV 0(REGCTXT), R6 // code pointer ++ JAL (R6) ++ ++ // switch back to g ++ MOVV g_m(g), R4 ++ MOVV m_curg(R4), g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R3 ++ MOVV R0, (g_sched+gobuf_sp)(g) ++ RET ++ ++noswitch: ++ // already on m stack, just call directly ++ // Using a tail call here cleans up tracebacks since we won't stop ++ // at an intermediate systemstack. ++ MOVV 0(REGCTXT), R4 // code pointer ++ MOVV 0(R3), R1 // restore LR ++ ADDV $8, R3 ++ JMP (R4) ++ ++/* ++ * support for morestack ++ */ ++ ++// Called during function prolog when more stack is needed. ++// Caller has already loaded: ++// loong64: R5: LR ++// ++// The traceback routines see morestack on a g0 as being ++// the top of a stack (for example, morestack calling newstack ++// calling the scheduler calling newm calling gc), so we must ++// record an argument size. For that purpose, it has no arguments. ++TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 ++ // Cannot grow scheduler stack (m->g0). ++ MOVV g_m(g), R7 ++ MOVV m_g0(R7), R8 ++ BNE g, R8, 3(PC) ++ JAL runtime·badmorestackg0(SB) ++ JAL runtime·abort(SB) ++ ++ // Cannot grow signal stack (m->gsignal). ++ MOVV m_gsignal(R7), R8 ++ BNE g, R8, 3(PC) ++ JAL runtime·badmorestackgsignal(SB) ++ JAL runtime·abort(SB) ++ ++ // Called from f. ++ // Set g->sched to context in f. ++ MOVV R3, (g_sched+gobuf_sp)(g) ++ MOVV R1, (g_sched+gobuf_pc)(g) ++ MOVV R5, (g_sched+gobuf_lr)(g) ++ MOVV REGCTXT, (g_sched+gobuf_ctxt)(g) ++ ++ // Called from f. ++ // Set m->morebuf to f's caller. ++ MOVV R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC ++ MOVV R3, (m_morebuf+gobuf_sp)(R7) // f's caller's SP ++ MOVV g, (m_morebuf+gobuf_g)(R7) ++ ++ // Call newstack on m->g0's stack. ++ MOVV m_g0(R7), g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R3 ++ // Create a stack frame on g0 to call newstack. ++ MOVV R0, -8(R3) // Zero saved LR in frame ++ ADDV $-8, R3 ++ JAL runtime·newstack(SB) ++ ++ // Not reached, but make sure the return PC from the call to newstack ++ // is still in this function, and not the beginning of the next. 
++ UNDEF ++ ++TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVV R0, REGCTXT ++ JMP runtime·morestack(SB) ++ ++// reflectcall: call a function with the given argument list ++// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). ++// we don't have variable-sized frames, so we use a small number ++// of constant-sized-frame functions to encode a few bits of size in the pc. ++// Caution: ugly multiline assembly macros in your future! ++ ++#define DISPATCH(NAME,MAXSIZE) \ ++ MOVV $MAXSIZE, R30; \ ++ SGTU R19, R30, R30; \ ++ BNE R30, 3(PC); \ ++ MOVV $NAME(SB), R4; \ ++ JMP (R4) ++// Note: can't just "BR NAME(SB)" - bad inlining results. ++ ++TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 ++ MOVWU stackArgsSize+24(FP), R19 ++ DISPATCH(runtime·call32, 32) ++ DISPATCH(runtime·call64, 64) ++ DISPATCH(runtime·call128, 128) ++ DISPATCH(runtime·call256, 256) ++ DISPATCH(runtime·call512, 512) ++ DISPATCH(runtime·call1024, 1024) ++ DISPATCH(runtime·call2048, 2048) ++ DISPATCH(runtime·call4096, 4096) ++ DISPATCH(runtime·call8192, 8192) ++ DISPATCH(runtime·call16384, 16384) ++ DISPATCH(runtime·call32768, 32768) ++ DISPATCH(runtime·call65536, 65536) ++ DISPATCH(runtime·call131072, 131072) ++ DISPATCH(runtime·call262144, 262144) ++ DISPATCH(runtime·call524288, 524288) ++ DISPATCH(runtime·call1048576, 1048576) ++ DISPATCH(runtime·call2097152, 2097152) ++ DISPATCH(runtime·call4194304, 4194304) ++ DISPATCH(runtime·call8388608, 8388608) ++ DISPATCH(runtime·call16777216, 16777216) ++ DISPATCH(runtime·call33554432, 33554432) ++ DISPATCH(runtime·call67108864, 67108864) ++ DISPATCH(runtime·call134217728, 134217728) ++ DISPATCH(runtime·call268435456, 268435456) ++ DISPATCH(runtime·call536870912, 536870912) ++ DISPATCH(runtime·call1073741824, 1073741824) ++ MOVV $runtime·badreflectcall(SB), R4 ++ JMP (R4) ++ ++#define CALLFN(NAME,MAXSIZE) \ ++TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ ++ NO_LOCAL_POINTERS; \ ++ /* copy arguments to stack */ \ ++ MOVV arg+16(FP), R4; \ ++ MOVWU argsize+24(FP), R5; \ ++ MOVV R3, R12; \ ++ ADDV $8, R12; \ ++ ADDV R12, R5; \ ++ BEQ R12, R5, 6(PC); \ ++ MOVBU (R4), R6; \ ++ ADDV $1, R4; \ ++ MOVBU R6, (R12); \ ++ ADDV $1, R12; \ ++ JMP -5(PC); \ ++ /* call function */ \ ++ MOVV f+8(FP), REGCTXT; \ ++ MOVV (REGCTXT), R6; \ ++ PCDATA $PCDATA_StackMapIndex, $0; \ ++ JAL (R6); \ ++ /* copy return values back */ \ ++ MOVV argtype+0(FP), R7; \ ++ MOVV arg+16(FP), R4; \ ++ MOVWU n+24(FP), R5; \ ++ MOVWU retoffset+28(FP), R6; \ ++ ADDV $8, R3, R12; \ ++ ADDV R6, R12; \ ++ ADDV R6, R4; \ ++ SUBVU R6, R5; \ ++ JAL callRet<>(SB); \ ++ RET ++ ++// callRet copies return values back at the end of call*. This is a ++// separate function so it can allocate stack space for the arguments ++// to reflectcallmove. It does not follow the Go ABI; it expects its ++// arguments in registers. 
++TEXT callRet<>(SB), NOSPLIT, $32-0 ++ MOVV R7, 8(R3) ++ MOVV R4, 16(R3) ++ MOVV R12, 24(R3) ++ MOVV R5, 32(R3) ++ JAL runtime·reflectcallmove(SB) ++ RET ++ ++CALLFN(·call16, 16) ++CALLFN(·call32, 32) ++CALLFN(·call64, 64) ++CALLFN(·call128, 128) ++CALLFN(·call256, 256) ++CALLFN(·call512, 512) ++CALLFN(·call1024, 1024) ++CALLFN(·call2048, 2048) ++CALLFN(·call4096, 4096) ++CALLFN(·call8192, 8192) ++CALLFN(·call16384, 16384) ++CALLFN(·call32768, 32768) ++CALLFN(·call65536, 65536) ++CALLFN(·call131072, 131072) ++CALLFN(·call262144, 262144) ++CALLFN(·call524288, 524288) ++CALLFN(·call1048576, 1048576) ++CALLFN(·call2097152, 2097152) ++CALLFN(·call4194304, 4194304) ++CALLFN(·call8388608, 8388608) ++CALLFN(·call16777216, 16777216) ++CALLFN(·call33554432, 33554432) ++CALLFN(·call67108864, 67108864) ++CALLFN(·call134217728, 134217728) ++CALLFN(·call268435456, 268435456) ++CALLFN(·call536870912, 536870912) ++CALLFN(·call1073741824, 1073741824) ++ ++TEXT runtime·procyield(SB),NOSPLIT,$0-0 ++ RET ++ ++// Save state of caller into g->sched. ++// but using fake PC from systemstack_switch. ++// Must only be called from functions with no locals ($0) ++// or else unwinding from systemstack_switch is incorrect. ++// Smashes R19. ++TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0 ++ MOVV $runtime·systemstack_switch(SB), R19 ++ ADDV $8, R19 ++ MOVV R19, (g_sched+gobuf_pc)(g) ++ MOVV R3, (g_sched+gobuf_sp)(g) ++ MOVV R0, (g_sched+gobuf_lr)(g) ++ MOVV R0, (g_sched+gobuf_ret)(g) ++ // Assert ctxt is zero. See func save. ++ MOVV (g_sched+gobuf_ctxt)(g), R19 ++ BEQ R19, 2(PC) ++ JAL runtime·abort(SB) ++ RET ++ ++// func asmcgocall(fn, arg unsafe.Pointer) int32 ++// Call fn(arg) on the scheduler stack, ++// aligned appropriately for the gcc ABI. ++// See cgocall.go for more details. ++TEXT ·asmcgocall(SB),NOSPLIT,$0-20 ++ MOVV fn+0(FP), R25 ++ MOVV arg+8(FP), R4 ++ ++ MOVV R3, R12 // save original stack pointer ++ MOVV g, R13 ++ ++ // Figure out if we need to switch to m->g0 stack. ++ // We get called to create new OS threads too, and those ++ // come in on the m->g0 stack already. ++ MOVV g_m(g), R5 ++ MOVV m_gsignal(R5), R6 ++ BEQ R6, g, g0 ++ MOVV m_g0(R5), R6 ++ BEQ R6, g, g0 ++ ++ JAL gosave_systemstack_switch<>(SB) ++ MOVV R6, g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R3 ++ ++ // Now on a scheduling stack (a pthread-created stack). ++g0: ++ // Save room for two of our pointers. ++ ADDV $-16, R3 ++ MOVV R13, 0(R3) // save old g on stack ++ MOVV (g_stack+stack_hi)(R13), R13 ++ SUBVU R12, R13 ++ MOVV R13, 8(R3) // save depth in old g stack (can't just save SP, as stack might be copied during a callback) ++ JAL (R25) ++ ++ // Restore g, stack pointer. R4 is return value. ++ MOVV 0(R3), g ++ JAL runtime·save_g(SB) ++ MOVV (g_stack+stack_hi)(g), R5 ++ MOVV 8(R3), R6 ++ SUBVU R6, R5 ++ MOVV R5, R3 ++ ++ MOVW R4, ret+16(FP) ++ RET ++ ++// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr) ++// See cgocall.go for more details. ++TEXT ·cgocallback(SB),NOSPLIT,$24-24 ++ NO_LOCAL_POINTERS ++ ++ // Load m and g from thread-local storage. ++ MOVB runtime·iscgo(SB), R19 ++ BEQ R19, nocgo ++ JAL runtime·load_g(SB) ++nocgo: ++ ++ // If g is nil, Go did not create the current thread. ++ // Call needm to obtain one for temporary use. ++ // In this case, we're running on the thread stack, so there's ++ // lots of space, but the linker doesn't know. Hide the call from ++ // the linker analysis by using an indirect call. 
++ BEQ g, needm ++ ++ MOVV g_m(g), R12 ++ MOVV R12, savedm-8(SP) ++ JMP havem ++ ++needm: ++ MOVV g, savedm-8(SP) // g is zero, so is m. ++ MOVV $runtime·needm(SB), R4 ++ JAL (R4) ++ ++ // Set m->sched.sp = SP, so that if a panic happens ++ // during the function we are about to execute, it will ++ // have a valid SP to run on the g0 stack. ++ // The next few lines (after the havem label) ++ // will save this SP onto the stack and then write ++ // the same SP back to m->sched.sp. That seems redundant, ++ // but if an unrecovered panic happens, unwindm will ++ // restore the g->sched.sp from the stack location ++ // and then systemstack will try to use it. If we don't set it here, ++ // that restored SP will be uninitialized (typically 0) and ++ // will not be usable. ++ MOVV g_m(g), R12 ++ MOVV m_g0(R12), R19 ++ MOVV R3, (g_sched+gobuf_sp)(R19) ++ ++havem: ++ // Now there's a valid m, and we're running on its m->g0. ++ // Save current m->g0->sched.sp on stack and then set it to SP. ++ // Save current sp in m->g0->sched.sp in preparation for ++ // switch back to m->curg stack. ++ // NOTE: unwindm knows that the saved g->sched.sp is at 8(R29) aka savedsp-16(SP). ++ MOVV m_g0(R12), R19 ++ MOVV (g_sched+gobuf_sp)(R19), R13 ++ MOVV R13, savedsp-24(SP) // must match frame size ++ MOVV R3, (g_sched+gobuf_sp)(R19) ++ ++ // Switch to m->curg stack and call runtime.cgocallbackg. ++ // Because we are taking over the execution of m->curg ++ // but *not* resuming what had been running, we need to ++ // save that information (m->curg->sched) so we can restore it. ++ // We can restore m->curg->sched.sp easily, because calling ++ // runtime.cgocallbackg leaves SP unchanged upon return. ++ // To save m->curg->sched.pc, we push it onto the stack. ++ // This has the added benefit that it looks to the traceback ++ // routine like cgocallbackg is going to return to that ++ // PC (because the frame we allocate below has the same ++ // size as cgocallback_gofunc's frame declared above) ++ // so that the traceback will seamlessly trace back into ++ // the earlier calls. ++ MOVV m_curg(R12), g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R13 // prepare stack as R13 ++ MOVV (g_sched+gobuf_pc)(g), R4 ++ MOVV R4, -(24+8)(R13) // "saved LR"; must match frame size ++ MOVV fn+0(FP), R5 ++ MOVV frame+8(FP), R6 ++ MOVV ctxt+16(FP), R7 ++ MOVV $-(24+8)(R13), R3 ++ MOVV R5, 8(R3) ++ MOVV R6, 16(R3) ++ MOVV R7, 24(R3) ++ JAL runtime·cgocallbackg(SB) ++ ++ // Restore g->sched (== m->curg->sched) from saved values. ++ MOVV 0(R3), R4 ++ MOVV R4, (g_sched+gobuf_pc)(g) ++ MOVV $(24+8)(R3), R13 // must match frame size ++ MOVV R13, (g_sched+gobuf_sp)(g) ++ ++ // Switch back to m->g0's stack and restore m->g0->sched.sp. ++ // (Unlike m->curg, the g0 goroutine never uses sched.pc, ++ // so we do not have to restore it.) ++ MOVV g_m(g), R12 ++ MOVV m_g0(R12), g ++ JAL runtime·save_g(SB) ++ MOVV (g_sched+gobuf_sp)(g), R3 ++ MOVV savedsp-24(SP), R13 // must match frame size ++ MOVV R13, (g_sched+gobuf_sp)(g) ++ ++ // If the m on entry was nil, we called needm above to borrow an m ++ // for the duration of the call. Since the call is over, return it with dropm. ++ MOVV savedm-8(SP), R12 ++ BNE R12, droppedm ++ MOVV $runtime·dropm(SB), R4 ++ JAL (R4) ++droppedm: ++ ++ // Done! ++ RET ++ ++// void setg(G*); set g. for use by needm. 
++TEXT runtime·setg(SB), NOSPLIT, $0-8 ++ MOVV gg+0(FP), g ++ // This only happens if iscgo, so jump straight to save_g ++ JAL runtime·save_g(SB) ++ RET ++ ++// void setg_gcc(G*); set g called from gcc with g in R19 ++TEXT setg_gcc<>(SB),NOSPLIT,$0-0 ++ MOVV R19, g ++ JAL runtime·save_g(SB) ++ RET ++ ++TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVW (R0), R0 ++ UNDEF ++ ++// AES hashing not implemented for loong64 ++TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 ++ JMP runtime·memhashFallback(SB) ++TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 ++ JMP runtime·strhashFallback(SB) ++TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 ++ JMP runtime·memhash32Fallback(SB) ++TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 ++ JMP runtime·memhash64Fallback(SB) ++ ++TEXT runtime·return0(SB), NOSPLIT, $0 ++ MOVW $0, R19 ++ RET ++ ++// Called from cgo wrappers, this function returns g->m->curg.stack.hi. ++// Must obey the gcc calling convention. ++TEXT _cgo_topofstack(SB),NOSPLIT,$16 ++ // g (R22) and REGTMP (R30) might be clobbered by load_g. They ++ // are callee-save in the gcc calling convention, so save them. ++ MOVV R30, savedREGTMP-16(SP) ++ MOVV g, savedG-8(SP) ++ ++ JAL runtime·load_g(SB) ++ MOVV g_m(g), R19 ++ MOVV m_curg(R19), R19 ++ MOVV (g_stack+stack_hi)(R19), R4 // return value in R4 ++ ++ MOVV savedG-8(SP), g ++ MOVV savedREGTMP-16(SP), R30 ++ RET ++ ++// The top-most function running on a goroutine ++// returns to goexit+PCQuantum. ++TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0 ++ NOR R0, R0 // NOP ++ JAL runtime·goexit1(SB) // does not return ++ // traceback from goexit1 must hit code range of goexit ++ NOR R0, R0 // NOP ++ ++TEXT ·checkASM(SB),NOSPLIT,$0-1 ++ MOVW $1, R19 ++ MOVB R19, ret+0(FP) ++ RET ++ ++// gcWriteBarrier performs a heap pointer write and informs the GC. ++// ++// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments: ++// - R27 is the destination of the write ++// - R28 is the value being written at R27. ++// It clobbers R30 (the linker temp register). ++// The act of CALLing gcWriteBarrier will clobber R1 (LR). ++// It does not clobber any other general-purpose registers, ++// but may clobber others (e.g., floating point registers). ++TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$216 ++ // Save the registers clobbered by the fast path. ++ MOVV R19, 208(R3) ++ MOVV R13, 216(R3) ++ MOVV g_m(g), R19 ++ MOVV m_p(R19), R19 ++ MOVV (p_wbBuf+wbBuf_next)(R19), R13 ++ // Increment wbBuf.next position. ++ ADDV $16, R13 ++ MOVV R13, (p_wbBuf+wbBuf_next)(R19) ++ MOVV (p_wbBuf+wbBuf_end)(R19), R19 ++ MOVV R19, R30 // R30 is linker temp register ++ // Record the write. ++ MOVV R28, -16(R13) // Record value ++ MOVV (R27), R19 // TODO: This turns bad writes into bad reads. ++ MOVV R19, -8(R13) // Record *slot ++ // Is the buffer full? ++ BEQ R13, R30, flush ++ret: ++ MOVV 208(R3), R19 ++ MOVV 216(R3), R13 ++ // Do the write. ++ MOVV R28, (R27) ++ RET ++ ++flush: ++ // Save all general purpose registers since these could be ++ // clobbered by wbBufFlush and were not saved by the caller. ++ MOVV R27, 8(R3) // Also first argument to wbBufFlush ++ MOVV R28, 16(R3) // Also second argument to wbBufFlush ++ // R1 is LR, which was saved by the prologue. ++ MOVV R2, 24(R3) ++ // R3 is SP. 
++ MOVV R4, 32(R3) ++ MOVV R5, 40(R3) ++ MOVV R6, 48(R3) ++ MOVV R7, 56(R3) ++ MOVV R8, 64(R3) ++ MOVV R9, 72(R3) ++ MOVV R10, 80(R3) ++ MOVV R11, 88(R3) ++ MOVV R12, 96(R3) ++ // R13 already saved ++ MOVV R14, 104(R3) ++ MOVV R15, 112(R3) ++ MOVV R16, 120(R3) ++ MOVV R17, 128(R3) ++ MOVV R18, 136(R3) ++ // R19 already saved ++ MOVV R20, 144(R3) ++ MOVV R21, 152(R3) ++ // R22 is g. ++ MOVV R23, 160(R3) ++ MOVV R24, 168(R3) ++ MOVV R25, 176(R3) ++ MOVV R26, 184(R3) ++ // R27 already saved ++ // R28 already saved. ++ MOVV R29, 192(R3) ++ // R30 is tmp register. ++ MOVV R31, 200(R3) ++ ++ ++ // This takes arguments R27 and R28. ++ CALL runtime·wbBufFlush(SB) ++ ++ MOVV 8(R3), R27 ++ MOVV 16(R3), R28 ++ MOVV 24(R3), R2 ++ MOVV 32(R3), R4 ++ MOVV 40(R3), R5 ++ MOVV 48(R3), R6 ++ MOVV 56(R3), R7 ++ MOVV 64(R3), R8 ++ MOVV 72(R3), R9 ++ MOVV 80(R3), R10 ++ MOVV 88(R3), R11 ++ MOVV 96(R3), R12 ++ MOVV 104(R3), R14 ++ MOVV 112(R3), R15 ++ MOVV 120(R3), R16 ++ MOVV 128(R3), R17 ++ MOVV 136(R3), R18 ++ MOVV 144(R3), R20 ++ MOVV 152(R3), R21 ++ MOVV 160(R3), R23 ++ MOVV 168(R3), R24 ++ MOVV 176(R3), R25 ++ MOVV 184(R3), R26 ++ MOVV 192(R3), R29 ++ MOVV 200(R3), R31 ++ JMP ret ++ ++// Note: these functions use a special calling convention to save generated code space. ++// Arguments are passed in registers, but the space for those arguments are allocated ++// in the caller's stack frame. These stubs write the args into that stack space and ++// then tail call to the corresponding runtime handler. ++// The tail call makes these stubs disappear in backtraces. ++TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicIndex(SB) ++TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicIndexU(SB) ++TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSliceAlen(SB) ++TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSliceAlenU(SB) ++TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSliceAcap(SB) ++TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSliceAcapU(SB) ++TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicSliceB(SB) ++TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicSliceBU(SB) ++TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ MOVV R4, y+8(FP) ++ JMP runtime·goPanicSlice3Alen(SB) ++TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ MOVV R4, y+8(FP) ++ JMP runtime·goPanicSlice3AlenU(SB) ++TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ MOVV R4, y+8(FP) ++ JMP runtime·goPanicSlice3Acap(SB) ++TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ MOVV R4, y+8(FP) ++ JMP runtime·goPanicSlice3AcapU(SB) ++TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSlice3B(SB) ++TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 ++ MOVV R18, x+0(FP) ++ MOVV R17, y+8(FP) ++ JMP runtime·goPanicSlice3BU(SB) ++TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP runtime·goPanicSlice3C(SB) ++TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16 ++ MOVV R19, x+0(FP) ++ MOVV R18, y+8(FP) ++ JMP 
runtime·goPanicSlice3CU(SB) ++TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16 ++ MOVV R17, x+0(FP) ++ MOVV R4, y+8(FP) ++ JMP runtime·goPanicSliceConvert(SB) +diff --git a/src/runtime/atomic_loong64.s b/src/runtime/atomic_loong64.s +new file mode 100644 +index 0000000..b661ec2 +--- /dev/null ++++ b/src/runtime/atomic_loong64.s +@@ -0,0 +1,14 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "textflag.h" ++ ++// DBAR sync load/store operation ++#define DBAR WORD $0x38720000 ++ ++TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 ++ DBAR ++ RET +diff --git a/src/runtime/cputicks.go b/src/runtime/cputicks.go +index 7c926f4..79ddcdc 100644 +--- a/src/runtime/cputicks.go ++++ b/src/runtime/cputicks.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build !arm && !arm64 && !mips64 && !mips64le && !mips && !mipsle && !wasm +-// +build !arm,!arm64,!mips64,!mips64le,!mips,!mipsle,!wasm ++//go:build !arm && !arm64 && !loong64 && !mips64 && !mips64le && !mips && !mipsle && !wasm ++// +build !arm,!arm64,!loong64,!mips64,!mips64le,!mips,!mipsle,!wasm + + package runtime + +diff --git a/src/runtime/os_linux_loong64.go b/src/runtime/os_linux_loong64.go +new file mode 100644 +index 0000000..e9a8728 +--- /dev/null ++++ b/src/runtime/os_linux_loong64.go +@@ -0,0 +1,19 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package runtime ++ ++func archauxv(tag, val uintptr) {} ++ ++func osArchInit() {} ++ ++//go:nosplit ++func cputicks() int64 { ++ // Currently cputicks() is used in blocking profiler and to seed fastrand(). ++ // nanotime() is a poor approximation of CPU ticks that is enough for the profiler. ++ return nanotime() ++} +diff --git a/src/runtime/os_linux_noauxv.go b/src/runtime/os_linux_noauxv.go +index 59b5aac..22833e4 100644 +--- a/src/runtime/os_linux_noauxv.go ++++ b/src/runtime/os_linux_noauxv.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build linux && !arm && !arm64 && !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && !ppc64le +-// +build linux,!arm,!arm64,!mips,!mipsle,!mips64,!mips64le,!s390x,!ppc64,!ppc64le ++//go:build linux && !arm && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && !ppc64le ++// +build linux,!arm,!arm64,!loong64,!mips,!mipsle,!mips64,!mips64le,!s390x,!ppc64,!ppc64le + + package runtime + +diff --git a/src/runtime/rt0_linux_loong64.s b/src/runtime/rt0_linux_loong64.s +new file mode 100644 +index 0000000..840c8b1 +--- /dev/null ++++ b/src/runtime/rt0_linux_loong64.s +@@ -0,0 +1,27 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build linux ++// +build loong64 ++ ++#include "textflag.h" ++ ++TEXT _rt0_loong64_linux(SB),NOSPLIT,$0 ++ JMP _main<>(SB) ++ ++TEXT _main<>(SB),NOSPLIT|NOFRAME,$0 ++ // In a statically linked binary, the stack contains argc, ++ // argv as argc string pointers followed by a NULL, envv as a ++ // sequence of string pointers followed by a NULL, and auxv. 
++ // There is no TLS base pointer. ++ MOVW 0(R3), R4 // argc ++ ADDV $8, R3, R5 // argv ++ JMP main(SB) ++ ++TEXT main(SB),NOSPLIT|NOFRAME,$0 ++ // in external linking, glibc jumps to main with argc in R4 ++ // and argv in R5 ++ ++ MOVV $runtime·rt0_go(SB), R19 ++ JMP (R19) +diff --git a/src/runtime/sys_loong64.go b/src/runtime/sys_loong64.go +new file mode 100644 +index 0000000..650843e +--- /dev/null ++++ b/src/runtime/sys_loong64.go +@@ -0,0 +1,21 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build loong64 ++// +build loong64 ++ ++package runtime ++ ++import "unsafe" ++ ++// adjust Gobuf as if it executed a call to fn with context ctxt ++// and then did an immediate Gosave. ++func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) { ++ if buf.lr != 0 { ++ throw("invalid use of gostartcall") ++ } ++ buf.lr = buf.pc ++ buf.pc = uintptr(fn) ++ buf.ctxt = ctxt ++} +-- +2.27.0 + diff --git a/0018-runtime-load-save-TLS-variable-g-on-loong64.patch b/0018-runtime-load-save-TLS-variable-g-on-loong64.patch new file mode 100644 index 0000000..7a728e1 --- /dev/null +++ b/0018-runtime-load-save-TLS-variable-g-on-loong64.patch @@ -0,0 +1,68 @@ +From 908eda02d68b8a59e96c2302eab6999ea205f55e Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 15:48:53 +0800 +Subject: [PATCH 18/56] runtime: load/save TLS variable g on loong64 + +Change-Id: I5e09759ce9201596e89a01fc4a6f7fd7e205449f +--- + src/runtime/stubs_loong64.go | 12 ++++++++++++ + src/runtime/tls_loong64.s | 28 ++++++++++++++++++++++++++++ + 2 files changed, 40 insertions(+) + create mode 100644 src/runtime/stubs_loong64.go + create mode 100644 src/runtime/tls_loong64.s + +diff --git a/src/runtime/stubs_loong64.go b/src/runtime/stubs_loong64.go +new file mode 100644 +index 0000000..39e7520 +--- /dev/null ++++ b/src/runtime/stubs_loong64.go +@@ -0,0 +1,12 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build loong64 ++// +build loong64 ++ ++package runtime ++ ++// Called from assembly only; declared for go vet. ++func load_g() ++func save_g() +diff --git a/src/runtime/tls_loong64.s b/src/runtime/tls_loong64.s +new file mode 100644 +index 0000000..30627d8 +--- /dev/null ++++ b/src/runtime/tls_loong64.s +@@ -0,0 +1,28 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "go_tls.h" ++#include "funcdata.h" ++#include "textflag.h" ++ ++// If !iscgo, this is a no-op. ++// ++// NOTE: mcall() assumes this clobbers only R30 (REGTMP). 
++TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVB runtime·iscgo(SB), R30 ++ BEQ R30, nocgo ++ ++ MOVV g, runtime·tls_g(SB) ++ ++nocgo: ++ RET ++ ++TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVV runtime·tls_g(SB), g ++ RET ++ ++GLOBL runtime·tls_g(SB), TLSBSS, $8 +-- +2.27.0 + diff --git a/0019-runtime-implement-signal-for-linux-loong64.patch b/0019-runtime-implement-signal-for-linux-loong64.patch new file mode 100644 index 0000000..e67d5fb --- /dev/null +++ b/0019-runtime-implement-signal-for-linux-loong64.patch @@ -0,0 +1,439 @@ +From 49e7195ef230e1e8a09adae24abb777bbdfb1521 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:06:43 +0800 +Subject: [PATCH 19/56] runtime: implement signal for linux/loong64 + +Change-Id: Ifa0229d2044dd53683de4a2b3ab965b16263f267 +--- + src/runtime/defs_linux_loong64.go | 212 ++++++++++++++++++++++++++++ + src/runtime/os_linux.go | 5 + + src/runtime/signal_linux_loong64.go | 76 ++++++++++ + src/runtime/signal_loong64.go | 98 +++++++++++++ + 4 files changed, 391 insertions(+) + create mode 100644 src/runtime/defs_linux_loong64.go + create mode 100644 src/runtime/signal_linux_loong64.go + create mode 100644 src/runtime/signal_loong64.go + +diff --git a/src/runtime/defs_linux_loong64.go b/src/runtime/defs_linux_loong64.go +new file mode 100644 +index 0000000..3e0fac0 +--- /dev/null ++++ b/src/runtime/defs_linux_loong64.go +@@ -0,0 +1,212 @@ ++// Generated using cgo, then manually converted into appropriate naming and code ++// for the Go runtime. ++// go tool cgo -godefs defs_linux.go defs1_linux.go defs2_linux.go ++ ++package runtime ++ ++import "unsafe" ++ ++const ( ++ _EINTR = 0x4 ++ _EAGAIN = 0xb ++ _ENOMEM = 0xc ++ _ENOSYS = 0x26 ++ ++ _PROT_NONE = 0x0 ++ _PROT_READ = 0x1 ++ _PROT_WRITE = 0x2 ++ _PROT_EXEC = 0x4 ++ ++ _MAP_ANON = 0x20 ++ _MAP_PRIVATE = 0x2 ++ _MAP_FIXED = 0x10 ++ ++ _MADV_DONTNEED = 0x4 ++ _MADV_FREE = 0x8 ++ _MADV_HUGEPAGE = 0xe ++ _MADV_NOHUGEPAGE = 0xf ++ ++ _SA_RESTART = 0x10000000 ++ _SA_ONSTACK = 0x8000000 ++ _SA_SIGINFO = 0x4 ++ _SA_RESTORER = 0x0 ++ ++ _SI_KERNEL = 0x80 ++ _SI_TIMER = -0x2 ++ ++ _SIGHUP = 0x1 ++ _SIGINT = 0x2 ++ _SIGQUIT = 0x3 ++ _SIGILL = 0x4 ++ _SIGTRAP = 0x5 ++ _SIGABRT = 0x6 ++ _SIGBUS = 0x7 ++ _SIGFPE = 0x8 ++ _SIGKILL = 0x9 ++ _SIGUSR1 = 0xa ++ _SIGSEGV = 0xb ++ _SIGUSR2 = 0xc ++ _SIGPIPE = 0xd ++ _SIGALRM = 0xe ++ _SIGSTKFLT = 0x10 ++ _SIGCHLD = 0x11 ++ _SIGCONT = 0x12 ++ _SIGSTOP = 0x13 ++ _SIGTSTP = 0x14 ++ _SIGTTIN = 0x15 ++ _SIGTTOU = 0x16 ++ _SIGURG = 0x17 ++ _SIGXCPU = 0x18 ++ _SIGXFSZ = 0x19 ++ _SIGVTALRM = 0x1a ++ _SIGPROF = 0x1b ++ _SIGWINCH = 0x1c ++ _SIGIO = 0x1d ++ _SIGPWR = 0x1e ++ _SIGSYS = 0x1f ++ ++ _SIGRTMIN = 0x20 ++ ++ _FPE_INTDIV = 0x1 ++ _FPE_INTOVF = 0x2 ++ _FPE_FLTDIV = 0x3 ++ _FPE_FLTOVF = 0x4 ++ _FPE_FLTUND = 0x5 ++ _FPE_FLTRES = 0x6 ++ _FPE_FLTINV = 0x7 ++ _FPE_FLTSUB = 0x8 ++ ++ _BUS_ADRALN = 0x1 ++ _BUS_ADRERR = 0x2 ++ _BUS_OBJERR = 0x3 ++ ++ _SEGV_MAPERR = 0x1 ++ _SEGV_ACCERR = 0x2 ++ ++ _ITIMER_REAL = 0x0 ++ _ITIMER_VIRTUAL = 0x1 ++ _ITIMER_PROF = 0x2 ++ ++ _CLOCK_THREAD_CPUTIME_ID = 0x3 ++ ++ _SIGEV_THREAD_ID = 0x4 ++ ++ _EPOLLIN = 0x1 ++ _EPOLLOUT = 0x4 ++ _EPOLLERR = 0x8 ++ _EPOLLHUP = 0x10 ++ _EPOLLRDHUP = 0x2000 ++ _EPOLLET = 0x80000000 ++ _EPOLL_CLOEXEC = 0x80000 ++ _EPOLL_CTL_ADD = 0x1 ++ _EPOLL_CTL_DEL = 0x2 ++ _EPOLL_CTL_MOD = 0x3 ++) ++ ++type timespec struct { ++ tv_sec int64 ++ tv_nsec int64 ++} ++ ++//go:nosplit ++func (ts *timespec) setNsec(ns int64) { ++ ts.tv_sec = ns / 1e9 ++ ts.tv_nsec = ns % 1e9 ++} ++ ++type timeval struct { ++ 
tv_sec int64 ++ tv_usec int64 ++} ++ ++func (tv *timeval) set_usec(x int32) { ++ tv.tv_usec = int64(x) ++} ++ ++type itimerspec struct { ++ it_interval timespec ++ it_value timespec ++} ++ ++type itimerval struct { ++ it_interval timeval ++ it_value timeval ++} ++ ++type sigeventFields struct { ++ value uintptr ++ signo int32 ++ notify int32 ++ // below here is a union; sigev_notify_thread_id is the only field we use ++ sigev_notify_thread_id int32 ++} ++ ++type sigevent struct { ++ sigeventFields ++ // Pad struct to the max size in the kernel. ++ _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte ++} ++ ++type epollevent struct { ++ events uint32 ++ pad_cgo_0 [4]byte ++ data [8]byte // unaligned uintptr ++} ++ ++const ( ++ _O_RDONLY = 0x0 ++ _O_NONBLOCK = 0x800 ++ _O_CLOEXEC = 0x80000 ++) ++ ++type sigactiont struct { ++ sa_handler uintptr ++ sa_flags uint64 ++ sa_mask uint64 ++ // Linux on loong64 does not have the sa_restorer field, but the setsig ++ // function references it (for x86). Not much harm to include it at the end. ++ sa_restorer uintptr ++} ++ ++type siginfoFields struct { ++ si_signo int32 ++ si_errno int32 ++ si_code int32 ++ __pad0 [1]int32 ++ // below here is a union; si_addr is the only field we use ++ si_addr uint64 ++} ++ ++type siginfo struct { ++ siginfoFields ++ // Pad struct to the max size in the kernel. ++ _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte ++} ++ ++type usigset struct { ++ val [16]uint64 ++} ++ ++type stackt struct { ++ ss_sp *byte ++ ss_flags int32 ++ pad_cgo_0 [4]byte ++ ss_size uintptr ++} ++ ++type sigcontext struct { ++ sc_pc uint64 ++ sc_regs [32]uint64 ++ sc_flags uint32 ++ sc_extcontext [0]uint64 ++} ++ ++type ucontext struct { ++ uc_flags uint64 ++ uc_link *ucontext ++ uc_stack stackt ++ uc_sigmask usigset ++ uc_x_unused [0]uint8 ++ uc_pad_cgo_0 [8]byte ++ uc_mcontext sigcontext ++} +diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go +index c8b29e3..34c3cee 100644 +--- a/src/runtime/os_linux.go ++++ b/src/runtime/os_linux.go +@@ -454,6 +454,11 @@ func setsigstack(i uint32) { + sigaction(i, &sa, nil) + } + ++const ( ++ _si_max_size = 128 ++ _sigev_max_size = 64 ++) ++ + //go:nosplit + //go:nowritebarrierrec + func getsig(i uint32) uintptr { +diff --git a/src/runtime/signal_linux_loong64.go b/src/runtime/signal_linux_loong64.go +new file mode 100644 +index 0000000..8f978c0 +--- /dev/null ++++ b/src/runtime/signal_linux_loong64.go +@@ -0,0 +1,76 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package runtime ++ ++import ( ++ "runtime/internal/sys" ++ "unsafe" ++) ++ ++type sigctxt struct { ++ info *siginfo ++ ctxt unsafe.Pointer ++} ++ ++//go:nosplit ++//go:nowritebarrierrec ++func (c *sigctxt) regs() *sigcontext { return &(*ucontext)(c.ctxt).uc_mcontext } ++ ++func (c *sigctxt) r0() uint64 { return c.regs().sc_regs[0] } ++func (c *sigctxt) r1() uint64 { return c.regs().sc_regs[1] } ++func (c *sigctxt) r2() uint64 { return c.regs().sc_regs[2] } ++func (c *sigctxt) r3() uint64 { return c.regs().sc_regs[3] } ++func (c *sigctxt) r4() uint64 { return c.regs().sc_regs[4] } ++func (c *sigctxt) r5() uint64 { return c.regs().sc_regs[5] } ++func (c *sigctxt) r6() uint64 { return c.regs().sc_regs[6] } ++func (c *sigctxt) r7() uint64 { return c.regs().sc_regs[7] } ++func (c *sigctxt) r8() uint64 { return c.regs().sc_regs[8] } ++func (c *sigctxt) r9() uint64 { return c.regs().sc_regs[9] } ++func (c *sigctxt) r10() uint64 { return c.regs().sc_regs[10] } ++func (c *sigctxt) r11() uint64 { return c.regs().sc_regs[11] } ++func (c *sigctxt) r12() uint64 { return c.regs().sc_regs[12] } ++func (c *sigctxt) r13() uint64 { return c.regs().sc_regs[13] } ++func (c *sigctxt) r14() uint64 { return c.regs().sc_regs[14] } ++func (c *sigctxt) r15() uint64 { return c.regs().sc_regs[15] } ++func (c *sigctxt) r16() uint64 { return c.regs().sc_regs[16] } ++func (c *sigctxt) r17() uint64 { return c.regs().sc_regs[17] } ++func (c *sigctxt) r18() uint64 { return c.regs().sc_regs[18] } ++func (c *sigctxt) r19() uint64 { return c.regs().sc_regs[19] } ++func (c *sigctxt) r20() uint64 { return c.regs().sc_regs[20] } ++func (c *sigctxt) r21() uint64 { return c.regs().sc_regs[21] } ++func (c *sigctxt) r22() uint64 { return c.regs().sc_regs[22] } ++func (c *sigctxt) r23() uint64 { return c.regs().sc_regs[23] } ++func (c *sigctxt) r24() uint64 { return c.regs().sc_regs[24] } ++func (c *sigctxt) r25() uint64 { return c.regs().sc_regs[25] } ++func (c *sigctxt) r26() uint64 { return c.regs().sc_regs[26] } ++func (c *sigctxt) r27() uint64 { return c.regs().sc_regs[27] } ++func (c *sigctxt) r28() uint64 { return c.regs().sc_regs[28] } ++func (c *sigctxt) r29() uint64 { return c.regs().sc_regs[29] } ++func (c *sigctxt) r30() uint64 { return c.regs().sc_regs[30] } ++func (c *sigctxt) r31() uint64 { return c.regs().sc_regs[31] } ++func (c *sigctxt) sp() uint64 { return c.regs().sc_regs[3] } ++ ++//go:nosplit ++//go:nowritebarrierrec ++func (c *sigctxt) pc() uint64 { return c.regs().sc_pc } ++ ++func (c *sigctxt) link() uint64 { return c.regs().sc_regs[1] } ++ ++func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) } ++func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr } ++ ++func (c *sigctxt) set_r31(x uint64) { c.regs().sc_regs[31] = x } ++func (c *sigctxt) set_r22(x uint64) { c.regs().sc_regs[22] = x } ++func (c *sigctxt) set_pc(x uint64) { c.regs().sc_pc = x } ++func (c *sigctxt) set_sp(x uint64) { c.regs().sc_regs[3] = x } ++func (c *sigctxt) set_link(x uint64) { c.regs().sc_regs[1] = x } ++ ++func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) } ++func (c *sigctxt) set_sigaddr(x uint64) { ++ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x) ++} +diff --git a/src/runtime/signal_loong64.go b/src/runtime/signal_loong64.go +new file mode 100644 +index 0000000..e65ec58 +--- /dev/null ++++ b/src/runtime/signal_loong64.go +@@ -0,0 +1,98 @@ ++// Copyright 2021 The Go Authors. All rights reserved. 
++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package runtime ++ ++import ( ++ "runtime/internal/sys" ++ "unsafe" ++) ++ ++func dumpregs(c *sigctxt) { ++ print("r0 ", hex(c.r0()), "\t") ++ print("r1 ", hex(c.r1()), "\n") ++ print("r2 ", hex(c.r2()), "\t") ++ print("r3 ", hex(c.r3()), "\n") ++ print("r4 ", hex(c.r4()), "\t") ++ print("r5 ", hex(c.r5()), "\n") ++ print("r6 ", hex(c.r6()), "\t") ++ print("r7 ", hex(c.r7()), "\n") ++ print("r8 ", hex(c.r8()), "\t") ++ print("r9 ", hex(c.r9()), "\n") ++ print("r10 ", hex(c.r10()), "\t") ++ print("r11 ", hex(c.r11()), "\n") ++ print("r12 ", hex(c.r12()), "\t") ++ print("r13 ", hex(c.r13()), "\n") ++ print("r14 ", hex(c.r14()), "\t") ++ print("r15 ", hex(c.r15()), "\n") ++ print("r16 ", hex(c.r16()), "\t") ++ print("r17 ", hex(c.r17()), "\n") ++ print("r18 ", hex(c.r18()), "\t") ++ print("r19 ", hex(c.r19()), "\n") ++ print("r20 ", hex(c.r20()), "\t") ++ print("r21 ", hex(c.r21()), "\n") ++ print("r22 ", hex(c.r22()), "\t") ++ print("r23 ", hex(c.r23()), "\n") ++ print("r24 ", hex(c.r24()), "\t") ++ print("r25 ", hex(c.r25()), "\n") ++ print("r26 ", hex(c.r26()), "\t") ++ print("r27 ", hex(c.r27()), "\n") ++ print("r28 ", hex(c.r28()), "\t") ++ print("r29 ", hex(c.r29()), "\n") ++ print("r30 ", hex(c.r30()), "\t") ++ print("r31 ", hex(c.r31()), "\n") ++ print("pc ", hex(c.pc()), "\t") ++ print("link ", hex(c.link()), "\n") ++} ++ ++//go:nosplit ++//go:nowritebarrierrec ++func (c *sigctxt) sigpc() uintptr { return uintptr(c.pc()) } ++ ++func (c *sigctxt) sigsp() uintptr { return uintptr(c.sp()) } ++func (c *sigctxt) siglr() uintptr { return uintptr(c.link()) } ++func (c *sigctxt) fault() uintptr { return uintptr(c.sigaddr()) } ++ ++// preparePanic sets up the stack to look like a call to sigpanic. ++func (c *sigctxt) preparePanic(sig uint32, gp *g) { ++ // We arrange link, and pc to pretend the panicking ++ // function calls sigpanic directly. ++ // Always save LINK to stack so that panics in leaf ++ // functions are correctly handled. This smashes ++ // the stack frame but we're not going back there ++ // anyway. ++ sp := c.sp() - sys.PtrSize ++ c.set_sp(sp) ++ *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link() ++ ++ pc := gp.sigpc ++ ++ if shouldPushSigpanic(gp, pc, uintptr(c.link())) { ++ // Make it look the like faulting PC called sigpanic. ++ c.set_link(uint64(pc)) ++ } ++ ++ // In case we are panicking from external C code ++ sigpanicPC := uint64(funcPC(sigpanic)) ++ c.set_r31(sigpanicPC >> 32 << 32) // RSB register ++ c.set_r22(uint64(uintptr(unsafe.Pointer(gp)))) ++ c.set_pc(sigpanicPC) ++} ++ ++func (c *sigctxt) pushCall(targetPC, resumePC uintptr) { ++ // Push the LR to stack, as we'll clobber it in order to ++ // push the call. The function being pushed is responsible ++ // for restoring the LR and setting the SP back. ++ // This extra slot is known to gentraceback. ++ sp := c.sp() - 8 ++ c.set_sp(sp) ++ *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link() ++ // Set up PC and LR to pretend the function being signaled ++ // calls targetPC at resumePC. 
++ c.set_link(uint64(resumePC)) ++ c.set_pc(uint64(targetPC)) ++} +-- +2.27.0 + diff --git a/0020-runtime-support-vdso-for-linux-loong64.patch b/0020-runtime-support-vdso-for-linux-loong64.patch new file mode 100644 index 0000000..96434f0 --- /dev/null +++ b/0020-runtime-support-vdso-for-linux-loong64.patch @@ -0,0 +1,114 @@ +From 8fbdbd5a5221fb3aa1b492f4ae6bf39e65a696df Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:12:05 +0800 +Subject: [PATCH 20/56] runtime: support vdso for linux/loong64 + +Change-Id: Ie9bb5ccfc28e65036e2088c232bb333dcb259a60 +--- + src/runtime/os_linux_novdso.go | 4 ++-- + src/runtime/vdso_elf64.go | 4 ++-- + src/runtime/vdso_in_none.go | 4 ++-- + src/runtime/vdso_linux.go | 4 ++-- + src/runtime/vdso_linux_loong64.go | 28 ++++++++++++++++++++++++++++ + 5 files changed, 36 insertions(+), 8 deletions(-) + create mode 100644 src/runtime/vdso_linux_loong64.go + +diff --git a/src/runtime/os_linux_novdso.go b/src/runtime/os_linux_novdso.go +index 8104f63..9783076 100644 +--- a/src/runtime/os_linux_novdso.go ++++ b/src/runtime/os_linux_novdso.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le +-// +build linux,!386,!amd64,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le ++//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le ++// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips64,!mips64le,!ppc64,!ppc64le + + package runtime + +diff --git a/src/runtime/vdso_elf64.go b/src/runtime/vdso_elf64.go +index 9923bd4..c744f7b 100644 +--- a/src/runtime/vdso_elf64.go ++++ b/src/runtime/vdso_elf64.go +@@ -2,9 +2,9 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build linux && (amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le) ++//go:build linux && (amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le) + // +build linux +-// +build amd64 arm64 mips64 mips64le ppc64 ppc64le ++// +build amd64 arm64 loong64 mips64 mips64le ppc64 ppc64le + + package runtime + +diff --git a/src/runtime/vdso_in_none.go b/src/runtime/vdso_in_none.go +index c66fbf8..28bf27a 100644 +--- a/src/runtime/vdso_in_none.go ++++ b/src/runtime/vdso_in_none.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build (linux && !386 && !amd64 && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le) || !linux +-// +build linux,!386,!amd64,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le !linux ++//go:build (linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le) || !linux ++// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips64,!mips64le,!ppc64,!ppc64le !linux + + package runtime + +diff --git a/src/runtime/vdso_linux.go b/src/runtime/vdso_linux.go +index ae211f9..4f6381f 100644 +--- a/src/runtime/vdso_linux.go ++++ b/src/runtime/vdso_linux.go +@@ -2,9 +2,9 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build linux && (386 || amd64 || arm || arm64 || mips64 || mips64le || ppc64 || ppc64le) ++//go:build linux && (386 || amd64 || arm || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le) + // +build linux +-// +build 386 amd64 arm arm64 mips64 mips64le ppc64 ppc64le ++// +build 386 amd64 arm arm64 loong64 mips64 mips64le ppc64 ppc64le + + package runtime + +diff --git a/src/runtime/vdso_linux_loong64.go b/src/runtime/vdso_linux_loong64.go +new file mode 100644 +index 0000000..e8afdd4 +--- /dev/null ++++ b/src/runtime/vdso_linux_loong64.go +@@ -0,0 +1,28 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package runtime ++ ++const ( ++ // vdsoArrayMax is the byte-size of a maximally sized array on this architecture. ++ // See cmd/compile/internal/loong64/galign.go arch.MAXWIDTH initialization. ++ vdsoArrayMax = 1<<50 - 1 ++) ++ ++// see man 7 vdso : loong64 ++var vdsoLinuxVersion = vdsoVersionKey{"LINUX_2.6", 0x3ae75f6} ++ ++// The symbol name is not __kernel_clock_gettime as suggested by the manpage; ++// according to Linux source code it should be __vdso_clock_gettime instead. ++var vdsoSymbolKeys = []vdsoSymbolKey{ ++ {"__vdso_clock_gettime", 0xd35ec75, 0x6e43a318, &vdsoClockgettimeSym}, ++} ++ ++// initialize to fall back to syscall ++var ( ++ vdsoClockgettimeSym uintptr = 0 ++) +-- +2.27.0 + diff --git a/0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch b/0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch new file mode 100644 index 0000000..5638b2d --- /dev/null +++ b/0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch @@ -0,0 +1,979 @@ +From b43eeab48a0fddee0874fc19965c0d2139713e36 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:13:22 +0800 +Subject: [PATCH 21/56] runtime: implement duffzero/duffcopy for linux/loong64 + +Change-Id: Ida040e76dc8172f60e6aee1ea2b5bce13ab3581e +--- + src/runtime/duff_loong64.s | 909 +++++++++++++++++++++++++++++++++++++ + src/runtime/mkduff.go | 31 ++ + 2 files changed, 940 insertions(+) + create mode 100644 src/runtime/duff_loong64.s + +diff --git a/src/runtime/duff_loong64.s b/src/runtime/duff_loong64.s +new file mode 100644 +index 0000000..f070916 +--- /dev/null ++++ b/src/runtime/duff_loong64.s +@@ -0,0 +1,909 @@ ++// Code generated by mkduff.go; DO NOT EDIT. ++// Run go generate from src/runtime to update. ++// See mkduff.go for comments. 
++ ++// +build loong64 ++ ++#include "textflag.h" ++ ++TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) 
++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ MOVV R0, 8(R19) ++ ADDV $8, R19 ++ RET ++ ++TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) 
++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, 
R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ MOVV (R19), R30 ++ ADDV $8, R19 ++ MOVV R30, (R20) ++ ADDV $8, R20 ++ ++ RET +diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go +index da191cc..f12d984 100644 +--- a/src/runtime/mkduff.go ++++ b/src/runtime/mkduff.go +@@ -37,6 +37,7 @@ func main() { + gen("386", notags, zero386, copy386) 
+ gen("arm", notags, zeroARM, copyARM) + gen("arm64", notags, zeroARM64, copyARM64) ++ gen("loong64", tagsLOONG64, zeroLOONG64, copyLOONG64) + gen("ppc64x", tagsPPC64x, zeroPPC64x, copyPPC64x) + gen("mips64x", tagsMIPS64x, zeroMIPS64x, copyMIPS64x) + gen("riscv64", notags, zeroRISCV64, copyRISCV64) +@@ -177,6 +178,36 @@ func copyARM64(w io.Writer) { + fmt.Fprintln(w, "\tRET") + } + ++func tagsLOONG64(w io.Writer) { ++ fmt.Fprintln(w) ++ fmt.Fprintln(w, "// +build loong64") ++ fmt.Fprintln(w) ++} ++ ++func zeroLOONG64(w io.Writer) { ++ // R0: always zero ++ // R19 (aka REGRT1): ptr to memory to be zeroed - 8 ++ // On return, R19 points to the last zeroed dword. ++ fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0") ++ for i := 0; i < 128; i++ { ++ fmt.Fprintln(w, "\tMOVV\tR0, 8(R19)") ++ fmt.Fprintln(w, "\tADDV\t$8, R19") ++ } ++ fmt.Fprintln(w, "\tRET") ++} ++ ++func copyLOONG64(w io.Writer) { ++ fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0") ++ for i := 0; i < 128; i++ { ++ fmt.Fprintln(w, "\tMOVV\t(R19), R30") ++ fmt.Fprintln(w, "\tADDV\t$8, R19") ++ fmt.Fprintln(w, "\tMOVV\tR30, (R20)") ++ fmt.Fprintln(w, "\tADDV\t$8, R20") ++ fmt.Fprintln(w) ++ } ++ fmt.Fprintln(w, "\tRET") ++} ++ + func tagsPPC64x(w io.Writer) { + fmt.Fprintln(w) + fmt.Fprintln(w, "//go:build ppc64 || ppc64le") +-- +2.27.0 + diff --git a/0022-runtime-implement-asyncPreempt-for-linux-loong64.patch b/0022-runtime-implement-asyncPreempt-for-linux-loong64.patch new file mode 100644 index 0000000..41406d2 --- /dev/null +++ b/0022-runtime-implement-asyncPreempt-for-linux-loong64.patch @@ -0,0 +1,231 @@ +From a7abaf5e7b0746492777d8395be1da6469b0a00c Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:15:16 +0800 +Subject: [PATCH 22/56] runtime: implement asyncPreempt for linux/loong64 + +Change-Id: I7a64e38b15a99816bd74262c02f62dad021cc166 +--- + src/runtime/mkpreempt.go | 49 +++++++++++++ + src/runtime/preempt.go | 2 +- + src/runtime/preempt_loong64.s | 129 ++++++++++++++++++++++++++++++++++ + 3 files changed, 179 insertions(+), 1 deletion(-) + create mode 100644 src/runtime/preempt_loong64.s + +diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go +index 6c98054..1bad20d 100644 +--- a/src/runtime/mkpreempt.go ++++ b/src/runtime/mkpreempt.go +@@ -81,6 +81,7 @@ var arches = map[string]func(){ + "amd64": genAMD64, + "arm": genARM, + "arm64": genARM64, ++ "loong64": genLOONG64, + "mips64x": func() { genMIPS(true) }, + "mipsx": func() { genMIPS(false) }, + "ppc64x": genPPC64, +@@ -449,6 +450,54 @@ func genMIPS(_64bit bool) { + p("JMP (R23)") + } + ++func genLOONG64() { ++ mov := "MOVV" ++ movf := "MOVD" ++ add := "ADDV" ++ sub := "SUBV" ++ r31 := "RSB" ++ regsize := 8 ++ ++ // Add integer registers r4-r21 r23-r29 r31 ++ // R0 (zero), R30 (REGTMP), R2(tp),R3 (SP), R22 (g), R1 (LR) are special, ++ var l = layout{sp: "R3", stack: regsize} // add slot to save PC of interrupted instruction (in LR) ++ for i := 4; i <= 29; i++ { ++ if i == 22 { ++ continue //R3 is REGSP R22 is g ++ } ++ reg := fmt.Sprintf("R%d", i) ++ l.add(mov, reg, regsize) ++ } ++ l.add(mov, r31, regsize) ++ ++ // Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant) ++ var lfp = layout{sp: "R3", stack: l.stack} ++ // lfp.addSpecial( ++ // mov+" FCR31, R1\n"+mov+" R1, %d(R29)", ++ // mov+" %d(R29), R1\n"+mov+" R1, FCR31", ++ // regsize) ++ // Add floating point registers F0-F31. 
++ for i := 0; i <= 31; i++ { ++ reg := fmt.Sprintf("F%d", i) ++ lfp.add(movf, reg, regsize) ++ } ++ ++ // allocate frame, save PC of interrupted instruction (in LR) ++ p(mov+" R1, -%d(R3)", lfp.stack) ++ p(sub+" $%d, R3", lfp.stack) ++ ++ l.save() ++ lfp.save() ++ p("CALL ·asyncPreempt2(SB)") ++ lfp.restore() ++ l.restore() ++ ++ p(mov+" %d(R3), R1", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it ++ p(mov + " (R3), R30") // load PC to REGTMP ++ p(add+" $%d, R3", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) ++ p("JMP (R30)") ++} ++ + func genPPC64() { + // Add integer registers R3-R29 + // R0 (zero), R1 (SP), R30 (g) are special and not saved here. +diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go +index 1d5aae1..92a05d2 100644 +--- a/src/runtime/preempt.go ++++ b/src/runtime/preempt.go +@@ -386,7 +386,7 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) { + // Not Go code. + return false, 0 + } +- if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 { ++ if (GOARCH == "loong64" || GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 { + // We probably stopped at a half-executed CALL instruction, + // where the LR is updated but the PC has not. If we preempt + // here we'll see a seemingly self-recursive call, which is in +diff --git a/src/runtime/preempt_loong64.s b/src/runtime/preempt_loong64.s +new file mode 100644 +index 0000000..363959e +--- /dev/null ++++ b/src/runtime/preempt_loong64.s +@@ -0,0 +1,129 @@ ++// Code generated by mkpreempt.go; DO NOT EDIT. ++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 ++ MOVV R1, -472(R3) ++ SUBV $472, R3 ++ MOVV R4, 8(R3) ++ MOVV R5, 16(R3) ++ MOVV R6, 24(R3) ++ MOVV R7, 32(R3) ++ MOVV R8, 40(R3) ++ MOVV R9, 48(R3) ++ MOVV R10, 56(R3) ++ MOVV R11, 64(R3) ++ MOVV R12, 72(R3) ++ MOVV R13, 80(R3) ++ MOVV R14, 88(R3) ++ MOVV R15, 96(R3) ++ MOVV R16, 104(R3) ++ MOVV R17, 112(R3) ++ MOVV R18, 120(R3) ++ MOVV R19, 128(R3) ++ MOVV R20, 136(R3) ++ MOVV R21, 144(R3) ++ MOVV R23, 152(R3) ++ MOVV R24, 160(R3) ++ MOVV R25, 168(R3) ++ MOVV R26, 176(R3) ++ MOVV R27, 184(R3) ++ MOVV R28, 192(R3) ++ MOVV R29, 200(R3) ++ MOVV RSB, 208(R3) ++ MOVD F0, 216(R3) ++ MOVD F1, 224(R3) ++ MOVD F2, 232(R3) ++ MOVD F3, 240(R3) ++ MOVD F4, 248(R3) ++ MOVD F5, 256(R3) ++ MOVD F6, 264(R3) ++ MOVD F7, 272(R3) ++ MOVD F8, 280(R3) ++ MOVD F9, 288(R3) ++ MOVD F10, 296(R3) ++ MOVD F11, 304(R3) ++ MOVD F12, 312(R3) ++ MOVD F13, 320(R3) ++ MOVD F14, 328(R3) ++ MOVD F15, 336(R3) ++ MOVD F16, 344(R3) ++ MOVD F17, 352(R3) ++ MOVD F18, 360(R3) ++ MOVD F19, 368(R3) ++ MOVD F20, 376(R3) ++ MOVD F21, 384(R3) ++ MOVD F22, 392(R3) ++ MOVD F23, 400(R3) ++ MOVD F24, 408(R3) ++ MOVD F25, 416(R3) ++ MOVD F26, 424(R3) ++ MOVD F27, 432(R3) ++ MOVD F28, 440(R3) ++ MOVD F29, 448(R3) ++ MOVD F30, 456(R3) ++ MOVD F31, 464(R3) ++ CALL ·asyncPreempt2(SB) ++ MOVD 464(R3), F31 ++ MOVD 456(R3), F30 ++ MOVD 448(R3), F29 ++ MOVD 440(R3), F28 ++ MOVD 432(R3), F27 ++ MOVD 424(R3), F26 ++ MOVD 416(R3), F25 ++ MOVD 408(R3), F24 ++ MOVD 400(R3), F23 ++ MOVD 392(R3), F22 ++ MOVD 384(R3), F21 ++ MOVD 376(R3), F20 ++ MOVD 368(R3), F19 ++ MOVD 360(R3), F18 ++ MOVD 352(R3), F17 ++ MOVD 344(R3), F16 ++ MOVD 336(R3), F15 ++ MOVD 328(R3), F14 ++ MOVD 320(R3), F13 ++ MOVD 312(R3), F12 ++ MOVD 304(R3), F11 ++ MOVD 
296(R3), F10 ++ MOVD 288(R3), F9 ++ MOVD 280(R3), F8 ++ MOVD 272(R3), F7 ++ MOVD 264(R3), F6 ++ MOVD 256(R3), F5 ++ MOVD 248(R3), F4 ++ MOVD 240(R3), F3 ++ MOVD 232(R3), F2 ++ MOVD 224(R3), F1 ++ MOVD 216(R3), F0 ++ MOVV 208(R3), RSB ++ MOVV 200(R3), R29 ++ MOVV 192(R3), R28 ++ MOVV 184(R3), R27 ++ MOVV 176(R3), R26 ++ MOVV 168(R3), R25 ++ MOVV 160(R3), R24 ++ MOVV 152(R3), R23 ++ MOVV 144(R3), R21 ++ MOVV 136(R3), R20 ++ MOVV 128(R3), R19 ++ MOVV 120(R3), R18 ++ MOVV 112(R3), R17 ++ MOVV 104(R3), R16 ++ MOVV 96(R3), R15 ++ MOVV 88(R3), R14 ++ MOVV 80(R3), R13 ++ MOVV 72(R3), R12 ++ MOVV 64(R3), R11 ++ MOVV 56(R3), R10 ++ MOVV 48(R3), R9 ++ MOVV 40(R3), R8 ++ MOVV 32(R3), R7 ++ MOVV 24(R3), R6 ++ MOVV 16(R3), R5 ++ MOVV 8(R3), R4 ++ MOVV 472(R3), R1 ++ MOVV (R3), R30 ++ ADDV $480, R3 ++ JMP (R30) +-- +2.27.0 + diff --git a/0023-runtime-support-memclr-memmove-for-linux-loong64.patch b/0023-runtime-support-memclr-memmove-for-linux-loong64.patch new file mode 100644 index 0000000..83edac1 --- /dev/null +++ b/0023-runtime-support-memclr-memmove-for-linux-loong64.patch @@ -0,0 +1,178 @@ +From 39225c6d232d8c1e875041203f0048b55019e3df Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:25:19 +0800 +Subject: [PATCH 23/56] runtime: support memclr/memmove for linux/loong64 + +Change-Id: I7c1f39670034db6714630d479bc41b6620ba2b1a +--- + src/runtime/memclr_loong64.s | 43 ++++++++++++++ + src/runtime/memmove_loong64.s | 107 ++++++++++++++++++++++++++++++++++ + 2 files changed, 150 insertions(+) + create mode 100644 src/runtime/memclr_loong64.s + create mode 100644 src/runtime/memmove_loong64.s + +diff --git a/src/runtime/memclr_loong64.s b/src/runtime/memclr_loong64.s +new file mode 100644 +index 0000000..c486e84 +--- /dev/null ++++ b/src/runtime/memclr_loong64.s +@@ -0,0 +1,43 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) ++TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 ++ MOVV ptr+0(FP), R6 ++ MOVV n+8(FP), R7 ++ ADDV R6, R7, R4 ++ ++ // if less than 8 bytes, do one byte at a time ++ SGTU $8, R7, R8 ++ BNE R8, out ++ ++ // do one byte at a time until 8-aligned ++ AND $7, R6, R8 ++ BEQ R8, words ++ MOVB R0, (R6) ++ ADDV $1, R6 ++ JMP -4(PC) ++ ++words: ++ // do 8 bytes at a time if there is room ++ ADDV $-7, R4, R7 ++ ++ SGTU R7, R6, R8 ++ BEQ R8, out ++ MOVV R0, (R6) ++ ADDV $8, R6 ++ JMP -4(PC) ++ ++out: ++ BEQ R6, R4, done ++ MOVB R0, (R6) ++ ADDV $1, R6 ++ JMP -3(PC) ++done: ++ RET +diff --git a/src/runtime/memmove_loong64.s b/src/runtime/memmove_loong64.s +new file mode 100644 +index 0000000..339e83b +--- /dev/null ++++ b/src/runtime/memmove_loong64.s +@@ -0,0 +1,107 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "textflag.h" ++ ++// See memmove Go doc for important implementation constraints. 
++ ++// func memmove(to, from unsafe.Pointer, n uintptr) ++TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24 ++ MOVV to+0(FP), R4 ++ MOVV from+8(FP), R5 ++ MOVV n+16(FP), R6 ++ BNE R6, check ++ RET ++ ++check: ++ SGTU R4, R5, R7 ++ BNE R7, backward ++ ++ ADDV R4, R6, R9 // end pointer ++ ++ // if the two pointers are not of same alignments, do byte copying ++ SUBVU R5, R4, R7 ++ AND $7, R7 ++ BNE R7, out ++ ++ // if less than 8 bytes, do byte copying ++ SGTU $8, R6, R7 ++ BNE R7, out ++ ++ // do one byte at a time until 8-aligned ++ AND $7, R4, R8 ++ BEQ R8, words ++ MOVB (R5), R7 ++ ADDV $1, R5 ++ MOVB R7, (R4) ++ ADDV $1, R4 ++ JMP -6(PC) ++ ++words: ++ // do 8 bytes at a time if there is room ++ ADDV $-7, R9, R6 // R6 is end pointer-7 ++ ++ SGTU R6, R4, R8 ++ BEQ R8, out ++ MOVV (R5), R7 ++ ADDV $8, R5 ++ MOVV R7, (R4) ++ ADDV $8, R4 ++ JMP -6(PC) ++ ++out: ++ BEQ R4, R9, done ++ MOVB (R5), R7 ++ ADDV $1, R5 ++ MOVB R7, (R4) ++ ADDV $1, R4 ++ JMP -5(PC) ++done: ++ RET ++ ++backward: ++ ADDV R6, R5 // from-end pointer ++ ADDV R4, R6, R9 // to-end pointer ++ ++ // if the two pointers are not of same alignments, do byte copying ++ SUBVU R9, R5, R7 ++ AND $7, R7 ++ BNE R7, out1 ++ ++ // if less than 8 bytes, do byte copying ++ SGTU $8, R6, R7 ++ BNE R7, out1 ++ ++ // do one byte at a time until 8-aligned ++ AND $7, R9, R8 ++ BEQ R8, words1 ++ ADDV $-1, R5 ++ MOVB (R5), R7 ++ ADDV $-1, R9 ++ MOVB R7, (R9) ++ JMP -6(PC) ++ ++words1: ++ // do 8 bytes at a time if there is room ++ ADDV $7, R4, R6 // R6 is start pointer+7 ++ ++ SGTU R9, R6, R8 ++ BEQ R8, out1 ++ ADDV $-8, R5 ++ MOVV (R5), R7 ++ ADDV $-8, R9 ++ MOVV R7, (R9) ++ JMP -6(PC) ++ ++out1: ++ BEQ R4, R9, done1 ++ ADDV $-1, R5 ++ MOVB (R5), R7 ++ ADDV $-1, R9 ++ MOVB R7, (R9) ++ JMP -5(PC) ++done1: ++ RET +-- +2.27.0 + diff --git a/0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch b/0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch new file mode 100644 index 0000000..dc5c4f5 --- /dev/null +++ b/0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch @@ -0,0 +1,626 @@ +From aa007c52a0ee9f061391247bf55dca5e5513a977 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:27:29 +0800 +Subject: [PATCH 24/56] runtime: implement syscalls for runtime bootstrap on + linux/loong64 + +Change-Id: I848608267932717895d5cff9e33040029c3f3c4b +--- + src/runtime/sys_linux_loong64.s | 605 ++++++++++++++++++++++++++++++++ + 1 file changed, 605 insertions(+) + create mode 100644 src/runtime/sys_linux_loong64.s + +diff --git a/src/runtime/sys_linux_loong64.s b/src/runtime/sys_linux_loong64.s +new file mode 100644 +index 0000000..1496d92 +--- /dev/null ++++ b/src/runtime/sys_linux_loong64.s +@@ -0,0 +1,605 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++// +build linux ++// +build loong64 ++ ++// ++// System calls and other sys.stuff for loong64, Linux ++// ++ ++#include "go_asm.h" ++#include "go_tls.h" ++#include "textflag.h" ++ ++#define AT_FDCWD -100 ++ ++#define SYS_exit 93 ++#define SYS_read 63 ++#define SYS_write 64 ++#define SYS_close 57 ++#define SYS_getpid 172 ++#define SYS_kill 129 ++#define SYS_fcntl 25 ++#define SYS_mmap 222 ++#define SYS_munmap 215 ++#define SYS_setitimer 103 ++#define SYS_clone 220 ++#define SYS_nanosleep 101 ++#define SYS_sched_yield 124 ++#define SYS_rt_sigreturn 139 ++#define SYS_rt_sigaction 134 ++#define SYS_rt_sigprocmask 135 ++#define SYS_sigaltstack 132 ++#define SYS_madvise 233 ++#define SYS_mincore 232 ++#define SYS_gettid 178 ++#define SYS_futex 98 ++#define SYS_sched_getaffinity 123 ++#define SYS_exit_group 94 ++#define SYS_epoll_ctl 21 ++#define SYS_tgkill 131 ++#define SYS_openat 56 ++#define SYS_epoll_pwait 22 ++#define SYS_clock_gettime 113 ++#define SYS_epoll_create1 20 ++#define SYS_brk 214 ++#define SYS_pipe2 59 ++ ++TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4 ++ MOVW code+0(FP), R4 ++ MOVV $SYS_exit_group, R11 ++ SYSCALL ++ RET ++ ++// func exitThread(wait *uint32) ++TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8 ++ MOVV wait+0(FP), R19 ++ // We're done using the stack. ++ MOVW $0, R11 ++ DBAR ++ MOVW R11, (R19) ++ DBAR ++ MOVW $0, R4 // exit code ++ MOVV $SYS_exit, R11 ++ SYSCALL ++ JMP 0(PC) ++ ++TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20 ++ MOVW $AT_FDCWD, R4 // AT_FDCWD, so this acts like open ++ MOVV name+0(FP), R5 ++ MOVW mode+8(FP), R6 ++ MOVW perm+12(FP), R7 ++ MOVV $SYS_openat, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVW $-1, R4 ++ MOVW R4, ret+16(FP) ++ RET ++ ++TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12 ++ MOVW fd+0(FP), R4 ++ MOVV $SYS_close, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVW $-1, R4 ++ MOVW R4, ret+8(FP) ++ RET ++ ++TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28 ++ MOVV fd+0(FP), R4 ++ MOVV p+8(FP), R5 ++ MOVW n+16(FP), R6 ++ MOVV $SYS_write, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28 ++ MOVW fd+0(FP), R4 ++ MOVV p+8(FP), R5 ++ MOVW n+16(FP), R6 ++ MOVV $SYS_read, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// func pipe() (r, w int32, errno int32) ++TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12 ++ MOVV $r+0(FP), R4 ++ MOVV R0, R5 ++ MOVV $SYS_pipe2, R11 ++ SYSCALL ++ MOVW R4, errno+8(FP) ++ RET ++ ++// func pipe2(flags int32) (r, w int32, errno int32) ++TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20 ++ MOVV $r+8(FP), R4 ++ MOVW flags+0(FP), R5 ++ MOVV $SYS_pipe2, R11 ++ SYSCALL ++ MOVW R4, errno+16(FP) ++ RET ++ ++TEXT runtime·usleep(SB),NOSPLIT,$16-4 ++ MOVWU usec+0(FP), R6 ++ MOVV R6, R5 ++ MOVW $1000000, R4 ++ DIVVU R4, R6, R6 ++ MOVV R6, 8(R3) ++ MOVW $1000, R4 ++ MULVU R6, R4, R4 ++ SUBVU R4, R5 ++ MOVV R5, 16(R3) ++ ++ // nanosleep(&ts, 0) ++ ADDV $8, R3, R4 ++ MOVW $0, R5 ++ MOVV $SYS_nanosleep, R11 ++ SYSCALL ++ RET ++ ++TEXT runtime·gettid(SB),NOSPLIT,$0-4 ++ MOVV $SYS_gettid, R11 ++ SYSCALL ++ MOVW R4, ret+0(FP) ++ RET ++ ++TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 ++ MOVV $SYS_getpid, R11 ++ SYSCALL ++ MOVW R4, R23 ++ MOVV $SYS_gettid, R11 ++ SYSCALL ++ MOVW R4, R5 // arg 2 tid ++ MOVW R23, R4 // arg 1 pid ++ MOVW sig+0(FP), R6 // arg 3 ++ MOVV $SYS_tgkill, R11 ++ SYSCALL ++ RET ++ ++TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0 ++ MOVV $SYS_getpid, R11 ++ SYSCALL ++ //MOVW R4, R4 // arg 1 pid ++ MOVW sig+0(FP), R5 // arg 2 ++ MOVV $SYS_kill, R11 ++ 
SYSCALL ++ RET ++ ++TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8 ++ MOVV $SYS_getpid, R11 ++ SYSCALL ++ MOVV R4, ret+0(FP) ++ RET ++ ++TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24 ++ MOVV tgid+0(FP), R4 ++ MOVV tid+8(FP), R5 ++ MOVV sig+16(FP), R6 ++ MOVV $SYS_tgkill, R11 ++ SYSCALL ++ RET ++ ++TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24 ++ MOVW mode+0(FP), R4 ++ MOVV new+8(FP), R5 ++ MOVV old+16(FP), R6 ++ MOVV $SYS_setitimer, R11 ++ SYSCALL ++ RET ++ ++TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28 ++ MOVV addr+0(FP), R4 ++ MOVV n+8(FP), R5 ++ MOVV dst+16(FP), R6 ++ MOVV $SYS_mincore, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// func walltime() (sec int64, nsec int32) ++TEXT runtime·walltime(SB),NOSPLIT,$16-12 ++ MOVV R3, R23 // R23 is unchanged by C code ++ MOVV R3, R25 ++ ++ MOVV g_m(g), R24 // R24 = m ++ ++ // Set vdsoPC and vdsoSP for SIGPROF traceback. ++ // Save the old values on stack and restore them on exit, ++ // so this function is reentrant. ++ MOVV m_vdsoPC(R24), R11 ++ MOVV m_vdsoSP(R24), R7 ++ MOVV R11, 8(R3) ++ MOVV R7, 16(R3) ++ ++ MOVV $ret-8(FP), R11 // caller's SP ++ MOVV R1, m_vdsoPC(R24) ++ MOVV R11, m_vdsoSP(R24) ++ ++ MOVV m_curg(R24), R4 ++ MOVV g, R5 ++ BNE R4, R5, noswitch ++ ++ MOVV m_g0(R24), R4 ++ MOVV (g_sched+gobuf_sp)(R4), R25 // Set SP to g0 stack ++ ++noswitch: ++ SUBV $16, R25 ++ AND $~15, R25 // Align for C code ++ MOVV R25, R3 ++ ++ MOVW $0, R4 // CLOCK_REALTIME=0 ++ MOVV $0(R3), R5 ++ ++ MOVV runtime·vdsoClockgettimeSym(SB), R20 ++ BEQ R20, fallback ++ ++ JAL (R20) ++ ++finish: ++ MOVV 0(R3), R7 // sec ++ MOVV 8(R3), R5 // nsec ++ ++ MOVV R23, R3 // restore SP ++ // Restore vdsoPC, vdsoSP ++ // We don't worry about being signaled between the two stores. ++ // If we are not in a signal handler, we'll restore vdsoSP to 0, ++ // and no one will care about vdsoPC. If we are in a signal handler, ++ // we cannot receive another signal. ++ MOVV 16(R3), R25 ++ MOVV R25, m_vdsoSP(R24) ++ MOVV 8(R3), R25 ++ MOVV R25, m_vdsoPC(R24) ++ ++ MOVV R7, sec+0(FP) ++ MOVW R5, nsec+8(FP) ++ RET ++ ++fallback: ++ MOVV $SYS_clock_gettime, R11 ++ SYSCALL ++ JMP finish ++ ++TEXT runtime·nanotime1(SB),NOSPLIT,$16-8 ++ MOVV R3, R23 // R23 is unchanged by C code ++ MOVV R3, R25 ++ ++ MOVV g_m(g), R24 // R24 = m ++ ++ // Set vdsoPC and vdsoSP for SIGPROF traceback. ++ // Save the old values on stack and restore them on exit, ++ // so this function is reentrant. ++ MOVV m_vdsoPC(R24), R11 ++ MOVV m_vdsoSP(R24), R7 ++ MOVV R11, 8(R3) ++ MOVV R7, 16(R3) ++ ++ MOVV $ret-8(FP), R11 // caller's SP ++ MOVV R1, m_vdsoPC(R24) ++ MOVV R11, m_vdsoSP(R24) ++ ++ MOVV m_curg(R24), R4 ++ MOVV g, R5 ++ BNE R4, R5, noswitch ++ ++ MOVV m_g0(R24), R4 ++ MOVV (g_sched+gobuf_sp)(R4), R25 // Set SP to g0 stack ++ ++noswitch: ++ SUBV $16, R25 ++ AND $~15, R25 // Align for C code ++ MOVV R25, R3 ++ ++ MOVW $1, R4 // CLOCK_MONOTONIC=1 ++ MOVV $0(R3), R5 ++ ++ MOVV runtime·vdsoClockgettimeSym(SB), R20 ++ BEQ R20, fallback ++ ++ JAL (R20) ++ ++finish: ++ MOVV 0(R3), R7 // sec ++ MOVV 8(R3), R5 // nsec ++ ++ MOVV R23, R3 // restore SP ++ // Restore vdsoPC, vdsoSP ++ // We don't worry about being signaled between the two stores. ++ // If we are not in a signal handler, we'll restore vdsoSP to 0, ++ // and no one will care about vdsoPC. If we are in a signal handler, ++ // we cannot receive another signal. 
++ MOVV 16(R3), R25 ++ MOVV R25, m_vdsoSP(R24) ++ MOVV 8(R3), R25 ++ MOVV R25, m_vdsoPC(R24) ++ ++ // sec is in R7, nsec in R5 ++ // return nsec in R7 ++ MOVV $1000000000, R4 ++ MULVU R4, R7, R7 ++ ADDVU R5, R7 ++ MOVV R7, ret+0(FP) ++ RET ++ ++fallback: ++ MOVV $SYS_clock_gettime, R11 ++ SYSCALL ++ JMP finish ++ ++TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28 ++ MOVW how+0(FP), R4 ++ MOVV new+8(FP), R5 ++ MOVV old+16(FP), R6 ++ MOVW size+24(FP), R7 ++ MOVV $SYS_rt_sigprocmask, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVV R0, 0xf1(R0) // crash ++ RET ++ ++TEXT runtime·rt_sigaction(SB),NOSPLIT|NOFRAME,$0-36 ++ MOVV sig+0(FP), R4 ++ MOVV new+8(FP), R5 ++ MOVV old+16(FP), R6 ++ MOVV size+24(FP), R7 ++ MOVV $SYS_rt_sigaction, R11 ++ SYSCALL ++ MOVW R4, ret+32(FP) ++ RET ++ ++TEXT runtime·sigfwd(SB),NOSPLIT,$0-32 ++ MOVW sig+8(FP), R4 ++ MOVV info+16(FP), R5 ++ MOVV ctx+24(FP), R6 ++ MOVV fn+0(FP), R20 ++ JAL (R20) ++ RET ++ ++TEXT runtime·sigtramp(SB),NOSPLIT,$64 ++ // this might be called in external code context, ++ // where g is not set. ++ MOVB runtime·iscgo(SB), R19 ++ BEQ R19, 2(PC) ++ JAL runtime·load_g(SB) ++ ++ MOVW R4, 8(R3) ++ MOVV R5, 16(R3) ++ MOVV R6, 24(R3) ++ MOVV $runtime·sigtrampgo(SB), R19 ++ JAL (R19) ++ RET ++ ++TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0 ++ JMP runtime·sigtramp(SB) ++ ++TEXT runtime·mmap(SB),NOSPLIT|NOFRAME,$0 ++ MOVV addr+0(FP), R4 ++ MOVV n+8(FP), R5 ++ MOVW prot+16(FP), R6 ++ MOVW flags+20(FP), R7 ++ MOVW fd+24(FP), R8 ++ MOVW off+28(FP), R9 ++ ++ MOVV $SYS_mmap, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, ok ++ MOVV $0, p+32(FP) ++ SUBVU R4, R0, R4 ++ MOVV R4, err+40(FP) ++ RET ++ok: ++ MOVV R4, p+32(FP) ++ MOVV $0, err+40(FP) ++ RET ++ ++TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0 ++ MOVV addr+0(FP), R4 ++ MOVV n+8(FP), R5 ++ MOVV $SYS_munmap, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVV R0, 0xf3(R0) // crash ++ RET ++ ++TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 ++ MOVV addr+0(FP), R4 ++ MOVV n+8(FP), R5 ++ MOVW flags+16(FP), R6 ++ MOVV $SYS_madvise, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// int64 futex(int32 *uaddr, int32 op, int32 val, ++// struct timespec *timeout, int32 *uaddr2, int32 val2); ++TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0 ++ MOVV addr+0(FP), R4 ++ MOVW op+8(FP), R5 ++ MOVW val+12(FP), R6 ++ MOVV ts+16(FP), R7 ++ MOVV addr2+24(FP), R8 ++ MOVW val3+32(FP), R9 ++ MOVV $SYS_futex, R11 ++ SYSCALL ++ MOVW R4, ret+40(FP) ++ RET ++ ++// int64 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void)); ++TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0 ++ MOVW flags+0(FP), R4 ++ MOVV stk+8(FP), R5 ++ ++ // Copy mp, gp, fn off parent stack for use by child. ++ // Careful: Linux system call clobbers ???. ++ MOVV mp+16(FP), R23 ++ MOVV gp+24(FP), R24 ++ MOVV fn+32(FP), R25 ++ ++ MOVV R23, -8(R5) ++ MOVV R24, -16(R5) ++ MOVV R25, -24(R5) ++ MOVV $1234, R23 ++ MOVV R23, -32(R5) ++ ++ MOVV $SYS_clone, R11 ++ SYSCALL ++ ++ // In parent, return. ++ BEQ R4, 3(PC) ++ MOVW R4, ret+40(FP) ++ RET ++ ++ // In child, on new stack. ++ MOVV -32(R3), R23 ++ MOVV $1234, R19 ++ BEQ R23, R19, 2(PC) ++ MOVV R0, 0(R0) ++ ++ // Initialize m->procid to Linux tid ++ MOVV $SYS_gettid, R11 ++ SYSCALL ++ ++ MOVV -24(R3), R25 // fn ++ MOVV -16(R3), R24 // g ++ MOVV -8(R3), R23 // m ++ ++ BEQ R23, nog ++ BEQ R24, nog ++ ++ MOVV R4, m_procid(R23) ++ ++ // TODO: setup TLS. 
++ ++ // In child, set up new stack ++ MOVV R23, g_m(R24) ++ MOVV R24, g ++ //CALL runtime·stackcheck(SB) ++ ++nog: ++ // Call fn ++ JAL (R25) ++ ++ // It shouldn't return. If it does, exit that thread. ++ MOVW $111, R4 ++ MOVV $SYS_exit, R11 ++ SYSCALL ++ JMP -3(PC) // keep exiting ++ ++TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0 ++ MOVV new+0(FP), R4 ++ MOVV old+8(FP), R5 ++ MOVV $SYS_sigaltstack, R11 ++ SYSCALL ++ MOVW $-4096, R5 ++ BGEU R5, R4, 2(PC) ++ MOVV R0, 0xf1(R0) // crash ++ RET ++ ++TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0 ++ MOVV $SYS_sched_yield, R11 ++ SYSCALL ++ RET ++ ++TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0 ++ MOVV pid+0(FP), R4 ++ MOVV len+8(FP), R5 ++ MOVV buf+16(FP), R6 ++ MOVV $SYS_sched_getaffinity, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// int32 runtime·epollcreate(int32 size); ++TEXT runtime·epollcreate(SB),NOSPLIT|NOFRAME,$0 ++ MOVW size+0(FP), R4 ++ MOVV $SYS_epoll_create1, R11 ++ SYSCALL ++ MOVW R4, ret+8(FP) ++ RET ++ ++// int32 runtime·epollcreate1(int32 flags); ++TEXT runtime·epollcreate1(SB),NOSPLIT|NOFRAME,$0 ++ MOVW flags+0(FP), R4 ++ MOVV $SYS_epoll_create1, R11 ++ SYSCALL ++ MOVW R4, ret+8(FP) ++ RET ++ ++// func epollctl(epfd, op, fd int32, ev *epollEvent) int ++TEXT runtime·epollctl(SB),NOSPLIT|NOFRAME,$0 ++ MOVW epfd+0(FP), R4 ++ MOVW op+4(FP), R5 ++ MOVW fd+8(FP), R6 ++ MOVV ev+16(FP), R7 ++ MOVV $SYS_epoll_ctl, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout); ++TEXT runtime·epollwait(SB),NOSPLIT|NOFRAME,$0 ++ MOVW epfd+0(FP), R4 ++ MOVV ev+8(FP), R5 ++ MOVW nev+16(FP), R6 ++ MOVW timeout+20(FP), R7 ++ MOVV $0, R8 ++ MOVV $SYS_epoll_pwait, R11 ++ SYSCALL ++ MOVW R4, ret+24(FP) ++ RET ++ ++// void runtime·closeonexec(int32 fd); ++TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0 ++ MOVW fd+0(FP), R4 // fd ++ MOVV $2, R5 // F_SETFD ++ MOVV $1, R6 // FD_CLOEXEC ++ MOVV $SYS_fcntl, R11 ++ SYSCALL ++ RET ++ ++// func runtime·setNonblock(int32 fd) ++TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4 ++ MOVW fd+0(FP), R4 // fd ++ MOVV $3, R5 // F_GETFL ++ MOVV $0, R6 ++ MOVV $SYS_fcntl, R11 ++ SYSCALL ++ MOVW $0x800, R6 // O_NONBLOCK ++ OR R4, R6 ++ MOVW fd+0(FP), R4 // fd ++ MOVV $4, R5 // F_SETFL ++ MOVV $SYS_fcntl, R11 ++ SYSCALL ++ RET ++ ++// func sbrk0() uintptr ++TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0-8 ++ // Implemented as brk(NULL). 
++ MOVV $0, R4 ++ MOVV $SYS_brk, R11 ++ SYSCALL ++ MOVV R4, ret+0(FP) ++ RET ++ ++TEXT runtime·access(SB),$0-20 ++ MOVV R0, 2(R0) // unimplemented, only needed for android; declared in stubs_linux.go ++ MOVW R0, ret+16(FP) // for vet ++ RET ++ ++TEXT runtime·connect(SB),$0-28 ++ MOVV R0, 2(R0) // unimplemented, only needed for android; declared in stubs_linux.go ++ MOVW R0, ret+24(FP) // for vet ++ RET ++ ++TEXT runtime·socket(SB),$0-20 ++ MOVV R0, 2(R0) // unimplemented, only needed for android; declared in stubs_linux.go ++ MOVW R0, ret+16(FP) // for vet ++ RET +-- +2.27.0 + diff --git a/0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch b/0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch new file mode 100644 index 0000000..5dd844c --- /dev/null +++ b/0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch @@ -0,0 +1,63 @@ +From a6e70cac715b37dc651680ca6530d97e22a9228a Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:42:46 +0800 +Subject: [PATCH 25/56] runtime: add build tag for common support on + linux/loong64 + +Change-Id: Ide01fb8a39fe3e890f6cbc5d28f4a1d47eb5d79b +--- + src/runtime/hash64.go | 4 ++-- + src/runtime/lfstack_64bit.go | 4 ++-- + src/runtime/mpagealloc_64bit.go | 6 ++++-- + 3 files changed, 8 insertions(+), 6 deletions(-) + +diff --git a/src/runtime/hash64.go b/src/runtime/hash64.go +index 5f7d00b..62ce67a 100644 +--- a/src/runtime/hash64.go ++++ b/src/runtime/hash64.go +@@ -5,8 +5,8 @@ + // Hashing algorithm inspired by + // wyhash: https://github.com/wangyi-fudan/wyhash + +-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm +-// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm ++//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm ++// +build amd64 arm64 loong64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm + + package runtime + +diff --git a/src/runtime/lfstack_64bit.go b/src/runtime/lfstack_64bit.go +index 4812dd1..71bd2fc 100644 +--- a/src/runtime/lfstack_64bit.go ++++ b/src/runtime/lfstack_64bit.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm +-// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm ++//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm ++// +build amd64 arm64 loong64 mips64 mips64le ppc64 ppc64le riscv64 s390x wasm + + package runtime + +diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go +index 8b72c25..1710ca9 100644 +--- a/src/runtime/mpagealloc_64bit.go ++++ b/src/runtime/mpagealloc_64bit.go +@@ -2,8 +2,10 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x +-// +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x ++//go:build amd64 || (!ios && arm64) || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x ++// +build amd64 !ios,arm64 loong64 mips64 mips64le ppc64 ppc64le riscv64 s390x ++ ++// See mpagealloc_32bit.go for why ios/arm64 is excluded here. 
+ + package runtime + +-- +2.27.0 + diff --git a/0026-runtime-fix-runtime-test-error-for-loong64.patch b/0026-runtime-fix-runtime-test-error-for-loong64.patch new file mode 100644 index 0000000..e4fd94f --- /dev/null +++ b/0026-runtime-fix-runtime-test-error-for-loong64.patch @@ -0,0 +1,26 @@ +From 4c228cf3bfdac143791a6363ad3f6eeab44d5445 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 1 Dec 2021 17:43:12 +0800 +Subject: [PATCH 26/56] runtime: fix runtime test error for loong64 + +Change-Id: I61bef32b38ab07543a147cf172b169eae21b26cf +--- + src/runtime/gcinfo_test.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/runtime/gcinfo_test.go b/src/runtime/gcinfo_test.go +index 0808b41..daa4ec5 100644 +--- a/src/runtime/gcinfo_test.go ++++ b/src/runtime/gcinfo_test.go +@@ -164,7 +164,7 @@ func infoBigStruct() []byte { + typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64 + typePointer, typeScalar, // i string + } +- case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "wasm": ++ case "arm64", "amd64", "loong64", "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "wasm": + return []byte{ + typePointer, // q *int + typeScalar, typeScalar, typeScalar, // w byte; e [17]byte +-- +2.27.0 + diff --git a/0027-runtime-internal-add-atomic-support-for-loong64.patch b/0027-runtime-internal-add-atomic-support-for-loong64.patch new file mode 100644 index 0000000..33d2ec3 --- /dev/null +++ b/0027-runtime-internal-add-atomic-support-for-loong64.patch @@ -0,0 +1,414 @@ +From cc5c48a0360aff425fa177002b81e7ae0ebb5483 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:55:35 +0800 +Subject: [PATCH 27/56] runtime/internal: add atomic support for loong64 + +Change-Id: I0333503db044c6f39df2d7f8d9dff213b1361d6c +--- + src/runtime/internal/atomic/atomic_loong64.go | 84 +++++ + src/runtime/internal/atomic/atomic_loong64.s | 302 ++++++++++++++++++ + 2 files changed, 386 insertions(+) + create mode 100644 src/runtime/internal/atomic/atomic_loong64.go + create mode 100644 src/runtime/internal/atomic/atomic_loong64.s + +diff --git a/src/runtime/internal/atomic/atomic_loong64.go b/src/runtime/internal/atomic/atomic_loong64.go +new file mode 100644 +index 0000000..5631226 +--- /dev/null ++++ b/src/runtime/internal/atomic/atomic_loong64.go +@@ -0,0 +1,84 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++//go:build loong64 ++// +build loong64 ++ ++package atomic ++ ++import "unsafe" ++ ++//go:noescape ++func Xadd(ptr *uint32, delta int32) uint32 ++ ++//go:noescape ++func Xadd64(ptr *uint64, delta int64) uint64 ++ ++//go:noescape ++func Xadduintptr(ptr *uintptr, delta uintptr) uintptr ++ ++//go:noescape ++func Xchg(ptr *uint32, new uint32) uint32 ++ ++//go:noescape ++func Xchg64(ptr *uint64, new uint64) uint64 ++ ++//go:noescape ++func Xchguintptr(ptr *uintptr, new uintptr) uintptr ++ ++//go:noescape ++func Load(ptr *uint32) uint32 ++ ++//go:noescape ++func Load8(ptr *uint8) uint8 ++ ++//go:noescape ++func Load64(ptr *uint64) uint64 ++ ++// NO go:noescape annotation; *ptr escapes if result escapes (#31525) ++func Loadp(ptr unsafe.Pointer) unsafe.Pointer ++ ++//go:noescape ++func LoadAcq(ptr *uint32) uint32 ++ ++//go:noescape ++func LoadAcquintptr(ptr *uintptr) uintptr ++ ++//go:noescape ++func And8(ptr *uint8, val uint8) ++ ++//go:noescape ++func And(ptr *uint32, val uint32) ++ ++//go:noescape ++func Or8(ptr *uint8, val uint8) ++ ++//go:noescape ++func Or(ptr *uint32, val uint32) ++ ++// NOTE: Do not add atomicxor8 (XOR is not idempotent). ++ ++//go:noescape ++func Cas64(ptr *uint64, old, new uint64) bool ++ ++//go:noescape ++func CasRel(ptr *uint32, old, new uint32) bool ++ ++//go:noescape ++func Store(ptr *uint32, val uint32) ++ ++//go:noescape ++func Store8(ptr *uint8, val uint8) ++ ++//go:noescape ++func Store64(ptr *uint64, val uint64) ++ ++// NO go:noescape annotation; see atomic_pointer.go. ++func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) ++ ++//go:noescape ++func StoreRel(ptr *uint32, val uint32) ++ ++//go:noescape ++func StoreReluintptr(ptr *uintptr, val uintptr) +diff --git a/src/runtime/internal/atomic/atomic_loong64.s b/src/runtime/internal/atomic/atomic_loong64.s +new file mode 100644 +index 0000000..fef7931 +--- /dev/null ++++ b/src/runtime/internal/atomic/atomic_loong64.s +@@ -0,0 +1,302 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++#include "textflag.h" ++ ++// DBAR sync load/store operation ++#define DBAR WORD $0x38720000 ++ ++// bool cas(uint32 *ptr, uint32 old, uint32 new) ++// Atomically: ++// if(*ptr == old){ ++// *ptr = new; ++// return 1; ++// } else ++// return 0; ++TEXT ·Cas(SB), NOSPLIT, $0-17 ++ MOVV ptr+0(FP), R4 ++ MOVW old+8(FP), R5 ++ MOVW new+12(FP), R6 ++ DBAR ++cas_again: ++ MOVV R6, R7 ++ LL (R4), R8 ++ BNE R5, R8, cas_fail ++ SC R7, (R4) ++ BEQ R7, cas_again ++ MOVV $1, R4 ++ MOVB R4, ret+16(FP) ++ DBAR ++ RET ++cas_fail: ++ MOVV $0, R4 ++ JMP -4(PC) ++ ++// bool cas64(uint64 *ptr, uint64 old, uint64 new) ++// Atomically: ++// if(*ptr == old){ ++// *ptr = new; ++// return 1; ++// } else { ++// return 0; ++// } ++TEXT ·Cas64(SB), NOSPLIT, $0-25 ++ MOVV ptr+0(FP), R4 ++ MOVV old+8(FP), R5 ++ MOVV new+16(FP), R6 ++ DBAR ++cas64_again: ++ MOVV R6, R7 ++ LLV (R4), R8 ++ BNE R5, R8, cas64_fail ++ SCV R7, (R4) ++ BEQ R7, cas64_again ++ MOVV $1, R4 ++ MOVB R4, ret+24(FP) ++ DBAR ++ RET ++cas64_fail: ++ MOVV $0, R4 ++ JMP -4(PC) ++ ++TEXT ·Casuintptr(SB), NOSPLIT, $0-25 ++ JMP ·Cas64(SB) ++ ++TEXT ·CasRel(SB), NOSPLIT, $0-17 ++ JMP ·Cas(SB) ++ ++TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 ++ JMP ·Load64(SB) ++ ++TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 ++ JMP ·Load64(SB) ++ ++TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 ++ JMP ·Store64(SB) ++ ++TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 ++ JMP ·Xadd64(SB) ++ ++TEXT ·Loadint64(SB), NOSPLIT, $0-16 ++ JMP ·Load64(SB) ++ ++TEXT ·Xaddint64(SB), NOSPLIT, $0-24 ++ JMP ·Xadd64(SB) ++ ++// bool casp(void **val, void *old, void *new) ++// Atomically: ++// if(*val == old){ ++// *val = new; ++// return 1; ++// } else ++// return 0; ++TEXT ·Casp1(SB), NOSPLIT, $0-25 ++ JMP runtime∕internal∕atomic·Cas64(SB) ++ ++// uint32 xadd(uint32 volatile *ptr, int32 delta) ++// Atomically: ++// *val += delta; ++// return *val; ++TEXT ·Xadd(SB), NOSPLIT, $0-20 ++ MOVV ptr+0(FP), R4 ++ MOVW delta+8(FP), R5 ++ DBAR ++ LL (R4), R6 ++ ADDU R6, R5, R7 ++ MOVV R7, R6 ++ SC R7, (R4) ++ BEQ R7, -4(PC) ++ MOVW R6, ret+16(FP) ++ DBAR ++ RET ++ ++TEXT ·Xadd64(SB), NOSPLIT, $0-24 ++ MOVV ptr+0(FP), R4 ++ MOVV delta+8(FP), R5 ++ DBAR ++ LLV (R4), R6 ++ ADDVU R6, R5, R7 ++ MOVV R7, R6 ++ SCV R7, (R4) ++ BEQ R7, -4(PC) ++ MOVV R6, ret+16(FP) ++ DBAR ++ RET ++ ++TEXT ·Xchg(SB), NOSPLIT, $0-20 ++ MOVV ptr+0(FP), R4 ++ MOVW new+8(FP), R5 ++ ++ DBAR ++ MOVV R5, R6 ++ LL (R4), R7 ++ SC R6, (R4) ++ BEQ R6, -3(PC) ++ MOVW R7, ret+16(FP) ++ DBAR ++ RET ++ ++TEXT ·Xchg64(SB), NOSPLIT, $0-24 ++ MOVV ptr+0(FP), R4 ++ MOVV new+8(FP), R5 ++ ++ DBAR ++ MOVV R5, R6 ++ LLV (R4), R7 ++ SCV R6, (R4) ++ BEQ R6, -3(PC) ++ MOVV R7, ret+16(FP) ++ DBAR ++ RET ++ ++TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 ++ JMP ·Xchg64(SB) ++ ++TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 ++ JMP ·Store64(SB) ++ ++TEXT ·StoreRel(SB), NOSPLIT, $0-12 ++ JMP ·Store(SB) ++ ++TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 ++ JMP ·Store64(SB) ++ ++TEXT ·Store(SB), NOSPLIT, $0-12 ++ MOVV ptr+0(FP), R4 ++ MOVW val+8(FP), R5 ++ DBAR ++ MOVW R5, 0(R4) ++ DBAR ++ RET ++ ++TEXT ·Store8(SB), NOSPLIT, $0-9 ++ MOVV ptr+0(FP), R4 ++ MOVB val+8(FP), R5 ++ DBAR ++ MOVB R5, 0(R4) ++ DBAR ++ RET ++ ++TEXT ·Store64(SB), NOSPLIT, $0-16 ++ MOVV ptr+0(FP), R4 ++ MOVV val+8(FP), R5 ++ DBAR ++ MOVV R5, 0(R4) ++ DBAR ++ RET ++ ++// void Or8(byte volatile*, byte); ++TEXT ·Or8(SB), NOSPLIT, $0-9 ++ MOVV ptr+0(FP), R4 ++ MOVBU val+8(FP), R5 ++ // Align ptr down to 4 bytes so we can use 32-bit load/store. 
++ MOVV $~3, R6 ++ AND R4, R6 ++ // R7 = ((ptr & 3) * 8) ++ AND $3, R4, R7 ++ SLLV $3, R7 ++ // Shift val for aligned ptr. R5 = val << R4 ++ SLLV R7, R5 ++ ++ DBAR ++ LL (R6), R7 ++ OR R5, R7 ++ SC R7, (R6) ++ BEQ R7, -4(PC) ++ DBAR ++ RET ++ ++// void And8(byte volatile*, byte); ++TEXT ·And8(SB), NOSPLIT, $0-9 ++ MOVV ptr+0(FP), R4 ++ MOVBU val+8(FP), R5 ++ // Align ptr down to 4 bytes so we can use 32-bit load/store. ++ MOVV $~3, R6 ++ AND R4, R6 ++ // R7 = ((ptr & 3) * 8) ++ AND $3, R4, R7 ++ SLLV $3, R7 ++ // Shift val for aligned ptr. R5 = val << R7 | ^(0xFF << R7) ++ MOVV $0xFF, R8 ++ SLLV R7, R5 ++ SLLV R7, R8 ++ NOR R0, R8 ++ OR R8, R5 ++ ++ DBAR ++ LL (R6), R7 ++ AND R5, R7 ++ SC R7, (R6) ++ BEQ R7, -4(PC) ++ DBAR ++ RET ++ ++// func Or(addr *uint32, v uint32) ++TEXT ·Or(SB), NOSPLIT, $0-12 ++ MOVV ptr+0(FP), R4 ++ MOVW val+8(FP), R5 ++ DBAR ++ LL (R4), R6 ++ OR R5, R6 ++ SC R6, (R4) ++ BEQ R6, -4(PC) ++ DBAR ++ RET ++ ++// func And(addr *uint32, v uint32) ++TEXT ·And(SB), NOSPLIT, $0-12 ++ MOVV ptr+0(FP), R4 ++ MOVW val+8(FP), R5 ++ DBAR ++ LL (R4), R6 ++ AND R5, R6 ++ SC R6, (R4) ++ BEQ R6, -4(PC) ++ DBAR ++ RET ++ ++// uint32 runtime∕internal∕atomic·Load(uint32 volatile* ptr) ++TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 ++ MOVV ptr+0(FP), R19 ++ DBAR ++ MOVWU 0(R19), R19 ++ DBAR ++ MOVW R19, ret+8(FP) ++ RET ++ ++// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* ptr) ++TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 ++ MOVV ptr+0(FP), R19 ++ DBAR ++ MOVBU 0(R19), R19 ++ DBAR ++ MOVB R19, ret+8(FP) ++ RET ++ ++// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* ptr) ++TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 ++ MOVV ptr+0(FP), R19 ++ DBAR ++ MOVV 0(R19), R19 ++ DBAR ++ MOVV R19, ret+8(FP) ++ RET ++ ++// void *runtime∕internal∕atomic·Loadp(void *volatile *ptr) ++TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16 ++ MOVV ptr+0(FP), R19 ++ DBAR ++ MOVV 0(R19), R19 ++ DBAR ++ MOVV R19, ret+8(FP) ++ RET ++ ++// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr) ++TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 ++ JMP atomic·Load(SB) ++ ++// uintptr ·LoadAcquintptr(uintptr volatile* ptr) ++TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 ++ JMP atomic·Load64(SB) ++ +-- +2.27.0 + diff --git a/0028-cmd-cgo-configure-cgo-tool-for-loong64.patch b/0028-cmd-cgo-configure-cgo-tool-for-loong64.patch new file mode 100644 index 0000000..395192a --- /dev/null +++ b/0028-cmd-cgo-configure-cgo-tool-for-loong64.patch @@ -0,0 +1,47 @@ +From 5526b3e6b0aa12a081a94a078e7b7ac792dbbdb7 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 15:32:16 +0800 +Subject: [PATCH 28/56] cmd/cgo: configure cgo tool for loong64 + +Change-Id: I9699fd9af0112e72193ac24b736b85c580887a0f +--- + src/cmd/cgo/gcc.go | 2 ++ + src/cmd/cgo/main.go | 2 ++ + 2 files changed, 4 insertions(+) + +diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go +index a73e998..b9250d5 100644 +--- a/src/cmd/cgo/gcc.go ++++ b/src/cmd/cgo/gcc.go +@@ -1593,6 +1593,8 @@ func (p *Package) gccMachine() []string { + } else if gomips == "softfloat" { + return []string{"-mabi=32", "-msoft-float"} + } ++ case "loong64": ++ return []string{"-mabi=lp64d"} + } + return nil + } +diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go +index c6a0c52..46bd91b 100644 +--- a/src/cmd/cgo/main.go ++++ b/src/cmd/cgo/main.go +@@ -176,6 +176,7 @@ var ptrSizeMap = map[string]int64{ + "amd64": 8, + "arm": 4, + "arm64": 8, ++ "loong64": 8, + "m68k": 4, + "mips": 4, + "mipsle": 4, +@@ -201,6 +202,7 @@ var intSizeMap = map[string]int64{ + "amd64": 8, + "arm": 4, + "arm64": 8, ++ 
"loong64": 8, + "m68k": 4, + "mips": 4, + "mipsle": 4, +-- +2.27.0 + diff --git a/0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch b/0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch new file mode 100644 index 0000000..2f5f9e5 --- /dev/null +++ b/0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch @@ -0,0 +1,250 @@ +From 442bacd59da52a6f952723c8327986e8278cbcbe Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:53:45 +0800 +Subject: [PATCH 29/56] runtime/cgo: add cgo function call support for loong64 + +Change-Id: I8ef0e7f17d6ada3d2f07c81524136b78457e7795 +--- + src/runtime/cgo/asm_loong64.s | 69 +++++++++++++++++++++++++ + src/runtime/cgo/gcc_linux_loong64.c | 78 +++++++++++++++++++++++++++++ + src/runtime/cgo/gcc_loong64.S | 67 +++++++++++++++++++++++++ + 3 files changed, 214 insertions(+) + create mode 100644 src/runtime/cgo/asm_loong64.s + create mode 100644 src/runtime/cgo/gcc_linux_loong64.c + create mode 100644 src/runtime/cgo/gcc_loong64.S + +diff --git a/src/runtime/cgo/asm_loong64.s b/src/runtime/cgo/asm_loong64.s +new file mode 100644 +index 0000000..aa5a4ca +--- /dev/null ++++ b/src/runtime/cgo/asm_loong64.s +@@ -0,0 +1,69 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "textflag.h" ++ ++// Called by C code generated by cmd/cgo. ++// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) ++// Saves C callee-saved registers and calls cgocallback with three arguments. ++// fn is the PC of a func(a unsafe.Pointer) function. ++TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0 ++ /* ++ * We still need to save all callee save register as before, and then ++ * push 3 args for fn (R4, R5, R7), skipping R6. ++ * Also note that at procedure entry in gc world, 8(R29) will be the ++ * first arg. ++ */ ++ ++ ADDV $(-8*22), R3 ++ MOVV R4, (8*1)(R3) // fn unsafe.Pointer ++ MOVV R5, (8*2)(R3) // a unsafe.Pointer ++ MOVV R7, (8*3)(R3) // ctxt uintptr ++ MOVV R23, (8*4)(R3) ++ MOVV R24, (8*5)(R3) ++ MOVV R25, (8*6)(R3) ++ MOVV R26, (8*7)(R3) ++ MOVV R27, (8*8)(R3) ++ MOVV R28, (8*9)(R3) ++ MOVV R29, (8*10)(R3) ++ MOVV R30, (8*11)(R3) ++ MOVV g, (8*12)(R3) ++ MOVV R1, (8*13)(R3) ++ MOVD F24, (8*14)(R3) ++ MOVD F25, (8*15)(R3) ++ MOVD F26, (8*16)(R3) ++ MOVD F27, (8*17)(R3) ++ MOVD F28, (8*18)(R3) ++ MOVD F29, (8*19)(R3) ++ MOVD F30, (8*20)(R3) ++ MOVD F31, (8*21)(R3) ++ ++ // Initialize Go ABI environment ++ JAL runtime·load_g(SB) ++ ++ JAL runtime·cgocallback(SB) ++ ++ MOVV (8*4)(R3), R23 ++ MOVV (8*5)(R3), R24 ++ MOVV (8*6)(R3), R25 ++ MOVV (8*7)(R3), R26 ++ MOVV (8*8)(R3), R27 ++ MOVV (8*9)(R3), R28 ++ MOVV (8*10)(R3), R29 ++ MOVV (8*11)(R3), R30 ++ MOVV (8*12)(R3), g ++ MOVV (8*13)(R3), R1 ++ MOVD (8*14)(R3), F24 ++ MOVD (8*15)(R3), F25 ++ MOVD (8*16)(R3), F26 ++ MOVD (8*17)(R3), F27 ++ MOVD (8*18)(R3), F28 ++ MOVD (8*19)(R3), F29 ++ MOVD (8*20)(R3), F30 ++ MOVD (8*21)(R3), F31 ++ ADDV $(8*22), R3 ++ ++ RET +diff --git a/src/runtime/cgo/gcc_linux_loong64.c b/src/runtime/cgo/gcc_linux_loong64.c +new file mode 100644 +index 0000000..c8c5fda +--- /dev/null ++++ b/src/runtime/cgo/gcc_linux_loong64.c +@@ -0,0 +1,78 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++// +build cgo ++// +build linux ++// +build loong64 ++ ++#include ++#include ++#include ++#include "libcgo.h" ++#include "libcgo_unix.h" ++ ++static void *threadentry(void*); ++ ++void (*x_cgo_inittls)(void **tlsg, void **tlsbase); ++static void (*setg_gcc)(void*); ++ ++void ++_cgo_sys_thread_start(ThreadStart *ts) ++{ ++ pthread_attr_t attr; ++ sigset_t ign, oset; ++ pthread_t p; ++ size_t size; ++ int err; ++ ++ sigfillset(&ign); ++ pthread_sigmask(SIG_SETMASK, &ign, &oset); ++ ++ // Not sure why the memset is necessary here, ++ // but without it, we get a bogus stack size ++ // out of pthread_attr_getstacksize. C'est la Linux. ++ memset(&attr, 0, sizeof attr); ++ pthread_attr_init(&attr); ++ size = 0; ++ pthread_attr_getstacksize(&attr, &size); ++ // Leave stacklo=0 and set stackhi=size; mstart will do the rest. ++ ts->g->stackhi = size; ++ err = _cgo_try_pthread_create(&p, &attr, threadentry, ts); ++ ++ pthread_sigmask(SIG_SETMASK, &oset, nil); ++ ++ if (err != 0) { ++ fatalf("pthread_create failed: %s", strerror(err)); ++ } ++} ++ ++extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g); ++static void* ++threadentry(void *v) ++{ ++ ThreadStart ts; ++ ++ ts = *(ThreadStart*)v; ++ free(v); ++ ++ crosscall1(ts.fn, setg_gcc, (void*)ts.g); ++ return nil; ++} ++ ++void ++x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase) ++{ ++ pthread_attr_t attr; ++ size_t size; ++ ++ setg_gcc = setg; ++ pthread_attr_init(&attr); ++ pthread_attr_getstacksize(&attr, &size); ++ g->stacklo = (uintptr)&attr - size + 4096; ++ pthread_attr_destroy(&attr); ++ ++ if (x_cgo_inittls) { ++ x_cgo_inittls(tlsg, tlsbase); ++ } ++} +diff --git a/src/runtime/cgo/gcc_loong64.S b/src/runtime/cgo/gcc_loong64.S +new file mode 100644 +index 0000000..e294164 +--- /dev/null ++++ b/src/runtime/cgo/gcc_loong64.S +@@ -0,0 +1,67 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++/* ++ * void crosscall1(void (*fn)(void), void (*setg_gcc)(void *g), void *g) ++ * ++ * Calling into the gc tool chain, where all registers are caller save. ++ * Called from standard lp64d ABI, where $r1, $r3, $r23-$r30, and $f24-$f31 ++ * are callee-save, so they must be saved explicitly, along with $r1 (LR). 
++ */ ++.globl crosscall1 ++crosscall1: ++ addi.d $r3, $r3, -160 ++ st.d $r1, $r3, 0 ++ st.d $r23, $r3, 8 ++ st.d $r24, $r3, 16 ++ st.d $r25, $r3, 24 ++ st.d $r26, $r3, 32 ++ st.d $r27, $r3, 40 ++ st.d $r28, $r3, 48 ++ st.d $r29, $r3, 56 ++ st.d $r30, $r3, 64 ++ st.d $r2, $r3, 72 ++ st.d $r22, $r3, 80 ++ fst.d $f24, $r3, 88 ++ fst.d $f25, $r3, 96 ++ fst.d $f26, $r3, 104 ++ fst.d $f27, $r3, 112 ++ fst.d $f28, $r3, 120 ++ fst.d $f29, $r3, 128 ++ fst.d $f30, $r3, 136 ++ fst.d $f31, $r3, 144 ++ ++ move $r18, $r4 // save R4 ++ move $r19, $r6 ++ jirl $r1, $r5, 0 // call setg_gcc (clobbers R4) ++ jirl $r1, $r18, 0 // call fn ++ ++ ld.d $r23, $r3, 8 ++ ld.d $r24, $r3, 16 ++ ld.d $r25, $r3, 24 ++ ld.d $r26, $r3, 32 ++ ld.d $r27, $r3, 40 ++ ld.d $r28, $r3, 48 ++ ld.d $r29, $r3, 56 ++ ld.d $r30, $r3, 64 ++ ld.d $r2, $r3, 72 ++ ld.d $r22, $r3, 80 ++ fld.d $f24, $r3, 88 ++ fld.d $f25, $r3, 96 ++ fld.d $f26, $r3, 104 ++ fld.d $f27, $r3, 112 ++ fld.d $f28, $r3, 120 ++ fld.d $f29, $r3, 128 ++ fld.d $f30, $r3, 136 ++ fld.d $f31, $r3, 144 ++ ld.d $r1, $r3, 0 ++ addi.d $r3, $r3, 160 ++ jirl $r0, $r1, 0 ++ ++ ++#ifdef __ELF__ ++.section .note.GNU-stack,"",%progbits ++#endif +-- +2.27.0 + diff --git a/0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch b/0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch new file mode 100644 index 0000000..cebbd76 --- /dev/null +++ b/0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch @@ -0,0 +1,55 @@ +From 1a312cc0cc1adfd4a1cce476b30b849ef42c71b8 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 17:06:29 +0800 +Subject: [PATCH 30/56] cmd/nm, cmd/objdump, cmd/pprof: disassembly is not + supported on loong64 + +Change-Id: Ic96e4f0c46d9a6b8cd020e899f32c40681daf9c5 +--- + src/cmd/nm/nm_cgo_test.go | 2 +- + src/cmd/objdump/objdump_test.go | 2 ++ + src/cmd/pprof/pprof_test.go | 2 ++ + 3 files changed, 5 insertions(+), 1 deletion(-) + +diff --git a/src/cmd/nm/nm_cgo_test.go b/src/cmd/nm/nm_cgo_test.go +index 1544be0..24f4321 100644 +--- a/src/cmd/nm/nm_cgo_test.go ++++ b/src/cmd/nm/nm_cgo_test.go +@@ -25,7 +25,7 @@ func canInternalLink() bool { + } + case "linux": + switch runtime.GOARCH { +- case "arm64", "mips64", "mips64le", "mips", "mipsle", "ppc64", "ppc64le", "riscv64": ++ case "arm64", "loong64", "mips64", "mips64le", "mips", "mipsle", "ppc64", "ppc64le", "riscv64": + return false + } + case "openbsd": +diff --git a/src/cmd/objdump/objdump_test.go b/src/cmd/objdump/objdump_test.go +index f231a7c..40fe21b 100644 +--- a/src/cmd/objdump/objdump_test.go ++++ b/src/cmd/objdump/objdump_test.go +@@ -107,6 +107,8 @@ var ppcGnuNeed = []string{ + t.Skipf("builder %q has an incompatible version of XCode installed, see go.dev/issue/49700", testenv.Builder()) + } + switch runtime.GOARCH { ++ case "loong64": ++ t.Skipf("skipping on %s", runtime.GOARCH) + case "mips", "mipsle", "mips64", "mips64le": + t.Skipf("skipping on %s, issue 12559", runtime.GOARCH) + case "riscv64": +diff --git a/src/cmd/pprof/pprof_test.go b/src/cmd/pprof/pprof_test.go +index 11e251b..9a37b97 100644 +--- a/src/cmd/pprof/pprof_test.go ++++ b/src/cmd/pprof/pprof_test.go +@@ -72,6 +72,8 @@ func mustHaveCPUProfiling(t *testing.T) { + + func mustHaveDisasm(t *testing.T) { + switch runtime.GOARCH { ++ case "loong64": ++ t.Skipf("skipping on %s.", runtime.GOARCH) + case "mips", "mipsle", "mips64", "mips64le": + t.Skipf("skipping on %s, issue 12559", runtime.GOARCH) + case "riscv64": +-- +2.27.0 + diff --git a/0031-cmd-dist-support-dist-tool-for-loong64.patch 
b/0031-cmd-dist-support-dist-tool-for-loong64.patch new file mode 100644 index 0000000..12d9e20 --- /dev/null +++ b/0031-cmd-dist-support-dist-tool-for-loong64.patch @@ -0,0 +1,88 @@ +From 544b21c07975cb02be7f315e5e337766ce668529 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 15:53:05 +0800 +Subject: [PATCH 31/56] cmd/dist: support dist tool for loong64 + +Change-Id: I61dca43680d8e5bd3198a38577450a53f405a987 +--- + src/cmd/dist/build.go | 2 ++ + src/cmd/dist/main.go | 4 +++- + src/cmd/dist/test.go | 6 +++--- + 3 files changed, 8 insertions(+), 4 deletions(-) + +diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go +index bec1769..c18ca75 100644 +--- a/src/cmd/dist/build.go ++++ b/src/cmd/dist/build.go +@@ -65,6 +65,7 @@ var okgoarch = []string{ + "amd64", + "arm", + "arm64", ++ "loong64", + "mips", + "mipsle", + "mips64", +@@ -1571,6 +1572,7 @@ var cgoEnabled = map[string]bool{ + "linux/amd64": true, + "linux/arm": true, + "linux/arm64": true, ++ "linux/loong64": true, + "linux/ppc64": false, + "linux/ppc64le": true, + "linux/mips": true, +diff --git a/src/cmd/dist/main.go b/src/cmd/dist/main.go +index 37de1ac..f8ef99f 100644 +--- a/src/cmd/dist/main.go ++++ b/src/cmd/dist/main.go +@@ -125,6 +125,8 @@ func main() { + if elfIsLittleEndian(os.Args[0]) { + gohostarch = "mipsle" + } ++ case strings.Contains(out, "loongarch64"): ++ gohostarch = "loong64" + case strings.Contains(out, "riscv64"): + gohostarch = "riscv64" + case strings.Contains(out, "s390x"): +@@ -142,7 +144,7 @@ func main() { + } + } + +- if gohostarch == "arm" || gohostarch == "mips64" || gohostarch == "mips64le" { ++ if gohostarch == "arm" || gohostarch == "mips64" || gohostarch == "mips64le" || gohostarch == "loong64" { + maxbg = min(maxbg, runtime.NumCPU()) + } + bginit() +diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go +index f40fa92..485fe6b 100644 +--- a/src/cmd/dist/test.go ++++ b/src/cmd/dist/test.go +@@ -166,7 +166,7 @@ func (t *tester) run() { + switch goarch { + case "arm": + t.timeoutScale = 2 +- case "mips", "mipsle", "mips64", "mips64le": ++ case "loong64", "mips", "mipsle", "mips64", "mips64le": + t.timeoutScale = 4 + } + if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { +@@ -983,7 +983,7 @@ func (t *tester) extLink() bool { + "darwin-amd64", "darwin-arm64", + "dragonfly-amd64", + "freebsd-386", "freebsd-amd64", "freebsd-arm", +- "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-mips64", "linux-mips64le", "linux-mips", "linux-mipsle", "linux-riscv64", "linux-s390x", ++ "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-loong64", "linux-ppc64le", "linux-mips64", "linux-mips64le", "linux-mips", "linux-mipsle", "linux-riscv64", "linux-s390x", + "netbsd-386", "netbsd-amd64", + "openbsd-386", "openbsd-amd64", + "windows-386", "windows-amd64": +@@ -1014,7 +1014,7 @@ func (t *tester) internalLink() bool { + // Internally linking cgo is incomplete on some architectures. 
+ // https://golang.org/issue/10373 + // https://golang.org/issue/14449 +- if goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" || goarch == "riscv64" { ++ if goarch == "loong64" || goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" || goarch == "riscv64" { + return false + } + if goos == "aix" { +-- +2.27.0 + diff --git a/0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch b/0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch new file mode 100644 index 0000000..a64f82d --- /dev/null +++ b/0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch @@ -0,0 +1,2740 @@ +From d8abfcf9ec6d35d968345c987d4607d40383a886 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 17:00:41 +0800 +Subject: [PATCH 32/56] cmd/vendor: update vendored golang.org/x/sys to support + syscall on loong64 + +Change-Id: Id247072a416c9e3da9de801a3daacf1b60ff3f24 +--- + .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 ++ + .../golang.org/x/sys/unix/endian_little.go | 4 +- + .../golang.org/x/sys/unix/syscall_linux.go | 2 +- + .../x/sys/unix/syscall_linux_loong64.go | 221 +++++ + .../x/sys/unix/zerrors_linux_loong64.go | 831 ++++++++++++++++++ + .../x/sys/unix/zsyscall_linux_loong64.go | 563 ++++++++++++ + .../x/sys/unix/zsysnum_linux_loong64.go | 313 +++++++ + .../x/sys/unix/ztypes_linux_loong64.go | 667 ++++++++++++++ + 8 files changed, 2652 insertions(+), 3 deletions(-) + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go + create mode 100644 src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go + +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +new file mode 100644 +index 0000000..1ccfa5d +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +@@ -0,0 +1,54 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build linux && loong64 && gc ++// +build linux ++// +build loong64 ++// +build gc ++ ++#include "textflag.h" ++ ++ ++// Just jump to package syscall's implementation for all these functions. ++// The runtime may know about them. 
++ ++TEXT ·Syscall(SB),NOSPLIT,$0-56 ++ JMP syscall·Syscall(SB) ++ ++TEXT ·Syscall6(SB),NOSPLIT,$0-80 ++ JMP syscall·Syscall6(SB) ++ ++TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 ++ JAL runtime·entersyscall(SB) ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVV R4, r1+32(FP) ++ MOVV R5, r2+40(FP) ++ JAL runtime·exitsyscall(SB) ++ RET ++ ++TEXT ·RawSyscall(SB),NOSPLIT,$0-56 ++ JMP syscall·RawSyscall(SB) ++ ++TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 ++ JMP syscall·RawSyscall6(SB) ++ ++TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVV R4, r1+32(FP) ++ MOVV R5, r2+40(FP) ++ RET +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go b/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go +index 4362f47..b0f2bc4 100644 +--- a/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go ++++ b/src/cmd/vendor/golang.org/x/sys/unix/endian_little.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + // +-//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +-// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh ++//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh ++// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh + + package unix + +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go +index 2dd7c8e..d40e011 100644 +--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go ++++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go +@@ -1732,7 +1732,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e + + func Dup2(oldfd, newfd int) error { + // Android O and newer blocks dup2; riscv and arm64 don't implement dup2. +- if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" { ++ if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" || runtime.GOARCH == "loong64" { + return Dup3(oldfd, newfd, 0) + } + return dup2(oldfd, newfd) +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +new file mode 100644 +index 0000000..0714ce6 +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +@@ -0,0 +1,221 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++//go:build loong64 && linux ++// +build loong64,linux ++ ++package unix ++ ++import "unsafe" ++ ++//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT ++//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 ++//sys Fchown(fd int, uid int, gid int) (err error) ++//sys Fstat(fd int, stat *Stat_t) (err error) ++//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) ++//sys Fstatfs(fd int, buf *Statfs_t) (err error) ++//sys Ftruncate(fd int, length int64) (err error) ++//sysnb Getegid() (egid int) ++//sysnb Geteuid() (euid int) ++//sysnb Getgid() (gid int) ++//sysnb Getuid() (uid int) ++//sys Listen(s int, n int) (err error) ++//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 ++//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 ++//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK ++ ++func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { ++ var ts *Timespec ++ if timeout != nil { ++ ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} ++ } ++ return Pselect(nfd, r, w, e, ts, nil) ++} ++ ++//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) ++//sys setfsgid(gid int) (prev int, err error) ++//sys setfsuid(uid int) (prev int, err error) ++//sysnb Setregid(rgid int, egid int) (err error) ++//sysnb Setresgid(rgid int, egid int, sgid int) (err error) ++//sysnb Setresuid(ruid int, euid int, suid int) (err error) ++//sysnb Setreuid(ruid int, euid int) (err error) ++//sys Shutdown(fd int, how int) (err error) ++//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) ++ ++func Stat(path string, stat *Stat_t) (err error) { ++ return Fstatat(AT_FDCWD, path, stat, 0) ++} ++ ++func Lchown(path string, uid int, gid int) (err error) { ++ return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) ++} ++ ++func Lstat(path string, stat *Stat_t) (err error) { ++ return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) ++} ++ ++//sys Statfs(path string, buf *Statfs_t) (err error) ++//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) ++//sys Truncate(path string, length int64) (err error) ++ ++func Ustat(dev int, ubuf *Ustat_t) (err error) { ++ return ENOSYS ++} ++ ++//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) ++//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) ++//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) ++//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) ++//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) ++//sysnb setgroups(n int, list *_Gid_t) (err error) ++//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) ++//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) ++//sysnb socket(domain int, typ int, proto int) (fd int, err error) ++//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) ++//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) ++//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) ++//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) ++//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) ++//sys 
recvmsg(s int, msg *Msghdr, flags int) (n int, err error) ++//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) ++//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) ++ ++//sysnb Gettimeofday(tv *Timeval) (err error) ++ ++func setTimespec(sec, nsec int64) Timespec { ++ return Timespec{Sec: sec, Nsec: nsec} ++} ++ ++func setTimeval(sec, usec int64) Timeval { ++ return Timeval{Sec: sec, Usec: usec} ++} ++ ++func Pipe(p []int) (err error) { ++ if len(p) != 2 { ++ return EINVAL ++ } ++ var pp [2]_C_int ++ err = pipe2(&pp, 0) ++ p[0] = int(pp[0]) ++ p[1] = int(pp[1]) ++ return err ++} ++ ++//sysnb pipe2(p *[2]_C_int, flags int) (err error) ++ ++func Pipe2(p []int, flags int) (err error) { ++ if len(p) != 2 { ++ return EINVAL ++ } ++ var pp [2]_C_int ++ err = pipe2(&pp, flags) ++ p[0] = int(pp[0]) ++ p[1] = int(pp[1]) ++ return err ++} ++ ++func Getrlimit(resource int, rlim *Rlimit) (err error) { ++ err = prlimit(0, resource, nil, rlim) ++ return ++} ++ ++func Setrlimit(resource int, rlim *Rlimit) (err error) { ++ err = prlimit(0, resource, rlim, nil) ++ return ++} ++ ++func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { ++ if tv == nil { ++ return utimensat(dirfd, path, nil, 0) ++ } ++ ++ ts := []Timespec{ ++ NsecToTimespec(TimevalToNsec(tv[0])), ++ NsecToTimespec(TimevalToNsec(tv[1])), ++ } ++ return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) ++} ++ ++func Time(t *Time_t) (Time_t, error) { ++ var tv Timeval ++ err := Gettimeofday(&tv) ++ if err != nil { ++ return 0, err ++ } ++ if t != nil { ++ *t = Time_t(tv.Sec) ++ } ++ return Time_t(tv.Sec), nil ++} ++ ++func Utime(path string, buf *Utimbuf) error { ++ tv := []Timeval{ ++ {Sec: buf.Actime}, ++ {Sec: buf.Modtime}, ++ } ++ return Utimes(path, tv) ++} ++ ++func utimes(path string, tv *[2]Timeval) (err error) { ++ if tv == nil { ++ return utimensat(AT_FDCWD, path, nil, 0) ++ } ++ ++ ts := []Timespec{ ++ NsecToTimespec(TimevalToNsec(tv[0])), ++ NsecToTimespec(TimevalToNsec(tv[1])), ++ } ++ return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) ++} ++ ++func (r *PtraceRegs) GetEra() uint64 { return r.Era } ++ ++func (r *PtraceRegs) SetEra(era uint64) { r.Era = era } ++ ++func (iov *Iovec) SetLen(length int) { ++ iov.Len = uint64(length) ++} ++ ++func (msghdr *Msghdr) SetControllen(length int) { ++ msghdr.Controllen = uint64(length) ++} ++ ++func (msghdr *Msghdr) SetIovlen(length int) { ++ msghdr.Iovlen = uint64(length) ++} ++ ++func (cmsg *Cmsghdr) SetLen(length int) { ++ cmsg.Len = uint64(length) ++} ++ ++func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { ++ rsa.Service_name_len = uint64(length) ++} ++ ++func Pause() error { ++ _, err := ppoll(nil, 0, nil, nil) ++ return err ++} ++ ++func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { ++ return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) ++} ++ ++//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) ++ ++func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { ++ cmdlineLen := len(cmdline) ++ if cmdlineLen > 0 { ++ // Account for the additional NULL byte added by ++ // BytePtrFromString in kexecFileLoad. The kexec_file_load ++ // syscall expects a NULL-terminated string. 
++ cmdlineLen++ ++ } ++ return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) ++} ++ ++// dup2 exists because func Dup3 in syscall_linux.go references ++// it in an unreachable path. dup2 isn't available on arm64. ++func dup2(oldfd int, newfd int) error ++ +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +new file mode 100644 +index 0000000..0b93031 +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +@@ -0,0 +1,831 @@ ++// mkerrors.sh -Wall -Werror -static -I/tmp/include ++// Code generated by the command above; see README.md. DO NOT EDIT. ++ ++//go:build loong64 && linux ++// +build loong64,linux ++ ++// Code generated by cmd/cgo -godefs; DO NOT EDIT. ++// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go ++ ++package unix ++ ++import "syscall" ++ ++const ( ++ B1000000 = 0x1008 ++ B115200 = 0x1002 ++ B1152000 = 0x1009 ++ B1500000 = 0x100a ++ B2000000 = 0x100b ++ B230400 = 0x1003 ++ B2500000 = 0x100c ++ B3000000 = 0x100d ++ B3500000 = 0x100e ++ B4000000 = 0x100f ++ B460800 = 0x1004 ++ B500000 = 0x1005 ++ B57600 = 0x1001 ++ B576000 = 0x1006 ++ B921600 = 0x1007 ++ BLKBSZGET = 0x80081270 ++ BLKBSZSET = 0x40081271 ++ BLKFLSBUF = 0x1261 ++ BLKFRAGET = 0x1265 ++ BLKFRASET = 0x1264 ++ BLKGETSIZE = 0x1260 ++ BLKGETSIZE64 = 0x80081272 ++ BLKPBSZGET = 0x127b ++ BLKRAGET = 0x1263 ++ BLKRASET = 0x1262 ++ BLKROGET = 0x125e ++ BLKROSET = 0x125d ++ BLKRRPART = 0x125f ++ BLKSECTGET = 0x1267 ++ BLKSECTSET = 0x1266 ++ BLKSSZGET = 0x1268 ++ BOTHER = 0x1000 ++ BS1 = 0x2000 ++ BSDLY = 0x2000 ++ CBAUD = 0x100f ++ CBAUDEX = 0x1000 ++ CEPH_SUPER_MAGIC = 0xc36400 ++ CIBAUD = 0x100f0000 ++ CIFS_SUPER_MAGIC = 0xff534d42 ++ CLOCAL = 0x800 ++ CR1 = 0x200 ++ CR2 = 0x400 ++ CR3 = 0x600 ++ CRDLY = 0x600 ++ CREAD = 0x80 ++ CS6 = 0x10 ++ CS7 = 0x20 ++ CS8 = 0x30 ++ CSIZE = 0x30 ++ CSTOPB = 0x40 ++ ECCGETLAYOUT = 0x81484d11 ++ ECCGETSTATS = 0x80104d12 ++ ECHOCTL = 0x200 ++ ECHOE = 0x10 ++ ECHOK = 0x20 ++ ECHOKE = 0x800 ++ ECHONL = 0x40 ++ ECHOPRT = 0x400 ++ EFD_CLOEXEC = 0x80000 ++ EFD_NONBLOCK = 0x800 ++ EPOLL_CLOEXEC = 0x80000 ++ EXFAT_SUPER_MAGIC = 0x2011bab0 ++ EXTPROC = 0x10000 ++ FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc ++ FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa ++ FAN_RENAME = 0x10000000 ++ FAN_REPORT_DFID_NAME_TARGET = 0x1e00 ++ FAN_REPORT_TARGET_FID = 0x1000 ++ FF1 = 0x8000 ++ FFDLY = 0x8000 ++ FICLONE = 0x40049409 ++ FICLONERANGE = 0x4020940d ++ FLUSHO = 0x1000 ++ FS_IOC_ENABLE_VERITY = 0x40806685 ++ FS_IOC_GETFLAGS = 0x80086601 ++ FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b ++ FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 ++ FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 ++ FS_IOC_SETFLAGS = 0x40086602 ++ FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 ++ F_GETLK = 0x5 ++ F_GETLK64 = 0x5 ++ F_GETOWN = 0x9 ++ F_RDLCK = 0x0 ++ F_SETLK = 0x6 ++ F_SETLK64 = 0x6 ++ F_SETLKW = 0x7 ++ F_SETLKW64 = 0x7 ++ F_SETOWN = 0x8 ++ F_UNLCK = 0x2 ++ F_WRLCK = 0x1 ++ HIDIOCGRAWINFO = 0x80084803 ++ HIDIOCGRDESC = 0x90044802 ++ HIDIOCGRDESCSIZE = 0x80044801 ++ HUPCL = 0x400 ++ ICANON = 0x2 ++ IEXTEN = 0x8000 ++ IN_CLOEXEC = 0x80000 ++ IN_NONBLOCK = 0x800 ++ IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 ++ ISIG = 0x1 ++ IUCLC = 0x200 ++ IXOFF = 0x1000 ++ IXON = 0x400 ++ KEXEC_ARCH_LOONGARCH = 0x1020000 ++ MAP_ANON = 0x20 ++ MAP_ANONYMOUS = 0x20 ++ MAP_DENYWRITE = 0x800 ++ MAP_EXECUTABLE = 0x1000 ++ MAP_GROWSDOWN = 0x100 ++ MAP_HUGETLB = 0x40000 ++ MAP_LOCKED = 0x2000 ++ MAP_NONBLOCK = 0x10000 ++ MAP_NORESERVE = 
0x4000 ++ MAP_POPULATE = 0x8000 ++ MAP_STACK = 0x20000 ++ MAP_SYNC = 0x80000 ++ MCL_CURRENT = 0x1 ++ MCL_FUTURE = 0x2 ++ MCL_ONFAULT = 0x4 ++ MEMERASE = 0x40084d02 ++ MEMERASE64 = 0x40104d14 ++ MEMGETBADBLOCK = 0x40084d0b ++ MEMGETINFO = 0x80204d01 ++ MEMGETOOBSEL = 0x80c84d0a ++ MEMGETREGIONCOUNT = 0x80044d07 ++ MEMISLOCKED = 0x80084d17 ++ MEMLOCK = 0x40084d05 ++ MEMREADOOB = 0xc0104d04 ++ MEMSETBADBLOCK = 0x40084d0c ++ MEMUNLOCK = 0x40084d06 ++ MEMWRITEOOB = 0xc0104d03 ++ MODULE_INIT_COMPRESSED_FILE = 0x4 ++ MTDFILEMODE = 0x4d13 ++ NFDBITS = 0x40 ++ NLDLY = 0x100 ++ NOFLSH = 0x80 ++ NS_GET_NSTYPE = 0xb703 ++ NS_GET_OWNER_UID = 0xb704 ++ NS_GET_PARENT = 0xb702 ++ NS_GET_USERNS = 0xb701 ++ OLCUC = 0x2 ++ ONLCR = 0x4 ++ OTPERASE = 0x400c4d19 ++ OTPGETREGIONCOUNT = 0x40044d0e ++ OTPGETREGIONINFO = 0x400c4d0f ++ OTPLOCK = 0x800c4d10 ++ OTPSELECT = 0x80044d0d ++ O_APPEND = 0x400 ++ O_ASYNC = 0x2000 ++ O_CLOEXEC = 0x80000 ++ O_CREAT = 0x40 ++ O_DIRECT = 0x4000 ++ O_DIRECTORY = 0x10000 ++ O_DSYNC = 0x1000 ++ O_EXCL = 0x80 ++ O_FSYNC = 0x101000 ++ O_LARGEFILE = 0x0 ++ O_NDELAY = 0x800 ++ O_NOATIME = 0x40000 ++ O_NOCTTY = 0x100 ++ O_NOFOLLOW = 0x20000 ++ O_NONBLOCK = 0x800 ++ O_PATH = 0x200000 ++ O_RSYNC = 0x101000 ++ O_SYNC = 0x101000 ++ O_TMPFILE = 0x410000 ++ O_TRUNC = 0x200 ++ PARENB = 0x100 ++ PARODD = 0x200 ++ PENDIN = 0x4000 ++ PERF_EVENT_IOC_DISABLE = 0x2401 ++ PERF_EVENT_IOC_ENABLE = 0x2400 ++ PERF_EVENT_IOC_ID = 0x80082407 ++ PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b ++ PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 ++ PERF_EVENT_IOC_PERIOD = 0x40082404 ++ PERF_EVENT_IOC_QUERY_BPF = 0xc008240a ++ PERF_EVENT_IOC_REFRESH = 0x2402 ++ PERF_EVENT_IOC_RESET = 0x2403 ++ PERF_EVENT_IOC_SET_BPF = 0x40042408 ++ PERF_EVENT_IOC_SET_FILTER = 0x40082406 ++ PERF_EVENT_IOC_SET_OUTPUT = 0x2405 ++ PERF_MEM_HOPS_1 = 0x2 ++ PERF_MEM_HOPS_2 = 0x3 ++ PERF_MEM_HOPS_3 = 0x4 ++ PPPIOCATTACH = 0x4004743d ++ PPPIOCATTCHAN = 0x40047438 ++ PPPIOCBRIDGECHAN = 0x40047435 ++ PPPIOCCONNECT = 0x4004743a ++ PPPIOCDETACH = 0x4004743c ++ PPPIOCDISCONN = 0x7439 ++ PPPIOCGASYNCMAP = 0x80047458 ++ PPPIOCGCHAN = 0x80047437 ++ PPPIOCGDEBUG = 0x80047441 ++ PPPIOCGFLAGS = 0x8004745a ++ PPPIOCGIDLE = 0x8010743f ++ PPPIOCGIDLE32 = 0x8008743f ++ PPPIOCGIDLE64 = 0x8010743f ++ PPPIOCGL2TPSTATS = 0x80487436 ++ PPPIOCGMRU = 0x80047453 ++ PPPIOCGRASYNCMAP = 0x80047455 ++ PPPIOCGUNIT = 0x80047456 ++ PPPIOCGXASYNCMAP = 0x80207450 ++ PPPIOCSACTIVE = 0x40107446 ++ PPPIOCSASYNCMAP = 0x40047457 ++ PPPIOCSCOMPRESS = 0x4010744d ++ PPPIOCSDEBUG = 0x40047440 ++ PPPIOCSFLAGS = 0x40047459 ++ PPPIOCSMAXCID = 0x40047451 ++ PPPIOCSMRRU = 0x4004743b ++ PPPIOCSMRU = 0x40047452 ++ PPPIOCSNPMODE = 0x4008744b ++ PPPIOCSPASS = 0x40107447 ++ PPPIOCSRASYNCMAP = 0x40047454 ++ PPPIOCSXASYNCMAP = 0x4020744f ++ PPPIOCUNBRIDGECHAN = 0x7434 ++ PPPIOCXFERUNIT = 0x744e ++ PR_SET_PTRACER_ANY = 0xffffffffffffffff ++ PR_SET_VMA = 0x53564d41 ++ PR_SET_VMA_ANON_NAME = 0x0 ++ PTRACE_SYSEMU = 0x1f ++ PTRACE_SYSEMU_SINGLESTEP = 0x20 ++ RLIMIT_AS = 0x9 ++ RLIMIT_MEMLOCK = 0x8 ++ RLIMIT_NOFILE = 0x7 ++ RLIMIT_NPROC = 0x6 ++ RLIMIT_RSS = 0x5 ++ RNDADDENTROPY = 0x40085203 ++ RNDADDTOENTCNT = 0x40045201 ++ RNDCLEARPOOL = 0x5206 ++ RNDGETENTCNT = 0x80045200 ++ RNDGETPOOL = 0x80085202 ++ RNDRESEEDCRNG = 0x5207 ++ RNDZAPENTCNT = 0x5204 ++ RTC_AIE_OFF = 0x7002 ++ RTC_AIE_ON = 0x7001 ++ RTC_ALM_READ = 0x80247008 ++ RTC_ALM_SET = 0x40247007 ++ RTC_EPOCH_READ = 0x8008700d ++ RTC_EPOCH_SET = 0x4008700e ++ RTC_IRQP_READ = 0x8008700b ++ RTC_IRQP_SET = 0x4008700c ++ RTC_PARAM_GET = 0x40187013 
++ RTC_PARAM_SET = 0x40187014 ++ RTC_PIE_OFF = 0x7006 ++ RTC_PIE_ON = 0x7005 ++ RTC_PLL_GET = 0x80207011 ++ RTC_PLL_SET = 0x40207012 ++ RTC_RD_TIME = 0x80247009 ++ RTC_SET_TIME = 0x4024700a ++ RTC_UIE_OFF = 0x7004 ++ RTC_UIE_ON = 0x7003 ++ RTC_VL_CLR = 0x7014 ++ RTC_VL_READ = 0x80047013 ++ RTC_WIE_OFF = 0x7010 ++ RTC_WIE_ON = 0x700f ++ RTC_WKALM_RD = 0x80287010 ++ RTC_WKALM_SET = 0x4028700f ++ SCM_TIMESTAMPING = 0x25 ++ SCM_TIMESTAMPING_OPT_STATS = 0x36 ++ SCM_TIMESTAMPING_PKTINFO = 0x3a ++ SCM_TIMESTAMPNS = 0x23 ++ SCM_TXTIME = 0x3d ++ SCM_WIFI_STATUS = 0x29 ++ SFD_CLOEXEC = 0x80000 ++ SFD_NONBLOCK = 0x800 ++ SIOCATMARK = 0x8905 ++ SIOCGPGRP = 0x8904 ++ SIOCGSTAMPNS_NEW = 0x80108907 ++ SIOCGSTAMP_NEW = 0x80108906 ++ SIOCINQ = 0x541b ++ SIOCOUTQ = 0x5411 ++ SIOCSPGRP = 0x8902 ++ SMB2_SUPER_MAGIC = 0xfe534d42 ++ SOCK_CLOEXEC = 0x80000 ++ SOCK_DGRAM = 0x2 ++ SOCK_NONBLOCK = 0x800 ++ SOCK_STREAM = 0x1 ++ SOL_SOCKET = 0x1 ++ SO_ACCEPTCONN = 0x1e ++ SO_ATTACH_BPF = 0x32 ++ SO_ATTACH_REUSEPORT_CBPF = 0x33 ++ SO_ATTACH_REUSEPORT_EBPF = 0x34 ++ SO_BINDTODEVICE = 0x19 ++ SO_BINDTOIFINDEX = 0x3e ++ SO_BPF_EXTENSIONS = 0x30 ++ SO_BROADCAST = 0x6 ++ SO_BSDCOMPAT = 0xe ++ SO_BUF_LOCK = 0x48 ++ SO_BUSY_POLL = 0x2e ++ SO_BUSY_POLL_BUDGET = 0x46 ++ SO_CNX_ADVICE = 0x35 ++ SO_COOKIE = 0x39 ++ SO_DETACH_REUSEPORT_BPF = 0x44 ++ SO_DOMAIN = 0x27 ++ SO_DONTROUTE = 0x5 ++ SO_ERROR = 0x4 ++ SO_INCOMING_CPU = 0x31 ++ SO_INCOMING_NAPI_ID = 0x38 ++ SO_KEEPALIVE = 0x9 ++ SO_LINGER = 0xd ++ SO_LOCK_FILTER = 0x2c ++ SO_MARK = 0x24 ++ SO_MAX_PACING_RATE = 0x2f ++ SO_MEMINFO = 0x37 ++ SO_NETNS_COOKIE = 0x47 ++ SO_NOFCS = 0x2b ++ SO_OOBINLINE = 0xa ++ SO_PASSCRED = 0x10 ++ SO_PASSSEC = 0x22 ++ SO_PEEK_OFF = 0x2a ++ SO_PEERCRED = 0x11 ++ SO_PEERGROUPS = 0x3b ++ SO_PEERSEC = 0x1f ++ SO_PREFER_BUSY_POLL = 0x45 ++ SO_PROTOCOL = 0x26 ++ SO_RCVBUF = 0x8 ++ SO_RCVBUFFORCE = 0x21 ++ SO_RCVLOWAT = 0x12 ++ SO_RCVTIMEO = 0x14 ++ SO_RCVTIMEO_NEW = 0x42 ++ SO_RCVTIMEO_OLD = 0x14 ++ SO_RESERVE_MEM = 0x49 ++ SO_REUSEADDR = 0x2 ++ SO_REUSEPORT = 0xf ++ SO_RXQ_OVFL = 0x28 ++ SO_SECURITY_AUTHENTICATION = 0x16 ++ SO_SECURITY_ENCRYPTION_NETWORK = 0x18 ++ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 ++ SO_SELECT_ERR_QUEUE = 0x2d ++ SO_SNDBUF = 0x7 ++ SO_SNDBUFFORCE = 0x20 ++ SO_SNDLOWAT = 0x13 ++ SO_SNDTIMEO = 0x15 ++ SO_SNDTIMEO_NEW = 0x43 ++ SO_SNDTIMEO_OLD = 0x15 ++ SO_TIMESTAMPING = 0x25 ++ SO_TIMESTAMPING_NEW = 0x41 ++ SO_TIMESTAMPING_OLD = 0x25 ++ SO_TIMESTAMPNS = 0x23 ++ SO_TIMESTAMPNS_NEW = 0x40 ++ SO_TIMESTAMPNS_OLD = 0x23 ++ SO_TIMESTAMP_NEW = 0x3f ++ SO_TXTIME = 0x3d ++ SO_TYPE = 0x3 ++ SO_WIFI_STATUS = 0x29 ++ SO_ZEROCOPY = 0x3c ++ TAB1 = 0x800 ++ TAB2 = 0x1000 ++ TAB3 = 0x1800 ++ TABDLY = 0x1800 ++ TCFLSH = 0x540b ++ TCGETA = 0x5405 ++ TCGETS = 0x5401 ++ TCGETS2 = 0x802c542a ++ TCGETX = 0x5432 ++ TCSAFLUSH = 0x2 ++ TCSBRK = 0x5409 ++ TCSBRKP = 0x5425 ++ TCSETA = 0x5406 ++ TCSETAF = 0x5408 ++ TCSETAW = 0x5407 ++ TCSETS = 0x5402 ++ TCSETS2 = 0x402c542b ++ TCSETSF = 0x5404 ++ TCSETSF2 = 0x402c542d ++ TCSETSW = 0x5403 ++ TCSETSW2 = 0x402c542c ++ TCSETX = 0x5433 ++ TCSETXF = 0x5434 ++ TCSETXW = 0x5435 ++ TCXONC = 0x540a ++ TFD_CLOEXEC = 0x80000 ++ TFD_NONBLOCK = 0x800 ++ TIOCCBRK = 0x5428 ++ TIOCCONS = 0x541d ++ TIOCEXCL = 0x540c ++ TIOCGDEV = 0x80045432 ++ TIOCGETD = 0x5424 ++ TIOCGEXCL = 0x80045440 ++ TIOCGICOUNT = 0x545d ++ TIOCGISO7816 = 0x80285442 ++ TIOCGLCKTRMIOS = 0x5456 ++ TIOCGPGRP = 0x540f ++ TIOCGPKT = 0x80045438 ++ TIOCGPTLCK = 0x80045439 ++ TIOCGPTN = 0x80045430 ++ TIOCGPTPEER = 0x5441 ++ TIOCGRS485 = 0x542e ++ TIOCGSERIAL = 
0x541e ++ TIOCGSID = 0x5429 ++ TIOCGSOFTCAR = 0x5419 ++ TIOCGWINSZ = 0x5413 ++ TIOCINQ = 0x541b ++ TIOCLINUX = 0x541c ++ TIOCMBIC = 0x5417 ++ TIOCMBIS = 0x5416 ++ TIOCMGET = 0x5415 ++ TIOCMIWAIT = 0x545c ++ TIOCMSET = 0x5418 ++ TIOCM_CAR = 0x40 ++ TIOCM_CD = 0x40 ++ TIOCM_CTS = 0x20 ++ TIOCM_DSR = 0x100 ++ TIOCM_RI = 0x80 ++ TIOCM_RNG = 0x80 ++ TIOCM_SR = 0x10 ++ TIOCM_ST = 0x8 ++ TIOCNOTTY = 0x5422 ++ TIOCNXCL = 0x540d ++ TIOCOUTQ = 0x5411 ++ TIOCPKT = 0x5420 ++ TIOCSBRK = 0x5427 ++ TIOCSCTTY = 0x540e ++ TIOCSERCONFIG = 0x5453 ++ TIOCSERGETLSR = 0x5459 ++ TIOCSERGETMULTI = 0x545a ++ TIOCSERGSTRUCT = 0x5458 ++ TIOCSERGWILD = 0x5454 ++ TIOCSERSETMULTI = 0x545b ++ TIOCSERSWILD = 0x5455 ++ TIOCSER_TEMT = 0x1 ++ TIOCSETD = 0x5423 ++ TIOCSIG = 0x40045436 ++ TIOCSISO7816 = 0xc0285443 ++ TIOCSLCKTRMIOS = 0x5457 ++ TIOCSPGRP = 0x5410 ++ TIOCSPTLCK = 0x40045431 ++ TIOCSRS485 = 0x542f ++ TIOCSSERIAL = 0x541f ++ TIOCSSOFTCAR = 0x541a ++ TIOCSTI = 0x5412 ++ TIOCSWINSZ = 0x5414 ++ TIOCVHANGUP = 0x5437 ++ TOSTOP = 0x100 ++ TUNATTACHFILTER = 0x401054d5 ++ TUNDETACHFILTER = 0x401054d6 ++ TUNGETDEVNETNS = 0x54e3 ++ TUNGETFEATURES = 0x800454cf ++ TUNGETFILTER = 0x801054db ++ TUNGETIFF = 0x800454d2 ++ TUNGETSNDBUF = 0x800454d3 ++ TUNGETVNETBE = 0x800454df ++ TUNGETVNETHDRSZ = 0x800454d7 ++ TUNGETVNETLE = 0x800454dd ++ TUNSETCARRIER = 0x400454e2 ++ TUNSETDEBUG = 0x400454c9 ++ TUNSETFILTEREBPF = 0x800454e1 ++ TUNSETGROUP = 0x400454ce ++ TUNSETIFF = 0x400454ca ++ TUNSETIFINDEX = 0x400454da ++ TUNSETLINK = 0x400454cd ++ TUNSETNOCSUM = 0x400454c8 ++ TUNSETOFFLOAD = 0x400454d0 ++ TUNSETOWNER = 0x400454cc ++ TUNSETPERSIST = 0x400454cb ++ TUNSETQUEUE = 0x400454d9 ++ TUNSETSNDBUF = 0x400454d4 ++ TUNSETSTEERINGEBPF = 0x800454e0 ++ TUNSETTXFILTER = 0x400454d1 ++ TUNSETVNETBE = 0x400454de ++ TUNSETVNETHDRSZ = 0x400454d8 ++ TUNSETVNETLE = 0x400454dc ++ UBI_IOCATT = 0x40186f40 ++ UBI_IOCDET = 0x40046f41 ++ UBI_IOCEBCH = 0x40044f02 ++ UBI_IOCEBER = 0x40044f01 ++ UBI_IOCEBISMAP = 0x80044f05 ++ UBI_IOCEBMAP = 0x40084f03 ++ UBI_IOCEBUNMAP = 0x40044f04 ++ UBI_IOCMKVOL = 0x40986f00 ++ UBI_IOCRMVOL = 0x40046f01 ++ UBI_IOCRNVOL = 0x51106f03 ++ UBI_IOCRPEB = 0x40046f04 ++ UBI_IOCRSVOL = 0x400c6f02 ++ UBI_IOCSETVOLPROP = 0x40104f06 ++ UBI_IOCSPEB = 0x40046f05 ++ UBI_IOCVOLCRBLK = 0x40804f07 ++ UBI_IOCVOLRMBLK = 0x4f08 ++ UBI_IOCVOLUP = 0x40084f00 ++ VDISCARD = 0xd ++ VEOF = 0x4 ++ VEOL = 0xb ++ VEOL2 = 0x10 ++ VMIN = 0x6 ++ VREPRINT = 0xc ++ VSTART = 0x8 ++ VSTOP = 0x9 ++ VSUSP = 0xa ++ VSWTC = 0x7 ++ VT1 = 0x4000 ++ VTDLY = 0x4000 ++ VTIME = 0x5 ++ VWERASE = 0xe ++ WDIOC_GETBOOTSTATUS = 0x80045702 ++ WDIOC_GETPRETIMEOUT = 0x80045709 ++ WDIOC_GETSTATUS = 0x80045701 ++ WDIOC_GETSUPPORT = 0x80285700 ++ WDIOC_GETTEMP = 0x80045703 ++ WDIOC_GETTIMELEFT = 0x8004570a ++ WDIOC_GETTIMEOUT = 0x80045707 ++ WDIOC_KEEPALIVE = 0x80045705 ++ WDIOC_SETOPTIONS = 0x80045704 ++ WORDSIZE = 0x40 ++ XCASE = 0x4 ++ XTABS = 0x1800 ++ _HIDIOCGRAWNAME = 0x80804804 ++ _HIDIOCGRAWPHYS = 0x80404805 ++ _HIDIOCGRAWUNIQ = 0x80404808 ++) ++ ++// Errors ++const ( ++ EADDRINUSE = syscall.Errno(0x62) ++ EADDRNOTAVAIL = syscall.Errno(0x63) ++ EADV = syscall.Errno(0x44) ++ EAFNOSUPPORT = syscall.Errno(0x61) ++ EALREADY = syscall.Errno(0x72) ++ EBADE = syscall.Errno(0x34) ++ EBADFD = syscall.Errno(0x4d) ++ EBADMSG = syscall.Errno(0x4a) ++ EBADR = syscall.Errno(0x35) ++ EBADRQC = syscall.Errno(0x38) ++ EBADSLT = syscall.Errno(0x39) ++ EBFONT = syscall.Errno(0x3b) ++ ECANCELED = syscall.Errno(0x7d) ++ ECHRNG = syscall.Errno(0x2c) ++ ECOMM = syscall.Errno(0x46) ++ 
ECONNABORTED = syscall.Errno(0x67) ++ ECONNREFUSED = syscall.Errno(0x6f) ++ ECONNRESET = syscall.Errno(0x68) ++ EDEADLK = syscall.Errno(0x23) ++ EDEADLOCK = syscall.Errno(0x23) ++ EDESTADDRREQ = syscall.Errno(0x59) ++ EDOTDOT = syscall.Errno(0x49) ++ EDQUOT = syscall.Errno(0x7a) ++ EHOSTDOWN = syscall.Errno(0x70) ++ EHOSTUNREACH = syscall.Errno(0x71) ++ EHWPOISON = syscall.Errno(0x85) ++ EIDRM = syscall.Errno(0x2b) ++ EILSEQ = syscall.Errno(0x54) ++ EINPROGRESS = syscall.Errno(0x73) ++ EISCONN = syscall.Errno(0x6a) ++ EISNAM = syscall.Errno(0x78) ++ EKEYEXPIRED = syscall.Errno(0x7f) ++ EKEYREJECTED = syscall.Errno(0x81) ++ EKEYREVOKED = syscall.Errno(0x80) ++ EL2HLT = syscall.Errno(0x33) ++ EL2NSYNC = syscall.Errno(0x2d) ++ EL3HLT = syscall.Errno(0x2e) ++ EL3RST = syscall.Errno(0x2f) ++ ELIBACC = syscall.Errno(0x4f) ++ ELIBBAD = syscall.Errno(0x50) ++ ELIBEXEC = syscall.Errno(0x53) ++ ELIBMAX = syscall.Errno(0x52) ++ ELIBSCN = syscall.Errno(0x51) ++ ELNRNG = syscall.Errno(0x30) ++ ELOOP = syscall.Errno(0x28) ++ EMEDIUMTYPE = syscall.Errno(0x7c) ++ EMSGSIZE = syscall.Errno(0x5a) ++ EMULTIHOP = syscall.Errno(0x48) ++ ENAMETOOLONG = syscall.Errno(0x24) ++ ENAVAIL = syscall.Errno(0x77) ++ ENETDOWN = syscall.Errno(0x64) ++ ENETRESET = syscall.Errno(0x66) ++ ENETUNREACH = syscall.Errno(0x65) ++ ENOANO = syscall.Errno(0x37) ++ ENOBUFS = syscall.Errno(0x69) ++ ENOCSI = syscall.Errno(0x32) ++ ENODATA = syscall.Errno(0x3d) ++ ENOKEY = syscall.Errno(0x7e) ++ ENOLCK = syscall.Errno(0x25) ++ ENOLINK = syscall.Errno(0x43) ++ ENOMEDIUM = syscall.Errno(0x7b) ++ ENOMSG = syscall.Errno(0x2a) ++ ENONET = syscall.Errno(0x40) ++ ENOPKG = syscall.Errno(0x41) ++ ENOPROTOOPT = syscall.Errno(0x5c) ++ ENOSR = syscall.Errno(0x3f) ++ ENOSTR = syscall.Errno(0x3c) ++ ENOSYS = syscall.Errno(0x26) ++ ENOTCONN = syscall.Errno(0x6b) ++ ENOTEMPTY = syscall.Errno(0x27) ++ ENOTNAM = syscall.Errno(0x76) ++ ENOTRECOVERABLE = syscall.Errno(0x83) ++ ENOTSOCK = syscall.Errno(0x58) ++ ENOTSUP = syscall.Errno(0x5f) ++ ENOTUNIQ = syscall.Errno(0x4c) ++ EOPNOTSUPP = syscall.Errno(0x5f) ++ EOVERFLOW = syscall.Errno(0x4b) ++ EOWNERDEAD = syscall.Errno(0x82) ++ EPFNOSUPPORT = syscall.Errno(0x60) ++ EPROTO = syscall.Errno(0x47) ++ EPROTONOSUPPORT = syscall.Errno(0x5d) ++ EPROTOTYPE = syscall.Errno(0x5b) ++ EREMCHG = syscall.Errno(0x4e) ++ EREMOTE = syscall.Errno(0x42) ++ EREMOTEIO = syscall.Errno(0x79) ++ ERESTART = syscall.Errno(0x55) ++ ERFKILL = syscall.Errno(0x84) ++ ESHUTDOWN = syscall.Errno(0x6c) ++ ESOCKTNOSUPPORT = syscall.Errno(0x5e) ++ ESRMNT = syscall.Errno(0x45) ++ ESTALE = syscall.Errno(0x74) ++ ESTRPIPE = syscall.Errno(0x56) ++ ETIME = syscall.Errno(0x3e) ++ ETIMEDOUT = syscall.Errno(0x6e) ++ ETOOMANYREFS = syscall.Errno(0x6d) ++ EUCLEAN = syscall.Errno(0x75) ++ EUNATCH = syscall.Errno(0x31) ++ EUSERS = syscall.Errno(0x57) ++ EXFULL = syscall.Errno(0x36) ++) ++ ++// Signals ++const ( ++ SIGBUS = syscall.Signal(0x7) ++ SIGCHLD = syscall.Signal(0x11) ++ SIGCLD = syscall.Signal(0x11) ++ SIGCONT = syscall.Signal(0x12) ++ SIGIO = syscall.Signal(0x1d) ++ SIGPOLL = syscall.Signal(0x1d) ++ SIGPROF = syscall.Signal(0x1b) ++ SIGPWR = syscall.Signal(0x1e) ++ SIGSTKFLT = syscall.Signal(0x10) ++ SIGSTOP = syscall.Signal(0x13) ++ SIGSYS = syscall.Signal(0x1f) ++ SIGTSTP = syscall.Signal(0x14) ++ SIGTTIN = syscall.Signal(0x15) ++ SIGTTOU = syscall.Signal(0x16) ++ SIGURG = syscall.Signal(0x17) ++ SIGUSR1 = syscall.Signal(0xa) ++ SIGUSR2 = syscall.Signal(0xc) ++ SIGVTALRM = syscall.Signal(0x1a) ++ SIGWINCH = syscall.Signal(0x1c) ++ SIGXCPU = 
syscall.Signal(0x18) ++ SIGXFSZ = syscall.Signal(0x19) ++) ++ ++// Error table ++var errorList = [...]struct { ++ num syscall.Errno ++ name string ++ desc string ++}{ ++ {1, "EPERM", "operation not permitted"}, ++ {2, "ENOENT", "no such file or directory"}, ++ {3, "ESRCH", "no such process"}, ++ {4, "EINTR", "interrupted system call"}, ++ {5, "EIO", "input/output error"}, ++ {6, "ENXIO", "no such device or address"}, ++ {7, "E2BIG", "argument list too long"}, ++ {8, "ENOEXEC", "exec format error"}, ++ {9, "EBADF", "bad file descriptor"}, ++ {10, "ECHILD", "no child processes"}, ++ {11, "EAGAIN", "resource temporarily unavailable"}, ++ {12, "ENOMEM", "cannot allocate memory"}, ++ {13, "EACCES", "permission denied"}, ++ {14, "EFAULT", "bad address"}, ++ {15, "ENOTBLK", "block device required"}, ++ {16, "EBUSY", "device or resource busy"}, ++ {17, "EEXIST", "file exists"}, ++ {18, "EXDEV", "invalid cross-device link"}, ++ {19, "ENODEV", "no such device"}, ++ {20, "ENOTDIR", "not a directory"}, ++ {21, "EISDIR", "is a directory"}, ++ {22, "EINVAL", "invalid argument"}, ++ {23, "ENFILE", "too many open files in system"}, ++ {24, "EMFILE", "too many open files"}, ++ {25, "ENOTTY", "inappropriate ioctl for device"}, ++ {26, "ETXTBSY", "text file busy"}, ++ {27, "EFBIG", "file too large"}, ++ {28, "ENOSPC", "no space left on device"}, ++ {29, "ESPIPE", "illegal seek"}, ++ {30, "EROFS", "read-only file system"}, ++ {31, "EMLINK", "too many links"}, ++ {32, "EPIPE", "broken pipe"}, ++ {33, "EDOM", "numerical argument out of domain"}, ++ {34, "ERANGE", "numerical result out of range"}, ++ {35, "EDEADLK", "resource deadlock avoided"}, ++ {36, "ENAMETOOLONG", "file name too long"}, ++ {37, "ENOLCK", "no locks available"}, ++ {38, "ENOSYS", "function not implemented"}, ++ {39, "ENOTEMPTY", "directory not empty"}, ++ {40, "ELOOP", "too many levels of symbolic links"}, ++ {42, "ENOMSG", "no message of desired type"}, ++ {43, "EIDRM", "identifier removed"}, ++ {44, "ECHRNG", "channel number out of range"}, ++ {45, "EL2NSYNC", "level 2 not synchronized"}, ++ {46, "EL3HLT", "level 3 halted"}, ++ {47, "EL3RST", "level 3 reset"}, ++ {48, "ELNRNG", "link number out of range"}, ++ {49, "EUNATCH", "protocol driver not attached"}, ++ {50, "ENOCSI", "no CSI structure available"}, ++ {51, "EL2HLT", "level 2 halted"}, ++ {52, "EBADE", "invalid exchange"}, ++ {53, "EBADR", "invalid request descriptor"}, ++ {54, "EXFULL", "exchange full"}, ++ {55, "ENOANO", "no anode"}, ++ {56, "EBADRQC", "invalid request code"}, ++ {57, "EBADSLT", "invalid slot"}, ++ {59, "EBFONT", "bad font file format"}, ++ {60, "ENOSTR", "device not a stream"}, ++ {61, "ENODATA", "no data available"}, ++ {62, "ETIME", "timer expired"}, ++ {63, "ENOSR", "out of streams resources"}, ++ {64, "ENONET", "machine is not on the network"}, ++ {65, "ENOPKG", "package not installed"}, ++ {66, "EREMOTE", "object is remote"}, ++ {67, "ENOLINK", "link has been severed"}, ++ {68, "EADV", "advertise error"}, ++ {69, "ESRMNT", "srmount error"}, ++ {70, "ECOMM", "communication error on send"}, ++ {71, "EPROTO", "protocol error"}, ++ {72, "EMULTIHOP", "multihop attempted"}, ++ {73, "EDOTDOT", "RFS specific error"}, ++ {74, "EBADMSG", "bad message"}, ++ {75, "EOVERFLOW", "value too large for defined data type"}, ++ {76, "ENOTUNIQ", "name not unique on network"}, ++ {77, "EBADFD", "file descriptor in bad state"}, ++ {78, "EREMCHG", "remote address changed"}, ++ {79, "ELIBACC", "can not access a needed shared library"}, ++ {80, "ELIBBAD", "accessing a corrupted shared 
library"}, ++ {81, "ELIBSCN", ".lib section in a.out corrupted"}, ++ {82, "ELIBMAX", "attempting to link in too many shared libraries"}, ++ {83, "ELIBEXEC", "cannot exec a shared library directly"}, ++ {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, ++ {85, "ERESTART", "interrupted system call should be restarted"}, ++ {86, "ESTRPIPE", "streams pipe error"}, ++ {87, "EUSERS", "too many users"}, ++ {88, "ENOTSOCK", "socket operation on non-socket"}, ++ {89, "EDESTADDRREQ", "destination address required"}, ++ {90, "EMSGSIZE", "message too long"}, ++ {91, "EPROTOTYPE", "protocol wrong type for socket"}, ++ {92, "ENOPROTOOPT", "protocol not available"}, ++ {93, "EPROTONOSUPPORT", "protocol not supported"}, ++ {94, "ESOCKTNOSUPPORT", "socket type not supported"}, ++ {95, "ENOTSUP", "operation not supported"}, ++ {96, "EPFNOSUPPORT", "protocol family not supported"}, ++ {97, "EAFNOSUPPORT", "address family not supported by protocol"}, ++ {98, "EADDRINUSE", "address already in use"}, ++ {99, "EADDRNOTAVAIL", "cannot assign requested address"}, ++ {100, "ENETDOWN", "network is down"}, ++ {101, "ENETUNREACH", "network is unreachable"}, ++ {102, "ENETRESET", "network dropped connection on reset"}, ++ {103, "ECONNABORTED", "software caused connection abort"}, ++ {104, "ECONNRESET", "connection reset by peer"}, ++ {105, "ENOBUFS", "no buffer space available"}, ++ {106, "EISCONN", "transport endpoint is already connected"}, ++ {107, "ENOTCONN", "transport endpoint is not connected"}, ++ {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, ++ {109, "ETOOMANYREFS", "too many references: cannot splice"}, ++ {110, "ETIMEDOUT", "connection timed out"}, ++ {111, "ECONNREFUSED", "connection refused"}, ++ {112, "EHOSTDOWN", "host is down"}, ++ {113, "EHOSTUNREACH", "no route to host"}, ++ {114, "EALREADY", "operation already in progress"}, ++ {115, "EINPROGRESS", "operation now in progress"}, ++ {116, "ESTALE", "stale file handle"}, ++ {117, "EUCLEAN", "structure needs cleaning"}, ++ {118, "ENOTNAM", "not a XENIX named type file"}, ++ {119, "ENAVAIL", "no XENIX semaphores available"}, ++ {120, "EISNAM", "is a named type file"}, ++ {121, "EREMOTEIO", "remote I/O error"}, ++ {122, "EDQUOT", "disk quota exceeded"}, ++ {123, "ENOMEDIUM", "no medium found"}, ++ {124, "EMEDIUMTYPE", "wrong medium type"}, ++ {125, "ECANCELED", "operation canceled"}, ++ {126, "ENOKEY", "required key not available"}, ++ {127, "EKEYEXPIRED", "key has expired"}, ++ {128, "EKEYREVOKED", "key has been revoked"}, ++ {129, "EKEYREJECTED", "key was rejected by service"}, ++ {130, "EOWNERDEAD", "owner died"}, ++ {131, "ENOTRECOVERABLE", "state not recoverable"}, ++ {132, "ERFKILL", "operation not possible due to RF-kill"}, ++ {133, "EHWPOISON", "memory page has hardware error"}, ++} ++ ++// Signal table ++var signalList = [...]struct { ++ num syscall.Signal ++ name string ++ desc string ++}{ ++ {1, "SIGHUP", "hangup"}, ++ {2, "SIGINT", "interrupt"}, ++ {3, "SIGQUIT", "quit"}, ++ {4, "SIGILL", "illegal instruction"}, ++ {5, "SIGTRAP", "trace/breakpoint trap"}, ++ {6, "SIGABRT", "aborted"}, ++ {7, "SIGBUS", "bus error"}, ++ {8, "SIGFPE", "floating point exception"}, ++ {9, "SIGKILL", "killed"}, ++ {10, "SIGUSR1", "user defined signal 1"}, ++ {11, "SIGSEGV", "segmentation fault"}, ++ {12, "SIGUSR2", "user defined signal 2"}, ++ {13, "SIGPIPE", "broken pipe"}, ++ {14, "SIGALRM", "alarm clock"}, ++ {15, "SIGTERM", "terminated"}, ++ {16, "SIGSTKFLT", "stack fault"}, ++ {17, "SIGCHLD", "child exited"}, ++ {18, 
"SIGCONT", "continued"}, ++ {19, "SIGSTOP", "stopped (signal)"}, ++ {20, "SIGTSTP", "stopped"}, ++ {21, "SIGTTIN", "stopped (tty input)"}, ++ {22, "SIGTTOU", "stopped (tty output)"}, ++ {23, "SIGURG", "urgent I/O condition"}, ++ {24, "SIGXCPU", "CPU time limit exceeded"}, ++ {25, "SIGXFSZ", "file size limit exceeded"}, ++ {26, "SIGVTALRM", "virtual timer expired"}, ++ {27, "SIGPROF", "profiling timer expired"}, ++ {28, "SIGWINCH", "window changed"}, ++ {29, "SIGIO", "I/O possible"}, ++ {30, "SIGPWR", "power failure"}, ++ {31, "SIGSYS", "bad system call"}, ++} +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +new file mode 100644 +index 0000000..e455a8b +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +@@ -0,0 +1,563 @@ ++// go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go ++// Code generated by the command above; see README.md. DO NOT EDIT. ++ ++//go:build linux && loong64 ++// +build linux,loong64 ++ ++package unix ++ ++import ( ++ "syscall" ++ "unsafe" ++) ++ ++var _ syscall.Errno ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { ++ _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { ++ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { ++ r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) ++ n = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(events) > 0 { ++ _p0 = unsafe.Pointer(&events[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fadvise(fd int, offset int64, length int64, advice int) (err error) { ++ _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fchown(fd int, uid int, gid int) (err error) { ++ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstat(fd int, stat *Stat_t) (err error) { ++ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstatat(fd int, 
path string, stat *Stat_t, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstatfs(fd int, buf *Statfs_t) (err error) { ++ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Ftruncate(fd int, length int64) (err error) { ++ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getegid() (egid int) { ++ r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) ++ egid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Geteuid() (euid int) { ++ r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) ++ euid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getgid() (gid int) { ++ r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) ++ gid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getuid() (uid int) { ++ r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) ++ uid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Listen(s int, n int) (err error) { ++ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Pread(fd int, p []byte, offset int64) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Pwrite(fd int, p []byte, offset int64) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Seek(fd int, offset int64, whence int) (off int64, err error) { ++ r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) ++ off = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { ++ r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) ++ written = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setfsgid(gid int) (prev int, err error) { ++ r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) 
++ prev = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setfsuid(uid int) (prev int, err error) { ++ r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) ++ prev = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setregid(rgid int, egid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setresgid(rgid int, egid int, sgid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setresuid(ruid int, euid int, suid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setreuid(ruid int, euid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Shutdown(fd int, how int) (err error) { ++ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { ++ r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) ++ n = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Statfs(path string, buf *Statfs_t) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { ++ _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Truncate(path string, length int64) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { ++ r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func accept4(s int, rsa *RawSockaddrAny, addrlen 
*_Socklen, flags int) (fd int, err error) { ++ r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { ++ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { ++ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getgroups(n int, list *_Gid_t) (nn int, err error) { ++ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) ++ nn = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setgroups(n int, list *_Gid_t) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { ++ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { ++ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func socket(domain int, typ int, proto int) (fd int, err error) { ++ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { ++ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err 
error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { ++ r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) ++ xaddr = uintptr(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Gettimeofday(tv *Timeval) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(cmdline) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +new file mode 100644 +index 0000000..e443f9a +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +@@ -0,0 +1,313 @@ ++// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h ++// Code generated by the command above; see README.md. DO NOT EDIT. 
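The number table that follows is the generic asm-generic/unistd.h set: there is no open, stat, select or dup2 entry, which is why syscall_linux_loong64.go above routes Stat and Lstat through fstatat, Select through pselect6, and why Dup2 falls back to dup3 in syscall_linux.go. Callers keep the portable wrappers; a short sketch, assuming a linux/loong64 build (the path is arbitrary):

    package main

    import (
            "fmt"

            "golang.org/x/sys/unix"
    )

    func main() {
            // On loong64, unix.Stat is emulated via fstatat(AT_FDCWD, ...)
            // because the generic table below has no stat syscall.
            var st unix.Stat_t
            if err := unix.Stat("/etc/os-release", &st); err != nil {
                    fmt.Println("stat failed:", err)
                    return
            }
            fmt.Println("size:", st.Size)
    }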
++ ++//go:build loong64 && linux ++// +build loong64,linux ++ ++package unix ++ ++const ( ++ SYS_IO_SETUP = 0 ++ SYS_IO_DESTROY = 1 ++ SYS_IO_SUBMIT = 2 ++ SYS_IO_CANCEL = 3 ++ SYS_IO_GETEVENTS = 4 ++ SYS_SETXATTR = 5 ++ SYS_LSETXATTR = 6 ++ SYS_FSETXATTR = 7 ++ SYS_GETXATTR = 8 ++ SYS_LGETXATTR = 9 ++ SYS_FGETXATTR = 10 ++ SYS_LISTXATTR = 11 ++ SYS_LLISTXATTR = 12 ++ SYS_FLISTXATTR = 13 ++ SYS_REMOVEXATTR = 14 ++ SYS_LREMOVEXATTR = 15 ++ SYS_FREMOVEXATTR = 16 ++ SYS_GETCWD = 17 ++ SYS_LOOKUP_DCOOKIE = 18 ++ SYS_EVENTFD2 = 19 ++ SYS_EPOLL_CREATE1 = 20 ++ SYS_EPOLL_CTL = 21 ++ SYS_EPOLL_PWAIT = 22 ++ SYS_DUP = 23 ++ SYS_DUP3 = 24 ++ SYS_FCNTL = 25 ++ SYS_INOTIFY_INIT1 = 26 ++ SYS_INOTIFY_ADD_WATCH = 27 ++ SYS_INOTIFY_RM_WATCH = 28 ++ SYS_IOCTL = 29 ++ SYS_IOPRIO_SET = 30 ++ SYS_IOPRIO_GET = 31 ++ SYS_FLOCK = 32 ++ SYS_MKNODAT = 33 ++ SYS_MKDIRAT = 34 ++ SYS_UNLINKAT = 35 ++ SYS_SYMLINKAT = 36 ++ SYS_LINKAT = 37 ++ SYS_UMOUNT2 = 39 ++ SYS_MOUNT = 40 ++ SYS_PIVOT_ROOT = 41 ++ SYS_NFSSERVCTL = 42 ++ SYS_STATFS = 43 ++ SYS_FSTATFS = 44 ++ SYS_TRUNCATE = 45 ++ SYS_FTRUNCATE = 46 ++ SYS_FALLOCATE = 47 ++ SYS_FACCESSAT = 48 ++ SYS_CHDIR = 49 ++ SYS_FCHDIR = 50 ++ SYS_CHROOT = 51 ++ SYS_FCHMOD = 52 ++ SYS_FCHMODAT = 53 ++ SYS_FCHOWNAT = 54 ++ SYS_FCHOWN = 55 ++ SYS_OPENAT = 56 ++ SYS_CLOSE = 57 ++ SYS_VHANGUP = 58 ++ SYS_PIPE2 = 59 ++ SYS_QUOTACTL = 60 ++ SYS_GETDENTS64 = 61 ++ SYS_LSEEK = 62 ++ SYS_READ = 63 ++ SYS_WRITE = 64 ++ SYS_READV = 65 ++ SYS_WRITEV = 66 ++ SYS_PREAD64 = 67 ++ SYS_PWRITE64 = 68 ++ SYS_PREADV = 69 ++ SYS_PWRITEV = 70 ++ SYS_SENDFILE = 71 ++ SYS_PSELECT6 = 72 ++ SYS_PPOLL = 73 ++ SYS_SIGNALFD4 = 74 ++ SYS_VMSPLICE = 75 ++ SYS_SPLICE = 76 ++ SYS_TEE = 77 ++ SYS_READLINKAT = 78 ++ SYS_FSTATAT = 79 ++ SYS_FSTAT = 80 ++ SYS_SYNC = 81 ++ SYS_FSYNC = 82 ++ SYS_FDATASYNC = 83 ++ SYS_SYNC_FILE_RANGE = 84 ++ SYS_TIMERFD_CREATE = 85 ++ SYS_TIMERFD_SETTIME = 86 ++ SYS_TIMERFD_GETTIME = 87 ++ SYS_UTIMENSAT = 88 ++ SYS_ACCT = 89 ++ SYS_CAPGET = 90 ++ SYS_CAPSET = 91 ++ SYS_PERSONALITY = 92 ++ SYS_EXIT = 93 ++ SYS_EXIT_GROUP = 94 ++ SYS_WAITID = 95 ++ SYS_SET_TID_ADDRESS = 96 ++ SYS_UNSHARE = 97 ++ SYS_FUTEX = 98 ++ SYS_SET_ROBUST_LIST = 99 ++ SYS_GET_ROBUST_LIST = 100 ++ SYS_NANOSLEEP = 101 ++ SYS_GETITIMER = 102 ++ SYS_SETITIMER = 103 ++ SYS_KEXEC_LOAD = 104 ++ SYS_INIT_MODULE = 105 ++ SYS_DELETE_MODULE = 106 ++ SYS_TIMER_CREATE = 107 ++ SYS_TIMER_GETTIME = 108 ++ SYS_TIMER_GETOVERRUN = 109 ++ SYS_TIMER_SETTIME = 110 ++ SYS_TIMER_DELETE = 111 ++ SYS_CLOCK_SETTIME = 112 ++ SYS_CLOCK_GETTIME = 113 ++ SYS_CLOCK_GETRES = 114 ++ SYS_CLOCK_NANOSLEEP = 115 ++ SYS_SYSLOG = 116 ++ SYS_PTRACE = 117 ++ SYS_SCHED_SETPARAM = 118 ++ SYS_SCHED_SETSCHEDULER = 119 ++ SYS_SCHED_GETSCHEDULER = 120 ++ SYS_SCHED_GETPARAM = 121 ++ SYS_SCHED_SETAFFINITY = 122 ++ SYS_SCHED_GETAFFINITY = 123 ++ SYS_SCHED_YIELD = 124 ++ SYS_SCHED_GET_PRIORITY_MAX = 125 ++ SYS_SCHED_GET_PRIORITY_MIN = 126 ++ SYS_SCHED_RR_GET_INTERVAL = 127 ++ SYS_RESTART_SYSCALL = 128 ++ SYS_KILL = 129 ++ SYS_TKILL = 130 ++ SYS_TGKILL = 131 ++ SYS_SIGALTSTACK = 132 ++ SYS_RT_SIGSUSPEND = 133 ++ SYS_RT_SIGACTION = 134 ++ SYS_RT_SIGPROCMASK = 135 ++ SYS_RT_SIGPENDING = 136 ++ SYS_RT_SIGTIMEDWAIT = 137 ++ SYS_RT_SIGQUEUEINFO = 138 ++ SYS_RT_SIGRETURN = 139 ++ SYS_SETPRIORITY = 140 ++ SYS_GETPRIORITY = 141 ++ SYS_REBOOT = 142 ++ SYS_SETREGID = 143 ++ SYS_SETGID = 144 ++ SYS_SETREUID = 145 ++ SYS_SETUID = 146 ++ SYS_SETRESUID = 147 ++ SYS_GETRESUID = 148 ++ SYS_SETRESGID = 149 ++ SYS_GETRESGID = 150 ++ SYS_SETFSUID = 151 ++ SYS_SETFSGID = 152 ++ 
SYS_TIMES = 153 ++ SYS_SETPGID = 154 ++ SYS_GETPGID = 155 ++ SYS_GETSID = 156 ++ SYS_SETSID = 157 ++ SYS_GETGROUPS = 158 ++ SYS_SETGROUPS = 159 ++ SYS_UNAME = 160 ++ SYS_SETHOSTNAME = 161 ++ SYS_SETDOMAINNAME = 162 ++ SYS_GETRUSAGE = 165 ++ SYS_UMASK = 166 ++ SYS_PRCTL = 167 ++ SYS_GETCPU = 168 ++ SYS_GETTIMEOFDAY = 169 ++ SYS_SETTIMEOFDAY = 170 ++ SYS_ADJTIMEX = 171 ++ SYS_GETPID = 172 ++ SYS_GETPPID = 173 ++ SYS_GETUID = 174 ++ SYS_GETEUID = 175 ++ SYS_GETGID = 176 ++ SYS_GETEGID = 177 ++ SYS_GETTID = 178 ++ SYS_SYSINFO = 179 ++ SYS_MQ_OPEN = 180 ++ SYS_MQ_UNLINK = 181 ++ SYS_MQ_TIMEDSEND = 182 ++ SYS_MQ_TIMEDRECEIVE = 183 ++ SYS_MQ_NOTIFY = 184 ++ SYS_MQ_GETSETATTR = 185 ++ SYS_MSGGET = 186 ++ SYS_MSGCTL = 187 ++ SYS_MSGRCV = 188 ++ SYS_MSGSND = 189 ++ SYS_SEMGET = 190 ++ SYS_SEMCTL = 191 ++ SYS_SEMTIMEDOP = 192 ++ SYS_SEMOP = 193 ++ SYS_SHMGET = 194 ++ SYS_SHMCTL = 195 ++ SYS_SHMAT = 196 ++ SYS_SHMDT = 197 ++ SYS_SOCKET = 198 ++ SYS_SOCKETPAIR = 199 ++ SYS_BIND = 200 ++ SYS_LISTEN = 201 ++ SYS_ACCEPT = 202 ++ SYS_CONNECT = 203 ++ SYS_GETSOCKNAME = 204 ++ SYS_GETPEERNAME = 205 ++ SYS_SENDTO = 206 ++ SYS_RECVFROM = 207 ++ SYS_SETSOCKOPT = 208 ++ SYS_GETSOCKOPT = 209 ++ SYS_SHUTDOWN = 210 ++ SYS_SENDMSG = 211 ++ SYS_RECVMSG = 212 ++ SYS_READAHEAD = 213 ++ SYS_BRK = 214 ++ SYS_MUNMAP = 215 ++ SYS_MREMAP = 216 ++ SYS_ADD_KEY = 217 ++ SYS_REQUEST_KEY = 218 ++ SYS_KEYCTL = 219 ++ SYS_CLONE = 220 ++ SYS_EXECVE = 221 ++ SYS_MMAP = 222 ++ SYS_FADVISE64 = 223 ++ SYS_SWAPON = 224 ++ SYS_SWAPOFF = 225 ++ SYS_MPROTECT = 226 ++ SYS_MSYNC = 227 ++ SYS_MLOCK = 228 ++ SYS_MUNLOCK = 229 ++ SYS_MLOCKALL = 230 ++ SYS_MUNLOCKALL = 231 ++ SYS_MINCORE = 232 ++ SYS_MADVISE = 233 ++ SYS_REMAP_FILE_PAGES = 234 ++ SYS_MBIND = 235 ++ SYS_GET_MEMPOLICY = 236 ++ SYS_SET_MEMPOLICY = 237 ++ SYS_MIGRATE_PAGES = 238 ++ SYS_MOVE_PAGES = 239 ++ SYS_RT_TGSIGQUEUEINFO = 240 ++ SYS_PERF_EVENT_OPEN = 241 ++ SYS_ACCEPT4 = 242 ++ SYS_RECVMMSG = 243 ++ SYS_ARCH_SPECIFIC_SYSCALL = 244 ++ SYS_WAIT4 = 260 ++ SYS_PRLIMIT64 = 261 ++ SYS_FANOTIFY_INIT = 262 ++ SYS_FANOTIFY_MARK = 263 ++ SYS_NAME_TO_HANDLE_AT = 264 ++ SYS_OPEN_BY_HANDLE_AT = 265 ++ SYS_CLOCK_ADJTIME = 266 ++ SYS_SYNCFS = 267 ++ SYS_SETNS = 268 ++ SYS_SENDMMSG = 269 ++ SYS_PROCESS_VM_READV = 270 ++ SYS_PROCESS_VM_WRITEV = 271 ++ SYS_KCMP = 272 ++ SYS_FINIT_MODULE = 273 ++ SYS_SCHED_SETATTR = 274 ++ SYS_SCHED_GETATTR = 275 ++ SYS_RENAMEAT2 = 276 ++ SYS_SECCOMP = 277 ++ SYS_GETRANDOM = 278 ++ SYS_MEMFD_CREATE = 279 ++ SYS_BPF = 280 ++ SYS_EXECVEAT = 281 ++ SYS_USERFAULTFD = 282 ++ SYS_MEMBARRIER = 283 ++ SYS_MLOCK2 = 284 ++ SYS_COPY_FILE_RANGE = 285 ++ SYS_PREADV2 = 286 ++ SYS_PWRITEV2 = 287 ++ SYS_PKEY_MPROTECT = 288 ++ SYS_PKEY_ALLOC = 289 ++ SYS_PKEY_FREE = 290 ++ SYS_STATX = 291 ++ SYS_IO_PGETEVENTS = 292 ++ SYS_RSEQ = 293 ++ SYS_KEXEC_FILE_LOAD = 294 ++ SYS_PIDFD_SEND_SIGNAL = 424 ++ SYS_IO_URING_SETUP = 425 ++ SYS_IO_URING_ENTER = 426 ++ SYS_IO_URING_REGISTER = 427 ++ SYS_OPEN_TREE = 428 ++ SYS_MOVE_MOUNT = 429 ++ SYS_FSOPEN = 430 ++ SYS_FSCONFIG = 431 ++ SYS_FSMOUNT = 432 ++ SYS_FSPICK = 433 ++ SYS_PIDFD_OPEN = 434 ++ SYS_CLONE3 = 435 ++ SYS_CLOSE_RANGE = 436 ++ SYS_OPENAT2 = 437 ++ SYS_PIDFD_GETFD = 438 ++ SYS_FACCESSAT2 = 439 ++ SYS_PROCESS_MADVISE = 440 ++ SYS_EPOLL_PWAIT2 = 441 ++ SYS_MOUNT_SETATTR = 442 ++ SYS_QUOTACTL_FD = 443 ++ SYS_LANDLOCK_CREATE_RULESET = 444 ++ SYS_LANDLOCK_ADD_RULE = 445 ++ SYS_LANDLOCK_RESTRICT_SELF = 446 ++ SYS_PROCESS_MRELEASE = 448 ++ SYS_FUTEX_WAITV = 449 ++ SYS_SET_MEMPOLICY_HOME_NODE = 450 ++) +diff --git 
a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +new file mode 100644 +index 0000000..c19f60a +--- /dev/null ++++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +@@ -0,0 +1,667 @@ ++// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go ++// Code generated by the command above; see README.md. DO NOT EDIT. ++ ++//go:build loong64 && linux ++// +build loong64,linux ++ ++package unix ++ ++const ( ++ SizeofPtr = 0x8 ++ SizeofLong = 0x8 ++) ++ ++type ( ++ _C_long int64 ++) ++ ++type Timespec struct { ++ Sec int64 ++ Nsec int64 ++} ++ ++type Timeval struct { ++ Sec int64 ++ Usec int64 ++} ++ ++type Timex struct { ++ Modes uint32 ++ Offset int64 ++ Freq int64 ++ Maxerror int64 ++ Esterror int64 ++ Status int32 ++ Constant int64 ++ Precision int64 ++ Tolerance int64 ++ Time Timeval ++ Tick int64 ++ Ppsfreq int64 ++ Jitter int64 ++ Shift int32 ++ Stabil int64 ++ Jitcnt int64 ++ Calcnt int64 ++ Errcnt int64 ++ Stbcnt int64 ++ Tai int32 ++ _ [44]byte ++} ++ ++type Time_t int64 ++ ++type Tms struct { ++ Utime int64 ++ Stime int64 ++ Cutime int64 ++ Cstime int64 ++} ++ ++type Utimbuf struct { ++ Actime int64 ++ Modtime int64 ++} ++ ++type Rusage struct { ++ Utime Timeval ++ Stime Timeval ++ Maxrss int64 ++ Ixrss int64 ++ Idrss int64 ++ Isrss int64 ++ Minflt int64 ++ Majflt int64 ++ Nswap int64 ++ Inblock int64 ++ Oublock int64 ++ Msgsnd int64 ++ Msgrcv int64 ++ Nsignals int64 ++ Nvcsw int64 ++ Nivcsw int64 ++} ++ ++type Stat_t struct { ++ Dev uint64 ++ Ino uint64 ++ Mode uint32 ++ Nlink uint32 ++ Uid uint32 ++ Gid uint32 ++ Rdev uint64 ++ _ uint64 ++ Size int64 ++ Blksize int32 ++ _ int32 ++ Blocks int64 ++ Atim Timespec ++ Mtim Timespec ++ Ctim Timespec ++ _ [2]int32 ++} ++ ++type Dirent struct { ++ Ino uint64 ++ Off int64 ++ Reclen uint16 ++ Type uint8 ++ Name [256]int8 ++ _ [5]byte ++} ++ ++type Flock_t struct { ++ Type int16 ++ Whence int16 ++ Start int64 ++ Len int64 ++ Pid int32 ++ _ [4]byte ++} ++ ++type DmNameList struct { ++ Dev uint64 ++ Next uint32 ++ Name [0]byte ++ _ [4]byte ++} ++ ++const ( ++ FADV_DONTNEED = 0x4 ++ FADV_NOREUSE = 0x5 ++) ++ ++type RawSockaddrNFCLLCP struct { ++ Sa_family uint16 ++ Dev_idx uint32 ++ Target_idx uint32 ++ Nfc_protocol uint32 ++ Dsap uint8 ++ Ssap uint8 ++ Service_name [63]uint8 ++ Service_name_len uint64 ++} ++ ++type RawSockaddr struct { ++ Family uint16 ++ Data [14]int8 ++} ++ ++type RawSockaddrAny struct { ++ Addr RawSockaddr ++ Pad [96]int8 ++} ++ ++type Iovec struct { ++ Base *byte ++ Len uint64 ++} ++ ++type Msghdr struct { ++ Name *byte ++ Namelen uint32 ++ Iov *Iovec ++ Iovlen uint64 ++ Control *byte ++ Controllen uint64 ++ Flags int32 ++ _ [4]byte ++} ++ ++type Cmsghdr struct { ++ Len uint64 ++ Level int32 ++ Type int32 ++} ++ ++type ifreq struct { ++ Ifrn [16]byte ++ Ifru [24]byte ++} ++ ++const ( ++ SizeofSockaddrNFCLLCP = 0x60 ++ SizeofIovec = 0x10 ++ SizeofMsghdr = 0x38 ++ SizeofCmsghdr = 0x10 ++) ++ ++const ( ++ SizeofSockFprog = 0x10 ++) ++ ++type PtraceRegs struct { ++ Regs [32]uint64 ++ Orig_a0 uint64 ++ Era uint64 ++ Badv uint64 ++ Reserved [10]uint64 ++} ++ ++type FdSet struct { ++ Bits [16]int64 ++} ++ ++type Sysinfo_t struct { ++ Uptime int64 ++ Loads [3]uint64 ++ Totalram uint64 ++ Freeram uint64 ++ Sharedram uint64 ++ Bufferram uint64 ++ Totalswap uint64 ++ Freeswap uint64 ++ Procs uint16 ++ Pad uint16 ++ Totalhigh uint64 ++ Freehigh uint64 ++ Unit uint32 ++ _ [0]int8 ++ _ [4]byte ++} ++ ++type Ustat_t struct 
{ ++ Tfree int32 ++ Tinode uint64 ++ Fname [6]int8 ++ Fpack [6]int8 ++ _ [4]byte ++} ++ ++type EpollEvent struct { ++ Events uint32 ++ _ int32 ++ Fd int32 ++ Pad int32 ++} ++ ++const ( ++ POLLRDHUP = 0x2000 ++) ++ ++type Sigset_t struct { ++ Val [16]uint64 ++} ++ ++const _C__NSIG = 0x41 ++ ++type Termios struct { ++ Iflag uint32 ++ Oflag uint32 ++ Cflag uint32 ++ Lflag uint32 ++ Line uint8 ++ Cc [19]uint8 ++ Ispeed uint32 ++ Ospeed uint32 ++} ++ ++type Taskstats struct { ++ Version uint16 ++ Ac_exitcode uint32 ++ Ac_flag uint8 ++ Ac_nice uint8 ++ Cpu_count uint64 ++ Cpu_delay_total uint64 ++ Blkio_count uint64 ++ Blkio_delay_total uint64 ++ Swapin_count uint64 ++ Swapin_delay_total uint64 ++ Cpu_run_real_total uint64 ++ Cpu_run_virtual_total uint64 ++ Ac_comm [32]int8 ++ Ac_sched uint8 ++ Ac_pad [3]uint8 ++ _ [4]byte ++ Ac_uid uint32 ++ Ac_gid uint32 ++ Ac_pid uint32 ++ Ac_ppid uint32 ++ Ac_btime uint32 ++ Ac_etime uint64 ++ Ac_utime uint64 ++ Ac_stime uint64 ++ Ac_minflt uint64 ++ Ac_majflt uint64 ++ Coremem uint64 ++ Virtmem uint64 ++ Hiwater_rss uint64 ++ Hiwater_vm uint64 ++ Read_char uint64 ++ Write_char uint64 ++ Read_syscalls uint64 ++ Write_syscalls uint64 ++ Read_bytes uint64 ++ Write_bytes uint64 ++ Cancelled_write_bytes uint64 ++ Nvcsw uint64 ++ Nivcsw uint64 ++ Ac_utimescaled uint64 ++ Ac_stimescaled uint64 ++ Cpu_scaled_run_real_total uint64 ++ Freepages_count uint64 ++ Freepages_delay_total uint64 ++ Thrashing_count uint64 ++ Thrashing_delay_total uint64 ++ Ac_btime64 uint64 ++ Compact_count uint64 ++ Compact_delay_total uint64 ++} ++ ++type cpuMask uint64 ++ ++const ( ++ _NCPUBITS = 0x40 ++) ++ ++const ( ++ CBitFieldMaskBit0 = 0x1 ++ CBitFieldMaskBit1 = 0x2 ++ CBitFieldMaskBit2 = 0x4 ++ CBitFieldMaskBit3 = 0x8 ++ CBitFieldMaskBit4 = 0x10 ++ CBitFieldMaskBit5 = 0x20 ++ CBitFieldMaskBit6 = 0x40 ++ CBitFieldMaskBit7 = 0x80 ++ CBitFieldMaskBit8 = 0x100 ++ CBitFieldMaskBit9 = 0x200 ++ CBitFieldMaskBit10 = 0x400 ++ CBitFieldMaskBit11 = 0x800 ++ CBitFieldMaskBit12 = 0x1000 ++ CBitFieldMaskBit13 = 0x2000 ++ CBitFieldMaskBit14 = 0x4000 ++ CBitFieldMaskBit15 = 0x8000 ++ CBitFieldMaskBit16 = 0x10000 ++ CBitFieldMaskBit17 = 0x20000 ++ CBitFieldMaskBit18 = 0x40000 ++ CBitFieldMaskBit19 = 0x80000 ++ CBitFieldMaskBit20 = 0x100000 ++ CBitFieldMaskBit21 = 0x200000 ++ CBitFieldMaskBit22 = 0x400000 ++ CBitFieldMaskBit23 = 0x800000 ++ CBitFieldMaskBit24 = 0x1000000 ++ CBitFieldMaskBit25 = 0x2000000 ++ CBitFieldMaskBit26 = 0x4000000 ++ CBitFieldMaskBit27 = 0x8000000 ++ CBitFieldMaskBit28 = 0x10000000 ++ CBitFieldMaskBit29 = 0x20000000 ++ CBitFieldMaskBit30 = 0x40000000 ++ CBitFieldMaskBit31 = 0x80000000 ++ CBitFieldMaskBit32 = 0x100000000 ++ CBitFieldMaskBit33 = 0x200000000 ++ CBitFieldMaskBit34 = 0x400000000 ++ CBitFieldMaskBit35 = 0x800000000 ++ CBitFieldMaskBit36 = 0x1000000000 ++ CBitFieldMaskBit37 = 0x2000000000 ++ CBitFieldMaskBit38 = 0x4000000000 ++ CBitFieldMaskBit39 = 0x8000000000 ++ CBitFieldMaskBit40 = 0x10000000000 ++ CBitFieldMaskBit41 = 0x20000000000 ++ CBitFieldMaskBit42 = 0x40000000000 ++ CBitFieldMaskBit43 = 0x80000000000 ++ CBitFieldMaskBit44 = 0x100000000000 ++ CBitFieldMaskBit45 = 0x200000000000 ++ CBitFieldMaskBit46 = 0x400000000000 ++ CBitFieldMaskBit47 = 0x800000000000 ++ CBitFieldMaskBit48 = 0x1000000000000 ++ CBitFieldMaskBit49 = 0x2000000000000 ++ CBitFieldMaskBit50 = 0x4000000000000 ++ CBitFieldMaskBit51 = 0x8000000000000 ++ CBitFieldMaskBit52 = 0x10000000000000 ++ CBitFieldMaskBit53 = 0x20000000000000 ++ CBitFieldMaskBit54 = 0x40000000000000 ++ CBitFieldMaskBit55 = 
0x80000000000000 ++ CBitFieldMaskBit56 = 0x100000000000000 ++ CBitFieldMaskBit57 = 0x200000000000000 ++ CBitFieldMaskBit58 = 0x400000000000000 ++ CBitFieldMaskBit59 = 0x800000000000000 ++ CBitFieldMaskBit60 = 0x1000000000000000 ++ CBitFieldMaskBit61 = 0x2000000000000000 ++ CBitFieldMaskBit62 = 0x4000000000000000 ++ CBitFieldMaskBit63 = 0x8000000000000000 ++) ++ ++type SockaddrStorage struct { ++ Family uint16 ++ _ [118]int8 ++ _ uint64 ++} ++ ++type HDGeometry struct { ++ Heads uint8 ++ Sectors uint8 ++ Cylinders uint16 ++ Start uint64 ++} ++ ++type Statfs_t struct { ++ Type int64 ++ Bsize int64 ++ Blocks uint64 ++ Bfree uint64 ++ Bavail uint64 ++ Files uint64 ++ Ffree uint64 ++ Fsid Fsid ++ Namelen int64 ++ Frsize int64 ++ Flags int64 ++ Spare [4]int64 ++} ++ ++type TpacketHdr struct { ++ Status uint64 ++ Len uint32 ++ Snaplen uint32 ++ Mac uint16 ++ Net uint16 ++ Sec uint32 ++ Usec uint32 ++ _ [4]byte ++} ++ ++const ( ++ SizeofTpacketHdr = 0x20 ++) ++ ++type RTCPLLInfo struct { ++ Ctrl int32 ++ Value int32 ++ Max int32 ++ Min int32 ++ Posmult int32 ++ Negmult int32 ++ Clock int64 ++} ++ ++type BlkpgPartition struct { ++ Start int64 ++ Length int64 ++ Pno int32 ++ Devname [64]uint8 ++ Volname [64]uint8 ++ _ [4]byte ++} ++ ++const ( ++ BLKPG = 0x1269 ++) ++ ++type XDPUmemReg struct { ++ Addr uint64 ++ Len uint64 ++ Size uint32 ++ Headroom uint32 ++ Flags uint32 ++ _ [4]byte ++} ++ ++type CryptoUserAlg struct { ++ Name [64]int8 ++ Driver_name [64]int8 ++ Module_name [64]int8 ++ Type uint32 ++ Mask uint32 ++ Refcnt uint32 ++ Flags uint32 ++} ++ ++type CryptoStatAEAD struct { ++ Type [64]int8 ++ Encrypt_cnt uint64 ++ Encrypt_tlen uint64 ++ Decrypt_cnt uint64 ++ Decrypt_tlen uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatAKCipher struct { ++ Type [64]int8 ++ Encrypt_cnt uint64 ++ Encrypt_tlen uint64 ++ Decrypt_cnt uint64 ++ Decrypt_tlen uint64 ++ Verify_cnt uint64 ++ Sign_cnt uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatCipher struct { ++ Type [64]int8 ++ Encrypt_cnt uint64 ++ Encrypt_tlen uint64 ++ Decrypt_cnt uint64 ++ Decrypt_tlen uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatCompress struct { ++ Type [64]int8 ++ Compress_cnt uint64 ++ Compress_tlen uint64 ++ Decompress_cnt uint64 ++ Decompress_tlen uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatHash struct { ++ Type [64]int8 ++ Hash_cnt uint64 ++ Hash_tlen uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatKPP struct { ++ Type [64]int8 ++ Setsecret_cnt uint64 ++ Generate_public_key_cnt uint64 ++ Compute_shared_secret_cnt uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatRNG struct { ++ Type [64]int8 ++ Generate_cnt uint64 ++ Generate_tlen uint64 ++ Seed_cnt uint64 ++ Err_cnt uint64 ++} ++ ++type CryptoStatLarval struct { ++ Type [64]int8 ++} ++ ++type CryptoReportLarval struct { ++ Type [64]int8 ++} ++ ++type CryptoReportHash struct { ++ Type [64]int8 ++ Blocksize uint32 ++ Digestsize uint32 ++} ++ ++type CryptoReportCipher struct { ++ Type [64]int8 ++ Blocksize uint32 ++ Min_keysize uint32 ++ Max_keysize uint32 ++} ++ ++type CryptoReportBlkCipher struct { ++ Type [64]int8 ++ Geniv [64]int8 ++ Blocksize uint32 ++ Min_keysize uint32 ++ Max_keysize uint32 ++ Ivsize uint32 ++} ++ ++type CryptoReportAEAD struct { ++ Type [64]int8 ++ Geniv [64]int8 ++ Blocksize uint32 ++ Maxauthsize uint32 ++ Ivsize uint32 ++} ++ ++type CryptoReportComp struct { ++ Type [64]int8 ++} ++ ++type CryptoReportRNG struct { ++ Type [64]int8 ++ Seedsize uint32 ++} ++ ++type CryptoReportAKCipher struct { ++ Type [64]int8 ++} ++ ++type CryptoReportKPP struct { ++ 
Type [64]int8 ++} ++ ++type CryptoReportAcomp struct { ++ Type [64]int8 ++} ++ ++type LoopInfo struct { ++ Number int32 ++ Device uint32 ++ Inode uint64 ++ Rdevice uint32 ++ Offset int32 ++ Encrypt_type int32 ++ Encrypt_key_size int32 ++ Flags int32 ++ Name [64]int8 ++ Encrypt_key [32]uint8 ++ Init [2]uint64 ++ Reserved [4]int8 ++ _ [4]byte ++} ++ ++type TIPCSubscr struct { ++ Seq TIPCServiceRange ++ Timeout uint32 ++ Filter uint32 ++ Handle [8]int8 ++} ++ ++type TIPCSIOCLNReq struct { ++ Peer uint32 ++ Id uint32 ++ Linkname [68]int8 ++} ++ ++type TIPCSIOCNodeIDReq struct { ++ Peer uint32 ++ Id [16]int8 ++} ++ ++type PPSKInfo struct { ++ Assert_sequence uint32 ++ Clear_sequence uint32 ++ Assert_tu PPSKTime ++ Clear_tu PPSKTime ++ Current_mode int32 ++ _ [4]byte ++} ++ ++const ( ++ PPS_GETPARAMS = 0x800870a1 ++ PPS_SETPARAMS = 0x400870a2 ++ PPS_GETCAP = 0x800870a3 ++ PPS_FETCH = 0xc00870a4 ++) ++ ++const ( ++ PIDFD_NONBLOCK = 0x800 ++) ++ ++type SysvIpcPerm struct { ++ Key int32 ++ Uid uint32 ++ Gid uint32 ++ Cuid uint32 ++ Cgid uint32 ++ Mode uint32 ++ _ [0]uint8 ++ Seq uint16 ++ _ uint16 ++ _ uint64 ++ _ uint64 ++} ++type SysvShmDesc struct { ++ Perm SysvIpcPerm ++ Segsz uint64 ++ Atime int64 ++ Dtime int64 ++ Ctime int64 ++ Cpid int32 ++ Lpid int32 ++ Nattch uint64 ++ _ uint64 ++ _ uint64 ++} +-- +2.27.0 + diff --git a/0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch b/0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch new file mode 100644 index 0000000..9d3f908 --- /dev/null +++ b/0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch @@ -0,0 +1,52 @@ +From b0c76871c931dc6c048318447d0a7e8d09a71c66 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Tue, 19 Oct 2021 15:37:32 +0800 +Subject: [PATCH 33/56] cmd/vendor: update vendored golang.org/x/tools to + support loong64 + +Change-Id: I3501138fb0b37f0b4872596dd317f8e51af63b28 +--- + .../x/tools/go/analysis/passes/asmdecl/asmdecl.go | 13 +++++++++++++ + 1 file changed, 13 insertions(+) + +diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +index eb0016b..8f8da98 100644 +--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go ++++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +@@ -81,6 +81,7 @@ var ( + asmArchArm = asmArch{name: "arm", bigEndian: false, stack: "R13", lr: true} + asmArchArm64 = asmArch{name: "arm64", bigEndian: false, stack: "RSP", lr: true} + asmArchAmd64 = asmArch{name: "amd64", bigEndian: false, stack: "SP", lr: false} ++ asmArchLoong64 = asmArch{name: "loong64", bigEndian: false, stack: "R3", lr: true} + asmArchMips = asmArch{name: "mips", bigEndian: true, stack: "R29", lr: true} + asmArchMipsLE = asmArch{name: "mipsle", bigEndian: false, stack: "R29", lr: true} + asmArchMips64 = asmArch{name: "mips64", bigEndian: true, stack: "R29", lr: true} +@@ -96,6 +97,7 @@ var ( + &asmArchArm, + &asmArchArm64, + &asmArchAmd64, ++ &asmArchLoong64, + &asmArchMips, + &asmArchMipsLE, + &asmArchMips64, +@@ -721,6 +723,17 @@ func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr stri + case "MOVV", "MOVD": + src = 8 + } ++ case "loong64": ++ switch op { ++ case "MOVB", "MOVBU": ++ src = 1 ++ case "MOVH", "MOVHU": ++ src = 2 ++ case "MOVW", "MOVWU", "MOVF": ++ src = 4 ++ case "MOVV", "MOVD": ++ src = 8 ++ } + case "s390x": + switch op { + case "MOVB", "MOVBZ": +-- +2.27.0 + diff --git 
a/0034-internal-bytealg-support-basic-byte-operation-on-loo.patch b/0034-internal-bytealg-support-basic-byte-operation-on-loo.patch new file mode 100644 index 0000000..e0c0026 --- /dev/null +++ b/0034-internal-bytealg-support-basic-byte-operation-on-loo.patch @@ -0,0 +1,297 @@ +From 669169e703d9ba7faf2b4aba7929d0828dfb08da Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:31:52 +0800 +Subject: [PATCH 34/56] internal/bytealg: support basic byte operation on + loong64 + +Change-Id: I4ac6d38dc632abfa0b698325ca0ae349c0d7ecd3 +--- + src/internal/bytealg/compare_generic.go | 4 +- + src/internal/bytealg/compare_loong64.s | 88 +++++++++++++++++++++++ + src/internal/bytealg/compare_native.go | 4 +- + src/internal/bytealg/equal_loong64.s | 54 ++++++++++++++ + src/internal/bytealg/indexbyte_generic.go | 4 +- + src/internal/bytealg/indexbyte_loong64.s | 54 ++++++++++++++ + src/internal/bytealg/indexbyte_native.go | 4 +- + 7 files changed, 204 insertions(+), 8 deletions(-) + create mode 100644 src/internal/bytealg/compare_loong64.s + create mode 100644 src/internal/bytealg/equal_loong64.s + create mode 100644 src/internal/bytealg/indexbyte_loong64.s + +diff --git a/src/internal/bytealg/compare_generic.go b/src/internal/bytealg/compare_generic.go +index 0690d0c..70428ba 100644 +--- a/src/internal/bytealg/compare_generic.go ++++ b/src/internal/bytealg/compare_generic.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !ppc64 && !ppc64le && !mips && !mipsle && !wasm && !mips64 && !mips64le +-// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le ++//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !ppc64 && !ppc64le && !mips && !mipsle && !wasm && !mips64 && !mips64le && !loong64 ++// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!wasm,!mips64,!mips64le,!loong64 + + package bytealg + +diff --git a/src/internal/bytealg/compare_loong64.s b/src/internal/bytealg/compare_loong64.s +new file mode 100644 +index 0000000..853bab3 +--- /dev/null ++++ b/src/internal/bytealg/compare_loong64.s +@@ -0,0 +1,88 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++TEXT ·Compare(SB),NOSPLIT,$0-56 ++ MOVV a_base+0(FP), R6 ++ MOVV b_base+24(FP), R7 ++ MOVV a_len+8(FP), R4 ++ MOVV b_len+32(FP), R5 ++ MOVV $ret+48(FP), R13 ++ JMP cmpbody<>(SB) ++ ++TEXT runtime·cmpstring(SB),NOSPLIT,$0-40 ++ MOVV a_base+0(FP), R6 ++ MOVV b_base+16(FP), R7 ++ MOVV a_len+8(FP), R4 ++ MOVV b_len+24(FP), R5 ++ MOVV $ret+32(FP), R13 ++ JMP cmpbody<>(SB) ++ ++// On entry: ++// R4 length of a ++// R5 length of b ++// R6 points to the start of a ++// R7 points to the start of b ++// R13 points to the return value (-1/0/1) ++TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0 ++ BEQ R6, R7, samebytes // same start of a and b ++ ++ SGTU R4, R5, R9 ++ BNE R0, R9, r2_lt_r1 ++ MOVV R4, R14 ++ JMP entry ++r2_lt_r1: ++ MOVV R5, R14 // R14 is min(R4, R5) ++entry: ++ ADDV R6, R14, R12 // R6 start of a, R14 end of a ++ BEQ R6, R12, samebytes // length is 0 ++ ++ SRLV $4, R14 // R14 is number of chunks ++ BEQ R0, R14, byte_loop ++ ++ // make sure both a and b are aligned. 
++ OR R6, R7, R15 ++ AND $7, R15 ++ BNE R0, R15, byte_loop ++ ++chunk16_loop: ++ BEQ R0, R14, byte_loop ++ MOVV (R6), R8 ++ MOVV (R7), R9 ++ BNE R8, R9, byte_loop ++ MOVV 8(R6), R16 ++ MOVV 8(R7), R17 ++ ADDV $16, R6 ++ ADDV $16, R7 ++ SUBVU $1, R14 ++ BEQ R16, R17, chunk16_loop ++ SUBV $8, R6 ++ SUBV $8, R7 ++ ++byte_loop: ++ BEQ R6, R12, samebytes ++ MOVBU (R6), R8 ++ ADDVU $1, R6 ++ MOVBU (R7), R9 ++ ADDVU $1, R7 ++ BEQ R8, R9, byte_loop ++ ++byte_cmp: ++ SGTU R8, R9, R12 // R12 = 1 if (R8 > R9) ++ BNE R0, R12, ret ++ MOVV $-1, R12 ++ JMP ret ++ ++samebytes: ++ SGTU R4, R5, R8 ++ SGTU R5, R4, R9 ++ SUBV R9, R8, R12 ++ ++ret: ++ MOVV R12, (R13) ++ RET +diff --git a/src/internal/bytealg/compare_native.go b/src/internal/bytealg/compare_native.go +index baa188f..c473cca 100644 +--- a/src/internal/bytealg/compare_native.go ++++ b/src/internal/bytealg/compare_native.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || wasm || mips64 || mips64le +-// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le ++//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || wasm || mips64 || mips64le || loong64 ++// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle wasm mips64 mips64le loong64 + + package bytealg + +diff --git a/src/internal/bytealg/equal_loong64.s b/src/internal/bytealg/equal_loong64.s +new file mode 100644 +index 0000000..a954407 +--- /dev/null ++++ b/src/internal/bytealg/equal_loong64.s +@@ -0,0 +1,54 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++#define REGCTXT R29 ++ ++// memequal(a, b unsafe.Pointer, size uintptr) bool ++TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25 ++ MOVV a+0(FP), R4 ++ MOVV b+8(FP), R5 ++ BEQ R4, R5, eq ++ MOVV size+16(FP), R6 ++ ADDV R4, R6, R7 ++loop: ++ BNE R4, R7, test ++ MOVV $1, R4 ++ MOVB R4, ret+24(FP) ++ RET ++test: ++ MOVBU (R4), R9 ++ ADDV $1, R4 ++ MOVBU (R5), R10 ++ ADDV $1, R5 ++ BEQ R9, R10, loop ++ ++ MOVB R0, ret+24(FP) ++ RET ++eq: ++ MOVV $1, R4 ++ MOVB R4, ret+24(FP) ++ RET ++ ++// memequal_varlen(a, b unsafe.Pointer) bool ++TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17 ++ MOVV a+0(FP), R4 ++ MOVV b+8(FP), R5 ++ BEQ R4, R5, eq ++ MOVV 8(REGCTXT), R6 // compiler stores size at offset 8 in the closure ++ MOVV R4, 8(R3) ++ MOVV R5, 16(R3) ++ MOVV R6, 24(R3) ++ JAL runtime·memequal(SB) ++ MOVBU 32(R3), R4 ++ MOVB R4, ret+16(FP) ++ RET ++eq: ++ MOVV $1, R4 ++ MOVB R4, ret+16(FP) ++ RET +diff --git a/src/internal/bytealg/indexbyte_generic.go b/src/internal/bytealg/indexbyte_generic.go +index 6ef639f..95123fd 100644 +--- a/src/internal/bytealg/indexbyte_generic.go ++++ b/src/internal/bytealg/indexbyte_generic.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
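Note: cmpbody above compares min(len(a), len(b)) bytes, walking 16-byte chunks while both pointers share alignment and then falling back to a byte loop, and decides by the lengths when every shared byte matches. An illustrative Go sketch of that contract (compareSketch is a made-up name, not part of the patch):

    package main

    import "fmt"

    // compareSketch mirrors the -1/0/+1 result that the assembly writes
    // through R13: the first differing byte decides, otherwise the shorter
    // operand sorts first.
    func compareSketch(a, b []byte) int {
            n := len(a)
            if len(b) < n {
                    n = len(b)
            }
            for i := 0; i < n; i++ {
                    if a[i] != b[i] {
                            if a[i] < b[i] {
                                    return -1
                            }
                            return 1
                    }
            }
            switch {
            case len(a) < len(b):
                    return -1
            case len(a) > len(b):
                    return 1
            }
            return 0
    }

    func main() {
            fmt.Println(compareSketch([]byte("abc"), []byte("abd"))) // -1
    }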
+ +-//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !ppc64 && !ppc64le && !mips && !mipsle && !mips64 && !mips64le && !riscv64 && !wasm +-// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!riscv64,!wasm ++//go:build !386 && !amd64 && !s390x && !arm && !arm64 && !ppc64 && !ppc64le && !mips && !mipsle && !mips64 && !mips64le && !riscv64 && !wasm && !loong64 ++// +build !386,!amd64,!s390x,!arm,!arm64,!ppc64,!ppc64le,!mips,!mipsle,!mips64,!mips64le,!riscv64,!wasm,!loong64 + + package bytealg + +diff --git a/src/internal/bytealg/indexbyte_loong64.s b/src/internal/bytealg/indexbyte_loong64.s +new file mode 100644 +index 0000000..a585cdb +--- /dev/null ++++ b/src/internal/bytealg/indexbyte_loong64.s +@@ -0,0 +1,54 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "go_asm.h" ++#include "textflag.h" ++ ++TEXT ·IndexByte(SB),NOSPLIT,$0-40 ++ MOVV b_base+0(FP), R4 ++ MOVV b_len+8(FP), R5 ++ MOVBU c+24(FP), R6 // byte to find ++ MOVV R4, R7 // store base for later ++ ADDV R4, R5 // end ++ ADDV $-1, R4 ++ ++loop: ++ ADDV $1, R4 ++ BEQ R4, R5, notfound ++ MOVBU (R4), R8 ++ BNE R6, R8, loop ++ ++ SUBV R7, R4 // remove base ++ MOVV R4, ret+32(FP) ++ RET ++ ++notfound: ++ MOVV $-1, R4 ++ MOVV R4, ret+32(FP) ++ RET ++ ++TEXT ·IndexByteString(SB),NOSPLIT,$0-32 ++ MOVV s_base+0(FP), R4 ++ MOVV s_len+8(FP), R5 ++ MOVBU c+16(FP), R6 // byte to find ++ MOVV R4, R7 // store base for later ++ ADDV R4, R5 // end ++ ADDV $-1, R4 ++ ++loop: ++ ADDV $1, R4 ++ BEQ R4, R5, notfound ++ MOVBU (R4), R8 ++ BNE R6, R8, loop ++ ++ SUBV R7, R4 // remove base ++ MOVV R4, ret+24(FP) ++ RET ++ ++notfound: ++ MOVV $-1, R4 ++ MOVV R4, ret+24(FP) ++ RET +diff --git a/src/internal/bytealg/indexbyte_native.go b/src/internal/bytealg/indexbyte_native.go +index 965f38f..6775b3e 100644 +--- a/src/internal/bytealg/indexbyte_native.go ++++ b/src/internal/bytealg/indexbyte_native.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
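Note: IndexByte and IndexByteString above are plain linear scans: advance one byte at a time and report the offset of the first match, or -1 when the byte never occurs. The same loop in Go, as an illustration only (indexByteSketch is a made-up name, not part of the patch):

    package main

    import "fmt"

    // indexByteSketch returns the index of the first occurrence of c in b,
    // or -1 if c is absent, which is the contract the assembly loop implements.
    func indexByteSketch(b []byte, c byte) int {
            for i := 0; i < len(b); i++ {
                    if b[i] == c {
                            return i
                    }
            }
            return -1
    }

    func main() {
            fmt.Println(indexByteSketch([]byte("loong64"), '6')) // 5
    }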
+ +-//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || mips64 || mips64le || riscv64 || wasm +-// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le riscv64 wasm ++//go:build 386 || amd64 || s390x || arm || arm64 || ppc64 || ppc64le || mips || mipsle || mips64 || mips64le || riscv64 || wasm || loong64 ++// +build 386 amd64 s390x arm arm64 ppc64 ppc64le mips mipsle mips64 mips64le riscv64 wasm loong64 + + package bytealg + +-- +2.27.0 + diff --git a/0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch b/0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch new file mode 100644 index 0000000..bd963f7 --- /dev/null +++ b/0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch @@ -0,0 +1,390 @@ +From 466de79686d6ca9141595b0c2e21cf121f15ecb7 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 17:11:14 +0800 +Subject: [PATCH 35/56] debug, go, math, os, reflect, vendor: support standard + library for loong64 + +Change-Id: I2ad9ed01fc913b90e75023ac0fa70de87a9f5de1 +--- + src/debug/elf/elf.go | 114 ++++++++++++++++++ + src/debug/elf/file.go | 50 ++++++++ + src/go/types/gccgosizes.go | 1 + + src/go/types/sizes.go | 3 +- + src/math/big/arith_loong64.s | 37 ++++++ + src/os/endian_little.go | 4 +- + src/reflect/asm_loong64.s | 42 +++++++ + .../golang.org/x/sys/cpu/cpu_loong64.go | 14 +++ + 8 files changed, 262 insertions(+), 3 deletions(-) + create mode 100644 src/math/big/arith_loong64.s + create mode 100644 src/reflect/asm_loong64.s + create mode 100644 src/vendor/golang.org/x/sys/cpu/cpu_loong64.go + +diff --git a/src/debug/elf/elf.go b/src/debug/elf/elf.go +index 9f8399d..972a75e 100644 +--- a/src/debug/elf/elf.go ++++ b/src/debug/elf/elf.go +@@ -384,6 +384,7 @@ const ( + EM_RISCV Machine = 243 /* RISC-V */ + EM_LANAI Machine = 244 /* Lanai 32-bit processor */ + EM_BPF Machine = 247 /* Linux BPF – in-kernel virtual machine */ ++ EM_LOONGARCH Machine = 258 /* LoongArch */ + + /* Non-standard or deprecated. */ + EM_486 Machine = 6 /* Intel i486. */ +@@ -575,6 +576,7 @@ var machineStrings = []intName{ + {243, "EM_RISCV"}, + {244, "EM_LANAI"}, + {247, "EM_BPF"}, ++ {258, "EM_LOONGARCH"}, + + /* Non-standard or deprecated. */ + {6, "EM_486"}, +@@ -2150,6 +2152,118 @@ var rmipsStrings = []intName{ + func (i R_MIPS) String() string { return stringName(uint32(i), rmipsStrings, false) } + func (i R_MIPS) GoString() string { return stringName(uint32(i), rmipsStrings, true) } + ++// Relocation types for LARCH. 
++type R_LARCH int ++ ++const ( ++ R_LARCH_NONE R_LARCH = 0 ++ R_LARCH_32 R_LARCH = 1 ++ R_LARCH_64 R_LARCH = 2 ++ R_LARCH_RELATIVE R_LARCH = 3 ++ R_LARCH_COPY R_LARCH = 4 ++ R_LARCH_JUMP_SLOT R_LARCH = 5 ++ R_LARCH_TLS_DTPMOD32 R_LARCH = 6 ++ R_LARCH_TLS_DTPMOD64 R_LARCH = 7 ++ R_LARCH_TLS_DTPREL32 R_LARCH = 8 ++ R_LARCH_TLS_DTPREL64 R_LARCH = 9 ++ R_LARCH_TLS_TPREL32 R_LARCH = 10 ++ R_LARCH_TLS_TPREL64 R_LARCH = 11 ++ R_LARCH_IRELATIVE R_LARCH = 12 ++ R_LARCH_MARK_LA R_LARCH = 20 ++ R_LARCH_MARK_PCREL R_LARCH = 21 ++ R_LARCH_SOP_PUSH_PCREL R_LARCH = 22 ++ R_LARCH_SOP_PUSH_ABSOLUTE R_LARCH = 23 ++ R_LARCH_SOP_PUSH_DUP R_LARCH = 24 ++ R_LARCH_SOP_PUSH_GPREL R_LARCH = 25 ++ R_LARCH_SOP_PUSH_TLS_TPREL R_LARCH = 26 ++ R_LARCH_SOP_PUSH_TLS_GOT R_LARCH = 27 ++ R_LARCH_SOP_PUSH_TLS_GD R_LARCH = 28 ++ R_LARCH_SOP_PUSH_PLT_PCREL R_LARCH = 29 ++ R_LARCH_SOP_ASSERT R_LARCH = 30 ++ R_LARCH_SOP_NOT R_LARCH = 31 ++ R_LARCH_SOP_SUB R_LARCH = 32 ++ R_LARCH_SOP_SL R_LARCH = 33 ++ R_LARCH_SOP_SR R_LARCH = 34 ++ R_LARCH_SOP_ADD R_LARCH = 35 ++ R_LARCH_SOP_AND R_LARCH = 36 ++ R_LARCH_SOP_IF_ELSE R_LARCH = 37 ++ R_LARCH_SOP_POP_32_S_10_5 R_LARCH = 38 ++ R_LARCH_SOP_POP_32_U_10_12 R_LARCH = 39 ++ R_LARCH_SOP_POP_32_S_10_12 R_LARCH = 40 ++ R_LARCH_SOP_POP_32_S_10_16 R_LARCH = 41 ++ R_LARCH_SOP_POP_32_S_10_16_S2 R_LARCH = 42 ++ R_LARCH_SOP_POP_32_S_5_20 R_LARCH = 43 ++ R_LARCH_SOP_POP_32_S_0_5_10_16_S2 R_LARCH = 44 ++ R_LARCH_SOP_POP_32_S_0_10_10_16_S2 R_LARCH = 45 ++ R_LARCH_SOP_POP_32_U R_LARCH = 46 ++ R_LARCH_ADD8 R_LARCH = 47 ++ R_LARCH_ADD16 R_LARCH = 48 ++ R_LARCH_ADD24 R_LARCH = 49 ++ R_LARCH_ADD32 R_LARCH = 50 ++ R_LARCH_ADD64 R_LARCH = 51 ++ R_LARCH_SUB8 R_LARCH = 52 ++ R_LARCH_SUB16 R_LARCH = 53 ++ R_LARCH_SUB24 R_LARCH = 54 ++ R_LARCH_SUB32 R_LARCH = 55 ++ R_LARCH_SUB64 R_LARCH = 56 ++) ++ ++var rlarchStrings = []intName{ ++ {0, "R_LARCH_NONE"}, ++ {1, "R_LARCH_32"}, ++ {2, "R_LARCH_64"}, ++ {3, "R_LARCH_RELATIVE"}, ++ {4, "R_LARCH_COPY"}, ++ {5, "R_LARCH_JUMP_SLOT"}, ++ {6, "R_LARCH_TLS_DTPMOD32"}, ++ {7, "R_LARCH_TLS_DTPMOD64"}, ++ {8, "R_LARCH_TLS_DTPREL32"}, ++ {9, "R_LARCH_TLS_DTPREL64"}, ++ {10, "R_LARCH_TLS_TPREL32"}, ++ {11, "R_LARCH_TLS_TPREL64"}, ++ {12, "R_LARCH_IRELATIVE"}, ++ {20, "R_LARCH_MARK_LA"}, ++ {21, "R_LARCH_MARK_PCREL"}, ++ {22, "R_LARCH_SOP_PUSH_PCREL"}, ++ {23, "R_LARCH_SOP_PUSH_ABSOLUTE"}, ++ {24, "R_LARCH_SOP_PUSH_DUP"}, ++ {25, "R_LARCH_SOP_PUSH_GPREL"}, ++ {26, "R_LARCH_SOP_PUSH_TLS_TPREL"}, ++ {27, "R_LARCH_SOP_PUSH_TLS_GOT"}, ++ {28, "R_LARCH_SOP_PUSH_TLS_GD"}, ++ {29, "R_LARCH_SOP_PUSH_PLT_PCREL"}, ++ {30, "R_LARCH_SOP_ASSERT"}, ++ {31, "R_LARCH_SOP_NOT"}, ++ {32, "R_LARCH_SOP_SUB"}, ++ {33, "R_LARCH_SOP_SL"}, ++ {34, "R_LARCH_SOP_SR"}, ++ {35, "R_LARCH_SOP_ADD"}, ++ {36, "R_LARCH_SOP_AND"}, ++ {37, "R_LARCH_SOP_IF_ELSE"}, ++ {38, "R_LARCH_SOP_POP_32_S_10_5"}, ++ {39, "R_LARCH_SOP_POP_32_U_10_12"}, ++ {40, "R_LARCH_SOP_POP_32_S_10_12"}, ++ {41, "R_LARCH_SOP_POP_32_S_10_16"}, ++ {42, "R_LARCH_SOP_POP_32_S_10_16_S2"}, ++ {43, "R_LARCH_SOP_POP_32_S_5_20"}, ++ {44, "R_LARCH_SOP_POP_32_S_0_5_10_16_S2"}, ++ {45, "R_LARCH_SOP_POP_32_S_0_10_10_16_S2"}, ++ {46, "R_LARCH_SOP_POP_32_U"}, ++ {47, "R_LARCH_ADD8"}, ++ {48, "R_LARCH_ADD16"}, ++ {49, "R_LARCH_ADD24"}, ++ {50, "R_LARCH_ADD32"}, ++ {51, "R_LARCH_ADD64"}, ++ {52, "R_LARCH_SUB8"}, ++ {53, "R_LARCH_SUB16"}, ++ {54, "R_LARCH_SUB24"}, ++ {55, "R_LARCH_SUB32"}, ++ {56, "R_LARCH_SUB64"}, ++} ++ ++func (i R_LARCH) String() string { return stringName(uint32(i), rlarchStrings, false) } ++func (i R_LARCH) GoString() string { return 
stringName(uint32(i), rlarchStrings, true) } ++ + // Relocation types for PowerPC. + // + // Values that are shared by both R_PPC and R_PPC64 are prefixed with +diff --git a/src/debug/elf/file.go b/src/debug/elf/file.go +index b25d820..37739fb 100644 +--- a/src/debug/elf/file.go ++++ b/src/debug/elf/file.go +@@ -617,6 +617,8 @@ func (f *File) applyRelocations(dst []byte, rels []byte) error { + return f.applyRelocationsMIPS(dst, rels) + case f.Class == ELFCLASS64 && f.Machine == EM_MIPS: + return f.applyRelocationsMIPS64(dst, rels) ++ case f.Class == ELFCLASS64 && f.Machine == EM_LOONGARCH: ++ return f.applyRelocationsLOONG64(dst, rels) + case f.Class == ELFCLASS64 && f.Machine == EM_RISCV: + return f.applyRelocationsRISCV64(dst, rels) + case f.Class == ELFCLASS64 && f.Machine == EM_S390: +@@ -990,6 +992,54 @@ func (f *File) applyRelocationsMIPS64(dst []byte, rels []byte) error { + return nil + } + ++func (f *File) applyRelocationsLOONG64(dst []byte, rels []byte) error { ++ // 24 is the size of Rela64. ++ if len(rels)%24 != 0 { ++ return errors.New("length of relocation section is not a multiple of 24") ++ } ++ ++ symbols, _, err := f.getSymbols(SHT_SYMTAB) ++ if err != nil { ++ return err ++ } ++ ++ b := bytes.NewReader(rels) ++ var rela Rela64 ++ ++ for b.Len() > 0 { ++ binary.Read(b, f.ByteOrder, &rela) ++ var symNo uint64 ++ var t R_LARCH ++ symNo = rela.Info >> 32 ++ t = R_LARCH(rela.Info & 0xffff) ++ ++ if symNo == 0 || symNo > uint64(len(symbols)) { ++ continue ++ } ++ sym := &symbols[symNo-1] ++ if !canApplyRelocation(sym) { ++ continue ++ } ++ ++ switch t { ++ case R_LARCH_64: ++ if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { ++ continue ++ } ++ val64 := sym.Value + uint64(rela.Addend) ++ f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) ++ case R_LARCH_32: ++ if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { ++ continue ++ } ++ val32 := uint32(sym.Value) + uint32(rela.Addend) ++ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) ++ } ++ } ++ ++ return nil ++} ++ + func (f *File) applyRelocationsRISCV64(dst []byte, rels []byte) error { + // 24 is the size of Rela64. + if len(rels)%24 != 0 { +diff --git a/src/go/types/gccgosizes.go b/src/go/types/gccgosizes.go +index d5c92c6..9d077cc 100644 +--- a/src/go/types/gccgosizes.go ++++ b/src/go/types/gccgosizes.go +@@ -17,6 +17,7 @@ var gccgoArchSizes = map[string]*StdSizes{ + "arm64": {8, 8}, + "arm64be": {8, 8}, + "ia64": {8, 8}, ++ "loong64": {8, 8}, + "m68k": {4, 2}, + "mips": {4, 8}, + "mipsle": {4, 8}, +diff --git a/src/go/types/sizes.go b/src/go/types/sizes.go +index 67052bb..9d6cabf 100644 +--- a/src/go/types/sizes.go ++++ b/src/go/types/sizes.go +@@ -163,6 +163,7 @@ var gcArchSizes = map[string]*StdSizes{ + "arm64": {8, 8}, + "amd64": {8, 8}, + "amd64p32": {4, 8}, ++ "loong64": {8, 8}, + "mips": {4, 4}, + "mipsle": {4, 4}, + "mips64": {8, 8}, +@@ -181,7 +182,7 @@ var gcArchSizes = map[string]*StdSizes{ + // The result is nil if a compiler/architecture pair is not known. + // + // Supported architectures for compiler "gc": +-// "386", "arm", "arm64", "amd64", "amd64p32", "mips", "mipsle", ++// "386", "arm", "arm64", "amd64", "amd64p32", "loong64", "mips", "mipsle", + // "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm". 
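Note: applyRelocationsLOONG64 above takes the symbol index from the upper 32 bits of Rela64.Info and the R_LARCH relocation type from the low 16 bits, then patches 8 bytes for R_LARCH_64 and 4 bytes for R_LARCH_32. A standalone sketch of that decoding with a hypothetical Info value (decodeRelaInfo is a made-up helper, not part of the patch):

    package main

    import "fmt"

    // decodeRelaInfo splits a Rela64 Info word the same way the patch does:
    // symbol index in the upper 32 bits, relocation type in the low 16 bits.
    func decodeRelaInfo(info uint64) (symNo, relType uint64) {
            return info >> 32, info & 0xffff
    }

    func main() {
            // Hypothetical entry: symbol #3 with type 2, i.e. R_LARCH_64.
            sym, typ := decodeRelaInfo(3<<32 | 2)
            fmt.Println(sym, typ) // 3 2
    }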
+ func SizesFor(compiler, arch string) Sizes { + var m map[string]*StdSizes +diff --git a/src/math/big/arith_loong64.s b/src/math/big/arith_loong64.s +new file mode 100644 +index 0000000..fdc31bf +--- /dev/null ++++ b/src/math/big/arith_loong64.s +@@ -0,0 +1,37 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build !math_big_pure_go,loong64 ++ ++#include "textflag.h" ++ ++// This file provides fast assembly versions for the elementary ++// arithmetic operations on vectors implemented in arith.go. ++ ++TEXT ·mulWW(SB),NOSPLIT,$0 ++ JMP ·mulWW_g(SB) ++ ++TEXT ·addVV(SB),NOSPLIT,$0 ++ JMP ·addVV_g(SB) ++ ++TEXT ·subVV(SB),NOSPLIT,$0 ++ JMP ·subVV_g(SB) ++ ++TEXT ·addVW(SB),NOSPLIT,$0 ++ JMP ·addVW_g(SB) ++ ++TEXT ·subVW(SB),NOSPLIT,$0 ++ JMP ·subVW_g(SB) ++ ++TEXT ·shlVU(SB),NOSPLIT,$0 ++ JMP ·shlVU_g(SB) ++ ++TEXT ·shrVU(SB),NOSPLIT,$0 ++ JMP ·shrVU_g(SB) ++ ++TEXT ·mulAddVWW(SB),NOSPLIT,$0 ++ JMP ·mulAddVWW_g(SB) ++ ++TEXT ·addMulVVW(SB),NOSPLIT,$0 ++ JMP ·addMulVVW_g(SB) +diff --git a/src/os/endian_little.go b/src/os/endian_little.go +index 6be6020..e38a42e 100644 +--- a/src/os/endian_little.go ++++ b/src/os/endian_little.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + // +-//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm +-// +build 386 amd64 arm arm64 ppc64le mips64le mipsle riscv64 wasm ++//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm || loong64 ++// +build 386 amd64 arm arm64 ppc64le mips64le mipsle riscv64 wasm loong64 + + package os + +diff --git a/src/reflect/asm_loong64.s b/src/reflect/asm_loong64.s +new file mode 100644 +index 0000000..e2bb861 +--- /dev/null ++++ b/src/reflect/asm_loong64.s +@@ -0,0 +1,42 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++ ++#include "textflag.h" ++#include "funcdata.h" ++ ++#define REGCTXT R29 ++ ++// makeFuncStub is the code half of the function returned by MakeFunc. ++// See the comment on the declaration of makeFuncStub in makefunc.go ++// for more details. ++// No arg size here, runtime pulls arg map out of the func value. ++TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40 ++ NO_LOCAL_POINTERS ++ MOVV REGCTXT, 8(R3) ++ MOVV $argframe+0(FP), R19 ++ MOVV R19, 16(R3) ++ MOVB R0, 40(R3) ++ ADDV $40, R3, R19 ++ MOVV R19, 24(R3) ++ MOVV R0, 32(R3) ++ JAL ·callReflect(SB) ++ RET ++ ++// methodValueCall is the code half of the function returned by makeMethodValue. ++// See the comment on the declaration of methodValueCall in makefunc.go ++// for more details. ++// No arg size here; runtime pulls arg map out of the func value. ++TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40 ++ NO_LOCAL_POINTERS ++ MOVV REGCTXT, 8(R3) ++ MOVV $argframe+0(FP), R19 ++ MOVV R19, 16(R3) ++ MOVB R0, 40(R3) ++ ADDV $40, R3, R19 ++ MOVV R19, 24(R3) ++ MOVV R0, 32(R3) ++ JAL ·callMethod(SB) ++ RET +diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go +new file mode 100644 +index 0000000..e39ecae +--- /dev/null ++++ b/src/vendor/golang.org/x/sys/cpu/cpu_loong64.go +@@ -0,0 +1,14 @@ ++// Copyright 2021 The Go Authors. All rights reserved. 
++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++//go:build loong64 ++// +build loong64 ++ ++package cpu ++ ++const cacheLineSize = 64 ++ ++func initOptions() { ++ options = []option{} ++} +-- +2.27.0 + diff --git a/0036-syscall-add-syscall-support-for-linux-loong64.patch b/0036-syscall-add-syscall-support-for-linux-loong64.patch new file mode 100644 index 0000000..8a411e5 --- /dev/null +++ b/0036-syscall-add-syscall-support-for-linux-loong64.patch @@ -0,0 +1,5006 @@ +From 238e09620ee6639f4d44e8222be6da94b7f5fa49 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 16:45:11 +0800 +Subject: [PATCH 36/56] syscall: add syscall support for linux/loong64 + +Change-Id: Ia676bd2875701639314cadbd39d97620afb3f0a2 +--- + .../internal/syscall/asm_linux_loong64.s | 29 + + src/syscall/asm_linux_loong64.s | 147 ++ + src/syscall/endian_little.go | 4 +- + src/syscall/mkall.sh | 10 + + src/syscall/syscall_dup3_linux.go | 4 +- + src/syscall/syscall_linux_loong64.go | 213 ++ + src/syscall/types_linux.go | 4 +- + src/syscall/zerrors_linux_loong64.go | 2001 +++++++++++++++++ + src/syscall/zsyscall_linux_loong64.go | 1564 +++++++++++++ + src/syscall/zsysnum_linux_loong64.go | 308 +++ + src/syscall/ztypes_linux_loong64.go | 599 +++++ + 11 files changed, 4877 insertions(+), 6 deletions(-) + create mode 100644 src/runtime/internal/syscall/asm_linux_loong64.s + create mode 100644 src/syscall/asm_linux_loong64.s + create mode 100644 src/syscall/syscall_linux_loong64.go + create mode 100644 src/syscall/zerrors_linux_loong64.go + create mode 100644 src/syscall/zsyscall_linux_loong64.go + create mode 100644 src/syscall/zsysnum_linux_loong64.go + create mode 100644 src/syscall/ztypes_linux_loong64.go + +diff --git a/src/runtime/internal/syscall/asm_linux_loong64.s b/src/runtime/internal/syscall/asm_linux_loong64.s +new file mode 100644 +index 0000000..39bf5b1 +--- /dev/null ++++ b/src/runtime/internal/syscall/asm_linux_loong64.s +@@ -0,0 +1,29 @@ ++// Copyright 2022 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++#include "textflag.h" ++ ++// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr) ++TEXT ·Syscall6(SB),NOSPLIT,$0-80 ++ MOVV num+0(FP), R11 // syscall entry ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV a4+32(FP), R7 ++ MOVV a5+40(FP), R8 ++ MOVV a6+48(FP), R9 ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+56(FP) // r1 ++ MOVV R0, r2+64(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+72(FP) // errno ++ RET ++ok: ++ MOVV R4, r1+56(FP) // r1 ++ MOVV R5, r2+64(FP) // r2 ++ MOVV R0, err+72(FP) // errno ++ RET +diff --git a/src/syscall/asm_linux_loong64.s b/src/syscall/asm_linux_loong64.s +new file mode 100644 +index 0000000..09f3f97 +--- /dev/null ++++ b/src/syscall/asm_linux_loong64.s +@@ -0,0 +1,147 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++// +build linux ++// +build loong64 ++ ++#include "textflag.h" ++ ++// ++// System calls for loong64, Linux ++// ++ ++// func Syscall(trap int64, a1, a2, a3 int64) (r1, r2, err int64); ++// Trap # in R11, args in R4-R9, return in R4 ++TEXT ·Syscall(SB),NOSPLIT,$0-56 ++ JAL runtime·entersyscall(SB) ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+32(FP) // r1 ++ MOVV R0, r2+40(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+48(FP) // errno ++ JAL runtime·exitsyscall(SB) ++ RET ++ok: ++ MOVV R4, r1+32(FP) // r1 ++ MOVV R5, r2+40(FP) // r2 ++ MOVV R0, err+48(FP) // errno ++ JAL runtime·exitsyscall(SB) ++ RET ++ ++TEXT ·Syscall6(SB),NOSPLIT,$0-80 ++ JAL runtime·entersyscall(SB) ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV a4+32(FP), R7 ++ MOVV a5+40(FP), R8 ++ MOVV a6+48(FP), R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+56(FP) // r1 ++ MOVV R0, r2+64(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+72(FP) // errno ++ JAL runtime·exitsyscall(SB) ++ RET ++ok: ++ MOVV R4, r1+56(FP) // r1 ++ MOVV R5, r2+64(FP) // r2 ++ MOVV R0, err+72(FP) // errno ++ JAL runtime·exitsyscall(SB) ++ RET ++ ++TEXT ·RawSyscall(SB),NOSPLIT,$0-56 ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+32(FP) // r1 ++ MOVV R0, r2+40(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+48(FP) // errno ++ RET ++ok: ++ MOVV R4, r1+32(FP) // r1 ++ MOVV R5, r2+40(FP) // r2 ++ MOVV R0, err+48(FP) // errno ++ RET ++ ++TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV a4+32(FP), R7 ++ MOVV a5+40(FP), R8 ++ MOVV a6+48(FP), R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+56(FP) // r1 ++ MOVV R0, r2+64(FP) // r2 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+72(FP) // errno ++ RET ++ok: ++ MOVV R4, r1+56(FP) // r1 ++ MOVV R5, r2+64(FP) // r2 ++ MOVV R0, err+72(FP) // errno ++ RET ++ ++// func rawVforkSyscall(trap, a1 uintptr) (r1, err uintptr) ++TEXT ·rawVforkSyscall(SB),NOSPLIT,$0-32 ++ MOVV a1+8(FP), R4 ++ MOVV $0, R5 ++ MOVV $0, R6 ++ MOVV $0, R7 ++ MOVV $0, R8 ++ MOVV $0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVW $-4096, R12 ++ BGEU R12, R4, ok ++ MOVV $-1, R12 ++ MOVV R12, r1+16(FP) // r1 ++ SUBVU R4, R0, R4 ++ MOVV R4, err+24(FP) // errno ++ RET ++ok: ++ MOVV R4, r1+16(FP) // r1 ++ MOVV R0, err+24(FP) // errno ++ RET ++ ++TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48 ++ MOVV a1+8(FP), R4 ++ MOVV a2+16(FP), R5 ++ MOVV a3+24(FP), R6 ++ MOVV R0, R7 ++ MOVV R0, R8 ++ MOVV R0, R9 ++ MOVV trap+0(FP), R11 // syscall entry ++ SYSCALL ++ MOVV R4, r1+32(FP) ++ MOVV R5, r2+40(FP) ++ RET +diff --git a/src/syscall/endian_little.go b/src/syscall/endian_little.go +index a894445..c62fe9e 100644 +--- a/src/syscall/endian_little.go ++++ b/src/syscall/endian_little.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
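Note: in every wrapper above the kernel signals failure by leaving a negated errno in R4; the code compares R4 against -4096 as an unsigned value and, on failure, stores -1 as r1 and 0-R4 as errno. The same convention written out in Go, as a sketch rather than code taken from the patch (decodeSysret is a made-up name):

    package main

    import "fmt"

    // decodeSysret applies the return convention the assembly checks for:
    // an unsigned result above -4096 is a negated errno, anything else is
    // the real return value.
    func decodeSysret(r uint64) (r1, errno uint64) {
            if r > ^uint64(4095) { // r is in [-4095, -1] when read as signed
                    return ^uint64(0), -r // r1 = -1, errno = -r
            }
            return r, 0
    }

    func main() {
            r1, errno := decodeSysret(^uint64(13)) // pretend the kernel returned -14 (EFAULT)
            fmt.Println(int64(r1), errno)          // -1 14
    }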
+ // +-//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm +-// +build 386 amd64 arm arm64 ppc64le mips64le mipsle riscv64 wasm ++//go:build 386 || amd64 || arm || arm64 || ppc64le || mips64le || mipsle || riscv64 || wasm || loong64 ++// +build 386 amd64 arm arm64 ppc64le mips64le mipsle riscv64 wasm loong64 + + package syscall + +diff --git a/src/syscall/mkall.sh b/src/syscall/mkall.sh +index dffb528..f03c5e4 100755 +--- a/src/syscall/mkall.sh ++++ b/src/syscall/mkall.sh +@@ -258,6 +258,16 @@ linux_s390x) + mksysnum="./mksysnum_linux.pl $unistd_h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; ++linux_loong64) ++ GOOSARCH_in=syscall_linux_loong64.go ++ unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1) ++ if [ "$unistd_h" = "" ]; then ++ echo >&2 cannot find unistd.h ++ exit 1 ++ fi ++ mksysnum="./mksysnum_linux.pl $unistd_h" ++ mktypes="GOARCH=$GOARCH go tool cgo -godefs" ++ ;; + netbsd_386) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32 -netbsd" +diff --git a/src/syscall/syscall_dup3_linux.go b/src/syscall/syscall_dup3_linux.go +index 66ec67b..e32a90b 100644 +--- a/src/syscall/syscall_dup3_linux.go ++++ b/src/syscall/syscall_dup3_linux.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build android || arm64 || riscv64 +-// +build android arm64 riscv64 ++//go:build android || arm64 || riscv64 || loong64 ++// +build android arm64 riscv64 loong64 + + package syscall + +diff --git a/src/syscall/syscall_linux_loong64.go b/src/syscall/syscall_linux_loong64.go +new file mode 100644 +index 0000000..2bad1fc +--- /dev/null ++++ b/src/syscall/syscall_linux_loong64.go +@@ -0,0 +1,213 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package syscall ++ ++import "unsafe" ++ ++// archHonorsR2 captures the fact that r2 is honored by the ++// runtime.GOARCH. Syscall conventions are generally r1, r2, err := ++// syscall(trap, ...). Not all architectures define r2 in their ++// ABI. See "man syscall". 
++const archHonorsR2 = false ++ ++const _SYS_setgroups = SYS_SETGROUPS ++ ++func EpollCreate(size int) (fd int, err error) { ++ if size <= 0 { ++ return -1, EINVAL ++ } ++ return EpollCreate1(0) ++} ++ ++//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT ++//sys Fchown(fd int, uid int, gid int) (err error) ++//sys Fstat(fd int, stat *Stat_t) (err error) ++//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) ++//sys fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) ++//sys Fstatfs(fd int, buf *Statfs_t) (err error) ++//sys Ftruncate(fd int, length int64) (err error) ++//sysnb Getegid() (egid int) ++//sysnb Geteuid() (euid int) ++//sysnb Getgid() (gid int) ++//sysnb Getuid() (uid int) ++//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) ++//sys Listen(s int, n int) (err error) ++//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 ++//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 ++//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS_RENAMEAT2 ++//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK ++//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) ++//sys Setfsgid(gid int) (err error) ++//sys Setfsuid(uid int) (err error) ++//sys Setrlimit(resource int, rlim *Rlimit) (err error) ++//sys Shutdown(fd int, how int) (err error) ++//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) ++ ++func Stat(path string, stat *Stat_t) (err error) { ++ return Fstatat(_AT_FDCWD, path, stat, 0) ++} ++ ++func Lchown(path string, uid int, gid int) (err error) { ++ return Fchownat(_AT_FDCWD, path, uid, gid, _AT_SYMLINK_NOFOLLOW) ++} ++ ++func Lstat(path string, stat *Stat_t) (err error) { ++ return Fstatat(_AT_FDCWD, path, stat, _AT_SYMLINK_NOFOLLOW) ++} ++ ++//sys Statfs(path string, buf *Statfs_t) (err error) ++//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) ++//sys Truncate(path string, length int64) (err error) ++//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) ++//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) ++//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) ++//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) ++//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) ++//sysnb setgroups(n int, list *_Gid_t) (err error) ++//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) ++//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) ++//sysnb socket(domain int, typ int, proto int) (fd int, err error) ++//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) ++//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) ++//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) ++//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) ++//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) ++//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) ++//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) ++//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) ++ ++type sigset_t struct { ++ 
X__val [16]uint64 ++} ++ ++//sys pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_t) (n int, err error) = SYS_PSELECT6 ++ ++func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { ++ var ts *Timespec ++ if timeout != nil { ++ ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} ++ } ++ return pselect(nfd, r, w, e, ts, nil) ++} ++ ++//sysnb Gettimeofday(tv *Timeval) (err error) ++ ++func setTimespec(sec, nsec int64) Timespec { ++ return Timespec{Sec: sec, Nsec: nsec} ++} ++ ++func setTimeval(sec, usec int64) Timeval { ++ return Timeval{Sec: sec, Usec: usec} ++} ++ ++func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { ++ if tv == nil { ++ return utimensat(dirfd, path, nil, 0) ++ } ++ ++ ts := []Timespec{ ++ NsecToTimespec(TimevalToNsec(tv[0])), ++ NsecToTimespec(TimevalToNsec(tv[1])), ++ } ++ return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) ++} ++ ++func Time(t *Time_t) (Time_t, error) { ++ var tv Timeval ++ err := Gettimeofday(&tv) ++ if err != nil { ++ return 0, err ++ } ++ if t != nil { ++ *t = Time_t(tv.Sec) ++ } ++ return Time_t(tv.Sec), nil ++} ++ ++func Utime(path string, buf *Utimbuf) error { ++ tv := []Timeval{ ++ {Sec: buf.Actime}, ++ {Sec: buf.Modtime}, ++ } ++ return Utimes(path, tv) ++} ++ ++func utimes(path string, tv *[2]Timeval) (err error) { ++ if tv == nil { ++ return utimensat(_AT_FDCWD, path, nil, 0) ++ } ++ ++ ts := []Timespec{ ++ NsecToTimespec(TimevalToNsec(tv[0])), ++ NsecToTimespec(TimevalToNsec(tv[1])), ++ } ++ return utimensat(_AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) ++} ++ ++// Getrlimit prefers the prlimit64 system call. ++func Getrlimit(resource int, rlim *Rlimit) error { ++ return prlimit(0, resource, nil, rlim) ++} ++ ++// Setrlimit prefers the prlimit64 system call. 
++func Setrlimit(resource int, rlim *Rlimit) error { ++ return prlimit(0, resource, rlim, nil) ++} ++ ++func (r *PtraceRegs) GetEra() uint64 { return r.Era } ++ ++func (r *PtraceRegs) SetEra(era uint64) { r.Era = era } ++ ++func (iov *Iovec) SetLen(length int) { ++ iov.Len = uint64(length) ++} ++ ++func (msghdr *Msghdr) SetControllen(length int) { ++ msghdr.Controllen = uint64(length) ++} ++ ++func (cmsg *Cmsghdr) SetLen(length int) { ++ cmsg.Len = uint64(length) ++} ++ ++func InotifyInit() (fd int, err error) { ++ return InotifyInit1(0) ++} ++ ++//sys ppoll(fds *pollFd, nfds int, timeout *Timespec, sigmask *sigset_t) (n int, err error) ++ ++func Pause() error { ++ _, err := ppoll(nil, 0, nil, nil) ++ return err ++} ++ ++func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno) ++ ++func Pipe(p []int) (err error) { ++ if len(p) != 2 { ++ return EINVAL ++ } ++ var pp [2]_C_int ++ err = pipe2(&pp, 0) ++ p[0] = int(pp[0]) ++ p[1] = int(pp[1]) ++ return ++} ++ ++ ++//sysnb pipe2(p *[2]_C_int, flags int) (err error) ++ ++func Pipe2(p []int, flags int) (err error) { ++ if len(p) != 2 { ++ return EINVAL ++ } ++ ++ var pp [2]_C_int ++ err = pipe2(&pp, flags) ++ p[0] = int(pp[0]) ++ p[1] = int(pp[1]) ++ return ++} ++ +diff --git a/src/syscall/types_linux.go b/src/syscall/types_linux.go +index bf76be9..1ca8912 100644 +--- a/src/syscall/types_linux.go ++++ b/src/syscall/types_linux.go +@@ -88,7 +88,7 @@ struct my_sockaddr_un { + + #ifdef __ARM_EABI__ + typedef struct user_regs PtraceRegs; +-#elif defined(__aarch64__) ++#elif defined(__aarch64__) || defined(__loongarch64) + typedef struct user_pt_regs PtraceRegs; + #elif defined(__powerpc64__) + typedef struct pt_regs PtraceRegs; +@@ -119,7 +119,7 @@ struct my_epoll_event { + int32_t padFd; + #endif + #if defined(__powerpc64__) || defined(__s390x__) || (defined(__riscv_xlen) && __riscv_xlen == 64) \ +- || (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64) ++ || (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64) || defined(__loongarch64) + int32_t _padFd; + #endif + int32_t fd; +diff --git a/src/syscall/zerrors_linux_loong64.go b/src/syscall/zerrors_linux_loong64.go +new file mode 100644 +index 0000000..1281e7d +--- /dev/null ++++ b/src/syscall/zerrors_linux_loong64.go +@@ -0,0 +1,2001 @@ ++// mkerrors.sh ++// Code generated by the command above; DO NOT EDIT. ++ ++// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
++// cgo -godefs -- _const.go ++ ++package syscall ++ ++const ( ++ AF_ALG = 0x26 ++ AF_APPLETALK = 0x5 ++ AF_ASH = 0x12 ++ AF_ATMPVC = 0x8 ++ AF_ATMSVC = 0x14 ++ AF_AX25 = 0x3 ++ AF_BLUETOOTH = 0x1f ++ AF_BRIDGE = 0x7 ++ AF_CAIF = 0x25 ++ AF_CAN = 0x1d ++ AF_DECnet = 0xc ++ AF_ECONET = 0x13 ++ AF_FILE = 0x1 ++ AF_IB = 0x1b ++ AF_IEEE802154 = 0x24 ++ AF_INET = 0x2 ++ AF_INET6 = 0xa ++ AF_IPX = 0x4 ++ AF_IRDA = 0x17 ++ AF_ISDN = 0x22 ++ AF_IUCV = 0x20 ++ AF_KCM = 0x29 ++ AF_KEY = 0xf ++ AF_LLC = 0x1a ++ AF_LOCAL = 0x1 ++ AF_MAX = 0x2c ++ AF_MPLS = 0x1c ++ AF_NETBEUI = 0xd ++ AF_NETLINK = 0x10 ++ AF_NETROM = 0x6 ++ AF_NFC = 0x27 ++ AF_PACKET = 0x11 ++ AF_PHONET = 0x23 ++ AF_PPPOX = 0x18 ++ AF_QIPCRTR = 0x2a ++ AF_RDS = 0x15 ++ AF_ROSE = 0xb ++ AF_ROUTE = 0x10 ++ AF_RXRPC = 0x21 ++ AF_SECURITY = 0xe ++ AF_SMC = 0x2b ++ AF_SNA = 0x16 ++ AF_TIPC = 0x1e ++ AF_UNIX = 0x1 ++ AF_UNSPEC = 0x0 ++ AF_VSOCK = 0x28 ++ AF_WANPIPE = 0x19 ++ AF_X25 = 0x9 ++ ARPHRD_6LOWPAN = 0x339 ++ ARPHRD_ADAPT = 0x108 ++ ARPHRD_APPLETLK = 0x8 ++ ARPHRD_ARCNET = 0x7 ++ ARPHRD_ASH = 0x30d ++ ARPHRD_ATM = 0x13 ++ ARPHRD_AX25 = 0x3 ++ ARPHRD_BIF = 0x307 ++ ARPHRD_CAIF = 0x336 ++ ARPHRD_CAN = 0x118 ++ ARPHRD_CHAOS = 0x5 ++ ARPHRD_CISCO = 0x201 ++ ARPHRD_CSLIP = 0x101 ++ ARPHRD_CSLIP6 = 0x103 ++ ARPHRD_DDCMP = 0x205 ++ ARPHRD_DLCI = 0xf ++ ARPHRD_ECONET = 0x30e ++ ARPHRD_EETHER = 0x2 ++ ARPHRD_ETHER = 0x1 ++ ARPHRD_EUI64 = 0x1b ++ ARPHRD_FCAL = 0x311 ++ ARPHRD_FCFABRIC = 0x313 ++ ARPHRD_FCPL = 0x312 ++ ARPHRD_FCPP = 0x310 ++ ARPHRD_FDDI = 0x306 ++ ARPHRD_FRAD = 0x302 ++ ARPHRD_HDLC = 0x201 ++ ARPHRD_HIPPI = 0x30c ++ ARPHRD_HWX25 = 0x110 ++ ARPHRD_IEEE1394 = 0x18 ++ ARPHRD_IEEE802 = 0x6 ++ ARPHRD_IEEE80211 = 0x321 ++ ARPHRD_IEEE80211_PRISM = 0x322 ++ ARPHRD_IEEE80211_RADIOTAP = 0x323 ++ ARPHRD_IEEE802154 = 0x324 ++ ARPHRD_IEEE802154_MONITOR = 0x325 ++ ARPHRD_IEEE802_TR = 0x320 ++ ARPHRD_INFINIBAND = 0x20 ++ ARPHRD_IP6GRE = 0x337 ++ ARPHRD_IPDDP = 0x309 ++ ARPHRD_IPGRE = 0x30a ++ ARPHRD_IRDA = 0x30f ++ ARPHRD_LAPB = 0x204 ++ ARPHRD_LOCALTLK = 0x305 ++ ARPHRD_LOOPBACK = 0x304 ++ ARPHRD_METRICOM = 0x17 ++ ARPHRD_NETLINK = 0x338 ++ ARPHRD_NETROM = 0x0 ++ ARPHRD_NONE = 0xfffe ++ ARPHRD_PHONET = 0x334 ++ ARPHRD_PHONET_PIPE = 0x335 ++ ARPHRD_PIMREG = 0x30b ++ ARPHRD_PPP = 0x200 ++ ARPHRD_PRONET = 0x4 ++ ARPHRD_RAWHDLC = 0x206 ++ ARPHRD_RAWIP = 0x207 ++ ARPHRD_ROSE = 0x10e ++ ARPHRD_RSRVD = 0x104 ++ ARPHRD_SIT = 0x308 ++ ARPHRD_SKIP = 0x303 ++ ARPHRD_SLIP = 0x100 ++ ARPHRD_SLIP6 = 0x102 ++ ARPHRD_TUNNEL = 0x300 ++ ARPHRD_TUNNEL6 = 0x301 ++ ARPHRD_VOID = 0xffff ++ ARPHRD_VSOCKMON = 0x33a ++ ARPHRD_X25 = 0x10f ++ B0 = 0x0 ++ B1000000 = 0x1008 ++ B110 = 0x3 ++ B115200 = 0x1002 ++ B1152000 = 0x1009 ++ B1200 = 0x9 ++ B134 = 0x4 ++ B150 = 0x5 ++ B1500000 = 0x100a ++ B1800 = 0xa ++ B19200 = 0xe ++ B200 = 0x6 ++ B2000000 = 0x100b ++ B230400 = 0x1003 ++ B2400 = 0xb ++ B2500000 = 0x100c ++ B300 = 0x7 ++ B3000000 = 0x100d ++ B3500000 = 0x100e ++ B38400 = 0xf ++ B4000000 = 0x100f ++ B460800 = 0x1004 ++ B4800 = 0xc ++ B50 = 0x1 ++ B500000 = 0x1005 ++ B57600 = 0x1001 ++ B576000 = 0x1006 ++ B600 = 0x8 ++ B75 = 0x2 ++ B921600 = 0x1007 ++ B9600 = 0xd ++ BPF_A = 0x10 ++ BPF_ABS = 0x20 ++ BPF_ADD = 0x0 ++ BPF_ALU = 0x4 ++ BPF_AND = 0x50 ++ BPF_B = 0x10 ++ BPF_DIV = 0x30 ++ BPF_H = 0x8 ++ BPF_IMM = 0x0 ++ BPF_IND = 0x40 ++ BPF_JA = 0x0 ++ BPF_JEQ = 0x10 ++ BPF_JGE = 0x30 ++ BPF_JGT = 0x20 ++ BPF_JMP = 0x5 ++ BPF_JSET = 0x40 ++ BPF_K = 0x0 ++ BPF_LD = 0x0 ++ BPF_LDX = 0x1 ++ BPF_LEN = 0x80 ++ BPF_LL_OFF = -0x200000 ++ BPF_LSH = 0x60 ++ BPF_MAJOR_VERSION = 
0x1 ++ BPF_MAXINSNS = 0x1000 ++ BPF_MEM = 0x60 ++ BPF_MEMWORDS = 0x10 ++ BPF_MINOR_VERSION = 0x1 ++ BPF_MISC = 0x7 ++ BPF_MOD = 0x90 ++ BPF_MSH = 0xa0 ++ BPF_MUL = 0x20 ++ BPF_NEG = 0x80 ++ BPF_NET_OFF = -0x100000 ++ BPF_OR = 0x40 ++ BPF_RET = 0x6 ++ BPF_RSH = 0x70 ++ BPF_ST = 0x2 ++ BPF_STX = 0x3 ++ BPF_SUB = 0x10 ++ BPF_TAX = 0x0 ++ BPF_TXA = 0x80 ++ BPF_W = 0x0 ++ BPF_X = 0x8 ++ BPF_XOR = 0xa0 ++ BRKINT = 0x2 ++ CFLUSH = 0xf ++ CLOCAL = 0x800 ++ CLONE_CHILD_CLEARTID = 0x200000 ++ CLONE_CHILD_SETTID = 0x1000000 ++ CLONE_DETACHED = 0x400000 ++ CLONE_FILES = 0x400 ++ CLONE_FS = 0x200 ++ CLONE_IO = 0x80000000 ++ CLONE_NEWCGROUP = 0x2000000 ++ CLONE_NEWIPC = 0x8000000 ++ CLONE_NEWNET = 0x40000000 ++ CLONE_NEWNS = 0x20000 ++ CLONE_NEWPID = 0x20000000 ++ CLONE_NEWUSER = 0x10000000 ++ CLONE_NEWUTS = 0x4000000 ++ CLONE_PARENT = 0x8000 ++ CLONE_PARENT_SETTID = 0x100000 ++ CLONE_PTRACE = 0x2000 ++ CLONE_SETTLS = 0x80000 ++ CLONE_SIGHAND = 0x800 ++ CLONE_SYSVSEM = 0x40000 ++ CLONE_THREAD = 0x10000 ++ CLONE_UNTRACED = 0x800000 ++ CLONE_VFORK = 0x4000 ++ CLONE_VM = 0x100 ++ CREAD = 0x80 ++ CS5 = 0x0 ++ CS6 = 0x10 ++ CS7 = 0x20 ++ CS8 = 0x30 ++ CSIGNAL = 0xff ++ CSIZE = 0x30 ++ CSTART = 0x11 ++ CSTATUS = 0x0 ++ CSTOP = 0x13 ++ CSTOPB = 0x40 ++ CSUSP = 0x1a ++ DT_BLK = 0x6 ++ DT_CHR = 0x2 ++ DT_DIR = 0x4 ++ DT_FIFO = 0x1 ++ DT_LNK = 0xa ++ DT_REG = 0x8 ++ DT_SOCK = 0xc ++ DT_UNKNOWN = 0x0 ++ DT_WHT = 0xe ++ ECHO = 0x8 ++ ECHOCTL = 0x200 ++ ECHOE = 0x10 ++ ECHOK = 0x20 ++ ECHOKE = 0x800 ++ ECHONL = 0x40 ++ ECHOPRT = 0x400 ++ ENCODING_DEFAULT = 0x0 ++ ENCODING_FM_MARK = 0x3 ++ ENCODING_FM_SPACE = 0x4 ++ ENCODING_MANCHESTER = 0x5 ++ ENCODING_NRZ = 0x1 ++ ENCODING_NRZI = 0x2 ++ EPOLLERR = 0x8 ++ EPOLLET = 0x80000000 ++ EPOLLEXCLUSIVE = 0x10000000 ++ EPOLLHUP = 0x10 ++ EPOLLIN = 0x1 ++ EPOLLMSG = 0x400 ++ EPOLLONESHOT = 0x40000000 ++ EPOLLOUT = 0x4 ++ EPOLLPRI = 0x2 ++ EPOLLRDBAND = 0x80 ++ EPOLLRDHUP = 0x2000 ++ EPOLLRDNORM = 0x40 ++ EPOLLWAKEUP = 0x20000000 ++ EPOLLWRBAND = 0x200 ++ EPOLLWRNORM = 0x100 ++ EPOLL_CLOEXEC = 0x80000 ++ EPOLL_CTL_ADD = 0x1 ++ EPOLL_CTL_DEL = 0x2 ++ EPOLL_CTL_MOD = 0x3 ++ ETH_P_1588 = 0x88f7 ++ ETH_P_8021AD = 0x88a8 ++ ETH_P_8021AH = 0x88e7 ++ ETH_P_8021Q = 0x8100 ++ ETH_P_80221 = 0x8917 ++ ETH_P_802_2 = 0x4 ++ ETH_P_802_3 = 0x1 ++ ETH_P_802_3_MIN = 0x600 ++ ETH_P_802_EX1 = 0x88b5 ++ ETH_P_AARP = 0x80f3 ++ ETH_P_AF_IUCV = 0xfbfb ++ ETH_P_ALL = 0x3 ++ ETH_P_AOE = 0x88a2 ++ ETH_P_ARCNET = 0x1a ++ ETH_P_ARP = 0x806 ++ ETH_P_ATALK = 0x809b ++ ETH_P_ATMFATE = 0x8884 ++ ETH_P_ATMMPOA = 0x884c ++ ETH_P_AX25 = 0x2 ++ ETH_P_BATMAN = 0x4305 ++ ETH_P_BPQ = 0x8ff ++ ETH_P_CAIF = 0xf7 ++ ETH_P_CAN = 0xc ++ ETH_P_CANFD = 0xd ++ ETH_P_CONTROL = 0x16 ++ ETH_P_CUST = 0x6006 ++ ETH_P_DDCMP = 0x6 ++ ETH_P_DEC = 0x6000 ++ ETH_P_DIAG = 0x6005 ++ ETH_P_DNA_DL = 0x6001 ++ ETH_P_DNA_RC = 0x6002 ++ ETH_P_DNA_RT = 0x6003 ++ ETH_P_DSA = 0x1b ++ ETH_P_ECONET = 0x18 ++ ETH_P_EDSA = 0xdada ++ ETH_P_ERSPAN = 0x88be ++ ETH_P_ERSPAN2 = 0x22eb ++ ETH_P_FCOE = 0x8906 ++ ETH_P_FIP = 0x8914 ++ ETH_P_HDLC = 0x19 ++ ETH_P_HSR = 0x892f ++ ETH_P_IBOE = 0x8915 ++ ETH_P_IEEE802154 = 0xf6 ++ ETH_P_IEEEPUP = 0xa00 ++ ETH_P_IEEEPUPAT = 0xa01 ++ ETH_P_IFE = 0xed3e ++ ETH_P_IP = 0x800 ++ ETH_P_IPV6 = 0x86dd ++ ETH_P_IPX = 0x8137 ++ ETH_P_IRDA = 0x17 ++ ETH_P_LAT = 0x6004 ++ ETH_P_LINK_CTL = 0x886c ++ ETH_P_LOCALTALK = 0x9 ++ ETH_P_LOOP = 0x60 ++ ETH_P_LOOPBACK = 0x9000 ++ ETH_P_MACSEC = 0x88e5 ++ ETH_P_MAP = 0xf9 ++ ETH_P_MOBITEX = 0x15 ++ ETH_P_MPLS_MC = 0x8848 ++ ETH_P_MPLS_UC = 0x8847 ++ ETH_P_MVRP = 0x88f5 ++ ETH_P_NCSI 
= 0x88f8 ++ ETH_P_NSH = 0x894f ++ ETH_P_PAE = 0x888e ++ ETH_P_PAUSE = 0x8808 ++ ETH_P_PHONET = 0xf5 ++ ETH_P_PPPTALK = 0x10 ++ ETH_P_PPP_DISC = 0x8863 ++ ETH_P_PPP_MP = 0x8 ++ ETH_P_PPP_SES = 0x8864 ++ ETH_P_PREAUTH = 0x88c7 ++ ETH_P_PRP = 0x88fb ++ ETH_P_PUP = 0x200 ++ ETH_P_PUPAT = 0x201 ++ ETH_P_QINQ1 = 0x9100 ++ ETH_P_QINQ2 = 0x9200 ++ ETH_P_QINQ3 = 0x9300 ++ ETH_P_RARP = 0x8035 ++ ETH_P_SCA = 0x6007 ++ ETH_P_SLOW = 0x8809 ++ ETH_P_SNAP = 0x5 ++ ETH_P_TDLS = 0x890d ++ ETH_P_TEB = 0x6558 ++ ETH_P_TIPC = 0x88ca ++ ETH_P_TRAILER = 0x1c ++ ETH_P_TR_802_2 = 0x11 ++ ETH_P_TSN = 0x22f0 ++ ETH_P_WAN_PPP = 0x7 ++ ETH_P_WCCP = 0x883e ++ ETH_P_X25 = 0x805 ++ ETH_P_XDSA = 0xf8 ++ EXTA = 0xe ++ EXTB = 0xf ++ EXTPROC = 0x10000 ++ FD_CLOEXEC = 0x1 ++ FD_SETSIZE = 0x400 ++ FLUSHO = 0x1000 ++ F_ADD_SEALS = 0x409 ++ F_DUPFD = 0x0 ++ F_DUPFD_CLOEXEC = 0x406 ++ F_EXLCK = 0x4 ++ F_GETFD = 0x1 ++ F_GETFL = 0x3 ++ F_GETLEASE = 0x401 ++ F_GETLK = 0x5 ++ F_GETLK64 = 0x5 ++ F_GETOWN = 0x9 ++ F_GETOWN_EX = 0x10 ++ F_GETPIPE_SZ = 0x408 ++ F_GETSIG = 0xb ++ F_GET_FILE_RW_HINT = 0x40d ++ F_GET_RW_HINT = 0x40b ++ F_GET_SEALS = 0x40a ++ F_LOCK = 0x1 ++ F_NOTIFY = 0x402 ++ F_OFD_GETLK = 0x24 ++ F_OFD_SETLK = 0x25 ++ F_OFD_SETLKW = 0x26 ++ F_OK = 0x0 ++ F_RDLCK = 0x0 ++ F_SEAL_GROW = 0x4 ++ F_SEAL_SEAL = 0x1 ++ F_SEAL_SHRINK = 0x2 ++ F_SEAL_WRITE = 0x8 ++ F_SETFD = 0x2 ++ F_SETFL = 0x4 ++ F_SETLEASE = 0x400 ++ F_SETLK = 0x6 ++ F_SETLK64 = 0x6 ++ F_SETLKW = 0x7 ++ F_SETLKW64 = 0x7 ++ F_SETOWN = 0x8 ++ F_SETOWN_EX = 0xf ++ F_SETPIPE_SZ = 0x407 ++ F_SETSIG = 0xa ++ F_SET_FILE_RW_HINT = 0x40e ++ F_SET_RW_HINT = 0x40c ++ F_SHLCK = 0x8 ++ F_TEST = 0x3 ++ F_TLOCK = 0x2 ++ F_ULOCK = 0x0 ++ F_UNLCK = 0x2 ++ F_WRLCK = 0x1 ++ HUPCL = 0x400 ++ ICANON = 0x2 ++ ICMPV6_FILTER = 0x1 ++ ICRNL = 0x100 ++ IEXTEN = 0x8000 ++ IFA_F_DADFAILED = 0x8 ++ IFA_F_DEPRECATED = 0x20 ++ IFA_F_HOMEADDRESS = 0x10 ++ IFA_F_MANAGETEMPADDR = 0x100 ++ IFA_F_MCAUTOJOIN = 0x400 ++ IFA_F_NODAD = 0x2 ++ IFA_F_NOPREFIXROUTE = 0x200 ++ IFA_F_OPTIMISTIC = 0x4 ++ IFA_F_PERMANENT = 0x80 ++ IFA_F_SECONDARY = 0x1 ++ IFA_F_STABLE_PRIVACY = 0x800 ++ IFA_F_TEMPORARY = 0x1 ++ IFA_F_TENTATIVE = 0x40 ++ IFA_MAX = 0x9 ++ IFF_ALLMULTI = 0x200 ++ IFF_ATTACH_QUEUE = 0x200 ++ IFF_AUTOMEDIA = 0x4000 ++ IFF_BROADCAST = 0x2 ++ IFF_DEBUG = 0x4 ++ IFF_DETACH_QUEUE = 0x400 ++ IFF_DORMANT = 0x20000 ++ IFF_DYNAMIC = 0x8000 ++ IFF_ECHO = 0x40000 ++ IFF_LOOPBACK = 0x8 ++ IFF_LOWER_UP = 0x10000 ++ IFF_MASTER = 0x400 ++ IFF_MULTICAST = 0x1000 ++ IFF_MULTI_QUEUE = 0x100 ++ IFF_NAPI = 0x10 ++ IFF_NAPI_FRAGS = 0x20 ++ IFF_NOARP = 0x80 ++ IFF_NOFILTER = 0x1000 ++ IFF_NOTRAILERS = 0x20 ++ IFF_NO_PI = 0x1000 ++ IFF_ONE_QUEUE = 0x2000 ++ IFF_PERSIST = 0x800 ++ IFF_POINTOPOINT = 0x10 ++ IFF_PORTSEL = 0x2000 ++ IFF_PROMISC = 0x100 ++ IFF_RUNNING = 0x40 ++ IFF_SLAVE = 0x800 ++ IFF_TAP = 0x2 ++ IFF_TUN = 0x1 ++ IFF_TUN_EXCL = 0x8000 ++ IFF_UP = 0x1 ++ IFF_VNET_HDR = 0x4000 ++ IFF_VOLATILE = 0x70c5a ++ IFNAMSIZ = 0x10 ++ IGNBRK = 0x1 ++ IGNCR = 0x80 ++ IGNPAR = 0x4 ++ IMAXBEL = 0x2000 ++ INLCR = 0x40 ++ INPCK = 0x10 ++ IN_ACCESS = 0x1 ++ IN_ALL_EVENTS = 0xfff ++ IN_ATTRIB = 0x4 ++ IN_CLASSA_HOST = 0xffffff ++ IN_CLASSA_MAX = 0x80 ++ IN_CLASSA_NET = 0xff000000 ++ IN_CLASSA_NSHIFT = 0x18 ++ IN_CLASSB_HOST = 0xffff ++ IN_CLASSB_MAX = 0x10000 ++ IN_CLASSB_NET = 0xffff0000 ++ IN_CLASSB_NSHIFT = 0x10 ++ IN_CLASSC_HOST = 0xff ++ IN_CLASSC_NET = 0xffffff00 ++ IN_CLASSC_NSHIFT = 0x8 ++ IN_CLOEXEC = 0x80000 ++ IN_CLOSE = 0x18 ++ IN_CLOSE_NOWRITE = 0x10 ++ IN_CLOSE_WRITE = 0x8 ++ IN_CREATE = 0x100 ++ IN_DELETE 
= 0x200 ++ IN_DELETE_SELF = 0x400 ++ IN_DONT_FOLLOW = 0x2000000 ++ IN_EXCL_UNLINK = 0x4000000 ++ IN_IGNORED = 0x8000 ++ IN_ISDIR = 0x40000000 ++ IN_LOOPBACKNET = 0x7f ++ IN_MASK_ADD = 0x20000000 ++ IN_MODIFY = 0x2 ++ IN_MOVE = 0xc0 ++ IN_MOVED_FROM = 0x40 ++ IN_MOVED_TO = 0x80 ++ IN_MOVE_SELF = 0x800 ++ IN_NONBLOCK = 0x800 ++ IN_ONESHOT = 0x80000000 ++ IN_ONLYDIR = 0x1000000 ++ IN_OPEN = 0x20 ++ IN_Q_OVERFLOW = 0x4000 ++ IN_UNMOUNT = 0x2000 ++ IPPROTO_AH = 0x33 ++ IPPROTO_BEETPH = 0x5e ++ IPPROTO_COMP = 0x6c ++ IPPROTO_DCCP = 0x21 ++ IPPROTO_DSTOPTS = 0x3c ++ IPPROTO_EGP = 0x8 ++ IPPROTO_ENCAP = 0x62 ++ IPPROTO_ESP = 0x32 ++ IPPROTO_FRAGMENT = 0x2c ++ IPPROTO_GRE = 0x2f ++ IPPROTO_HOPOPTS = 0x0 ++ IPPROTO_ICMP = 0x1 ++ IPPROTO_ICMPV6 = 0x3a ++ IPPROTO_IDP = 0x16 ++ IPPROTO_IGMP = 0x2 ++ IPPROTO_IP = 0x0 ++ IPPROTO_IPIP = 0x4 ++ IPPROTO_IPV6 = 0x29 ++ IPPROTO_MH = 0x87 ++ IPPROTO_MPLS = 0x89 ++ IPPROTO_MTP = 0x5c ++ IPPROTO_NONE = 0x3b ++ IPPROTO_PIM = 0x67 ++ IPPROTO_PUP = 0xc ++ IPPROTO_RAW = 0xff ++ IPPROTO_ROUTING = 0x2b ++ IPPROTO_RSVP = 0x2e ++ IPPROTO_SCTP = 0x84 ++ IPPROTO_TCP = 0x6 ++ IPPROTO_TP = 0x1d ++ IPPROTO_UDP = 0x11 ++ IPPROTO_UDPLITE = 0x88 ++ IPV6_2292DSTOPTS = 0x4 ++ IPV6_2292HOPLIMIT = 0x8 ++ IPV6_2292HOPOPTS = 0x3 ++ IPV6_2292PKTINFO = 0x2 ++ IPV6_2292PKTOPTIONS = 0x6 ++ IPV6_2292RTHDR = 0x5 ++ IPV6_ADDRFORM = 0x1 ++ IPV6_ADDR_PREFERENCES = 0x48 ++ IPV6_ADD_MEMBERSHIP = 0x14 ++ IPV6_AUTHHDR = 0xa ++ IPV6_AUTOFLOWLABEL = 0x46 ++ IPV6_CHECKSUM = 0x7 ++ IPV6_DONTFRAG = 0x3e ++ IPV6_DROP_MEMBERSHIP = 0x15 ++ IPV6_DSTOPTS = 0x3b ++ IPV6_FREEBIND = 0x4e ++ IPV6_HDRINCL = 0x24 ++ IPV6_HOPLIMIT = 0x34 ++ IPV6_HOPOPTS = 0x36 ++ IPV6_IPSEC_POLICY = 0x22 ++ IPV6_JOIN_ANYCAST = 0x1b ++ IPV6_JOIN_GROUP = 0x14 ++ IPV6_LEAVE_ANYCAST = 0x1c ++ IPV6_LEAVE_GROUP = 0x15 ++ IPV6_MINHOPCOUNT = 0x49 ++ IPV6_MTU = 0x18 ++ IPV6_MTU_DISCOVER = 0x17 ++ IPV6_MULTICAST_HOPS = 0x12 ++ IPV6_MULTICAST_IF = 0x11 ++ IPV6_MULTICAST_LOOP = 0x13 ++ IPV6_NEXTHOP = 0x9 ++ IPV6_ORIGDSTADDR = 0x4a ++ IPV6_PATHMTU = 0x3d ++ IPV6_PKTINFO = 0x32 ++ IPV6_PMTUDISC_DO = 0x2 ++ IPV6_PMTUDISC_DONT = 0x0 ++ IPV6_PMTUDISC_INTERFACE = 0x4 ++ IPV6_PMTUDISC_OMIT = 0x5 ++ IPV6_PMTUDISC_PROBE = 0x3 ++ IPV6_PMTUDISC_WANT = 0x1 ++ IPV6_RECVDSTOPTS = 0x3a ++ IPV6_RECVERR = 0x19 ++ IPV6_RECVFRAGSIZE = 0x4d ++ IPV6_RECVHOPLIMIT = 0x33 ++ IPV6_RECVHOPOPTS = 0x35 ++ IPV6_RECVORIGDSTADDR = 0x4a ++ IPV6_RECVPATHMTU = 0x3c ++ IPV6_RECVPKTINFO = 0x31 ++ IPV6_RECVRTHDR = 0x38 ++ IPV6_RECVTCLASS = 0x42 ++ IPV6_ROUTER_ALERT = 0x16 ++ IPV6_RTHDR = 0x39 ++ IPV6_RTHDRDSTOPTS = 0x37 ++ IPV6_RTHDR_LOOSE = 0x0 ++ IPV6_RTHDR_STRICT = 0x1 ++ IPV6_RTHDR_TYPE_0 = 0x0 ++ IPV6_RXDSTOPTS = 0x3b ++ IPV6_RXHOPOPTS = 0x36 ++ IPV6_TCLASS = 0x43 ++ IPV6_TRANSPARENT = 0x4b ++ IPV6_UNICAST_HOPS = 0x10 ++ IPV6_UNICAST_IF = 0x4c ++ IPV6_V6ONLY = 0x1a ++ IPV6_XFRM_POLICY = 0x23 ++ IP_ADD_MEMBERSHIP = 0x23 ++ IP_ADD_SOURCE_MEMBERSHIP = 0x27 ++ IP_BIND_ADDRESS_NO_PORT = 0x18 ++ IP_BLOCK_SOURCE = 0x26 ++ IP_CHECKSUM = 0x17 ++ IP_DEFAULT_MULTICAST_LOOP = 0x1 ++ IP_DEFAULT_MULTICAST_TTL = 0x1 ++ IP_DF = 0x4000 ++ IP_DROP_MEMBERSHIP = 0x24 ++ IP_DROP_SOURCE_MEMBERSHIP = 0x28 ++ IP_FREEBIND = 0xf ++ IP_HDRINCL = 0x3 ++ IP_IPSEC_POLICY = 0x10 ++ IP_MAXPACKET = 0xffff ++ IP_MAX_MEMBERSHIPS = 0x14 ++ IP_MF = 0x2000 ++ IP_MINTTL = 0x15 ++ IP_MSFILTER = 0x29 ++ IP_MSS = 0x240 ++ IP_MTU = 0xe ++ IP_MTU_DISCOVER = 0xa ++ IP_MULTICAST_ALL = 0x31 ++ IP_MULTICAST_IF = 0x20 ++ IP_MULTICAST_LOOP = 0x22 ++ IP_MULTICAST_TTL = 0x21 ++ IP_NODEFRAG = 0x16 ++ IP_OFFMASK = 0x1fff ++ 
IP_OPTIONS = 0x4 ++ IP_ORIGDSTADDR = 0x14 ++ IP_PASSSEC = 0x12 ++ IP_PKTINFO = 0x8 ++ IP_PKTOPTIONS = 0x9 ++ IP_PMTUDISC = 0xa ++ IP_PMTUDISC_DO = 0x2 ++ IP_PMTUDISC_DONT = 0x0 ++ IP_PMTUDISC_INTERFACE = 0x4 ++ IP_PMTUDISC_OMIT = 0x5 ++ IP_PMTUDISC_PROBE = 0x3 ++ IP_PMTUDISC_WANT = 0x1 ++ IP_RECVERR = 0xb ++ IP_RECVFRAGSIZE = 0x19 ++ IP_RECVOPTS = 0x6 ++ IP_RECVORIGDSTADDR = 0x14 ++ IP_RECVRETOPTS = 0x7 ++ IP_RECVTOS = 0xd ++ IP_RECVTTL = 0xc ++ IP_RETOPTS = 0x7 ++ IP_RF = 0x8000 ++ IP_ROUTER_ALERT = 0x5 ++ IP_TOS = 0x1 ++ IP_TRANSPARENT = 0x13 ++ IP_TTL = 0x2 ++ IP_UNBLOCK_SOURCE = 0x25 ++ IP_UNICAST_IF = 0x32 ++ IP_XFRM_POLICY = 0x11 ++ ISIG = 0x1 ++ ISTRIP = 0x20 ++ IUTF8 = 0x4000 ++ IXANY = 0x800 ++ IXOFF = 0x1000 ++ IXON = 0x400 ++ LINUX_REBOOT_CMD_CAD_OFF = 0x0 ++ LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef ++ LINUX_REBOOT_CMD_HALT = 0xcdef0123 ++ LINUX_REBOOT_CMD_KEXEC = 0x45584543 ++ LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc ++ LINUX_REBOOT_CMD_RESTART = 0x1234567 ++ LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 ++ LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 ++ LINUX_REBOOT_MAGIC1 = 0xfee1dead ++ LINUX_REBOOT_MAGIC2 = 0x28121969 ++ LOCK_EX = 0x2 ++ LOCK_NB = 0x4 ++ LOCK_SH = 0x1 ++ LOCK_UN = 0x8 ++ MADV_DODUMP = 0x11 ++ MADV_DOFORK = 0xb ++ MADV_DONTDUMP = 0x10 ++ MADV_DONTFORK = 0xa ++ MADV_DONTNEED = 0x4 ++ MADV_FREE = 0x8 ++ MADV_HUGEPAGE = 0xe ++ MADV_HWPOISON = 0x64 ++ MADV_KEEPONFORK = 0x13 ++ MADV_MERGEABLE = 0xc ++ MADV_NOHUGEPAGE = 0xf ++ MADV_NORMAL = 0x0 ++ MADV_RANDOM = 0x1 ++ MADV_REMOVE = 0x9 ++ MADV_SEQUENTIAL = 0x2 ++ MADV_UNMERGEABLE = 0xd ++ MADV_WILLNEED = 0x3 ++ MADV_WIPEONFORK = 0x12 ++ MAP_ANON = 0x20 ++ MAP_ANONYMOUS = 0x20 ++ MAP_DENYWRITE = 0x800 ++ MAP_EXECUTABLE = 0x1000 ++ MAP_FILE = 0x0 ++ MAP_FIXED = 0x10 ++ MAP_FIXED_NOREPLACE = 0x100000 ++ MAP_GROWSDOWN = 0x100 ++ MAP_HUGETLB = 0x40000 ++ MAP_HUGE_MASK = 0x3f ++ MAP_HUGE_SHIFT = 0x1a ++ MAP_LOCKED = 0x2000 ++ MAP_NONBLOCK = 0x10000 ++ MAP_NORESERVE = 0x4000 ++ MAP_POPULATE = 0x8000 ++ MAP_PRIVATE = 0x2 ++ MAP_SHARED = 0x1 ++ MAP_SHARED_VALIDATE = 0x3 ++ MAP_STACK = 0x20000 ++ MAP_SYNC = 0x80000 ++ MAP_TYPE = 0xf ++ MCL_CURRENT = 0x1 ++ MCL_FUTURE = 0x2 ++ MCL_ONFAULT = 0x4 ++ MNT_DETACH = 0x2 ++ MNT_EXPIRE = 0x4 ++ MNT_FORCE = 0x1 ++ MSG_BATCH = 0x40000 ++ MSG_CMSG_CLOEXEC = 0x40000000 ++ MSG_CONFIRM = 0x800 ++ MSG_CTRUNC = 0x8 ++ MSG_DONTROUTE = 0x4 ++ MSG_DONTWAIT = 0x40 ++ MSG_EOR = 0x80 ++ MSG_ERRQUEUE = 0x2000 ++ MSG_FASTOPEN = 0x20000000 ++ MSG_FIN = 0x200 ++ MSG_MORE = 0x8000 ++ MSG_NOSIGNAL = 0x4000 ++ MSG_OOB = 0x1 ++ MSG_PEEK = 0x2 ++ MSG_PROXY = 0x10 ++ MSG_RST = 0x1000 ++ MSG_SYN = 0x400 ++ MSG_TRUNC = 0x20 ++ MSG_TRYHARD = 0x4 ++ MSG_WAITALL = 0x100 ++ MSG_WAITFORONE = 0x10000 ++ MSG_ZEROCOPY = 0x4000000 ++ MS_ACTIVE = 0x40000000 ++ MS_ASYNC = 0x1 ++ MS_BIND = 0x1000 ++ MS_DIRSYNC = 0x80 ++ MS_INVALIDATE = 0x2 ++ MS_I_VERSION = 0x800000 ++ MS_KERNMOUNT = 0x400000 ++ MS_LAZYTIME = 0x2000000 ++ MS_MANDLOCK = 0x40 ++ MS_MGC_MSK = 0xffff0000 ++ MS_MGC_VAL = 0xc0ed0000 ++ MS_MOVE = 0x2000 ++ MS_NOATIME = 0x400 ++ MS_NODEV = 0x4 ++ MS_NODIRATIME = 0x800 ++ MS_NOEXEC = 0x8 ++ MS_NOSUID = 0x2 ++ MS_NOUSER = -0x80000000 ++ MS_POSIXACL = 0x10000 ++ MS_PRIVATE = 0x40000 ++ MS_RDONLY = 0x1 ++ MS_REC = 0x4000 ++ MS_RELATIME = 0x200000 ++ MS_REMOUNT = 0x20 ++ MS_RMT_MASK = 0x2800051 ++ MS_SHARED = 0x100000 ++ MS_SILENT = 0x8000 ++ MS_SLAVE = 0x80000 ++ MS_STRICTATIME = 0x1000000 ++ MS_SYNC = 0x4 ++ MS_SYNCHRONOUS = 0x10 ++ MS_UNBINDABLE = 0x20000 ++ NAME_MAX = 0xff ++ NETLINK_ADD_MEMBERSHIP = 0x1 ++ NETLINK_AUDIT = 0x9 
++ NETLINK_BROADCAST_ERROR = 0x4 ++ NETLINK_CAP_ACK = 0xa ++ NETLINK_CONNECTOR = 0xb ++ NETLINK_CRYPTO = 0x15 ++ NETLINK_DNRTMSG = 0xe ++ NETLINK_DROP_MEMBERSHIP = 0x2 ++ NETLINK_ECRYPTFS = 0x13 ++ NETLINK_EXT_ACK = 0xb ++ NETLINK_FIB_LOOKUP = 0xa ++ NETLINK_FIREWALL = 0x3 ++ NETLINK_GENERIC = 0x10 ++ NETLINK_INET_DIAG = 0x4 ++ NETLINK_IP6_FW = 0xd ++ NETLINK_ISCSI = 0x8 ++ NETLINK_KOBJECT_UEVENT = 0xf ++ NETLINK_LISTEN_ALL_NSID = 0x8 ++ NETLINK_LIST_MEMBERSHIPS = 0x9 ++ NETLINK_NETFILTER = 0xc ++ NETLINK_NFLOG = 0x5 ++ NETLINK_NO_ENOBUFS = 0x5 ++ NETLINK_PKTINFO = 0x3 ++ NETLINK_RDMA = 0x14 ++ NETLINK_ROUTE = 0x0 ++ NETLINK_RX_RING = 0x6 ++ NETLINK_SCSITRANSPORT = 0x12 ++ NETLINK_SELINUX = 0x7 ++ NETLINK_SMC = 0x16 ++ NETLINK_SOCK_DIAG = 0x4 ++ NETLINK_TX_RING = 0x7 ++ NETLINK_UNUSED = 0x1 ++ NETLINK_USERSOCK = 0x2 ++ NETLINK_XFRM = 0x6 ++ NLA_ALIGNTO = 0x4 ++ NLA_F_NESTED = 0x8000 ++ NLA_F_NET_BYTEORDER = 0x4000 ++ NLA_HDRLEN = 0x4 ++ NLMSG_ALIGNTO = 0x4 ++ NLMSG_DONE = 0x3 ++ NLMSG_ERROR = 0x2 ++ NLMSG_HDRLEN = 0x10 ++ NLMSG_MIN_TYPE = 0x10 ++ NLMSG_NOOP = 0x1 ++ NLMSG_OVERRUN = 0x4 ++ NLM_F_ACK = 0x4 ++ NLM_F_ACK_TLVS = 0x200 ++ NLM_F_APPEND = 0x800 ++ NLM_F_ATOMIC = 0x400 ++ NLM_F_CAPPED = 0x100 ++ NLM_F_CREATE = 0x400 ++ NLM_F_DUMP = 0x300 ++ NLM_F_DUMP_FILTERED = 0x20 ++ NLM_F_DUMP_INTR = 0x10 ++ NLM_F_ECHO = 0x8 ++ NLM_F_EXCL = 0x200 ++ NLM_F_MATCH = 0x200 ++ NLM_F_MULTI = 0x2 ++ NLM_F_NONREC = 0x100 ++ NLM_F_REPLACE = 0x100 ++ NLM_F_REQUEST = 0x1 ++ NLM_F_ROOT = 0x100 ++ NOFLSH = 0x80 ++ OCRNL = 0x8 ++ OFDEL = 0x80 ++ OFILL = 0x40 ++ ONLCR = 0x4 ++ ONLRET = 0x20 ++ ONOCR = 0x10 ++ OPOST = 0x1 ++ O_ACCMODE = 0x3 ++ O_APPEND = 0x400 ++ O_ASYNC = 0x2000 ++ O_CLOEXEC = 0x80000 ++ O_CREAT = 0x40 ++ O_DIRECT = 0x4000 ++ O_DIRECTORY = 0x10000 ++ O_DSYNC = 0x1000 ++ O_EXCL = 0x80 ++ O_FSYNC = 0x101000 ++ O_LARGEFILE = 0x0 ++ O_NDELAY = 0x800 ++ O_NOATIME = 0x40000 ++ O_NOCTTY = 0x100 ++ O_NOFOLLOW = 0x20000 ++ O_NONBLOCK = 0x800 ++ O_PATH = 0x200000 ++ O_RDONLY = 0x0 ++ O_RDWR = 0x2 ++ O_RSYNC = 0x101000 ++ O_SYNC = 0x101000 ++ O_TMPFILE = 0x410000 ++ O_TRUNC = 0x200 ++ O_WRONLY = 0x1 ++ PACKET_ADD_MEMBERSHIP = 0x1 ++ PACKET_AUXDATA = 0x8 ++ PACKET_BROADCAST = 0x1 ++ PACKET_COPY_THRESH = 0x7 ++ PACKET_DROP_MEMBERSHIP = 0x2 ++ PACKET_FANOUT = 0x12 ++ PACKET_FANOUT_CBPF = 0x6 ++ PACKET_FANOUT_CPU = 0x2 ++ PACKET_FANOUT_DATA = 0x16 ++ PACKET_FANOUT_EBPF = 0x7 ++ PACKET_FANOUT_FLAG_DEFRAG = 0x8000 ++ PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 ++ PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 ++ PACKET_FANOUT_HASH = 0x0 ++ PACKET_FANOUT_LB = 0x1 ++ PACKET_FANOUT_QM = 0x5 ++ PACKET_FANOUT_RND = 0x4 ++ PACKET_FANOUT_ROLLOVER = 0x3 ++ PACKET_FASTROUTE = 0x6 ++ PACKET_HDRLEN = 0xb ++ PACKET_HOST = 0x0 ++ PACKET_KERNEL = 0x7 ++ PACKET_LOOPBACK = 0x5 ++ PACKET_LOSS = 0xe ++ PACKET_MR_ALLMULTI = 0x2 ++ PACKET_MR_MULTICAST = 0x0 ++ PACKET_MR_PROMISC = 0x1 ++ PACKET_MR_UNICAST = 0x3 ++ PACKET_MULTICAST = 0x2 ++ PACKET_ORIGDEV = 0x9 ++ PACKET_OTHERHOST = 0x3 ++ PACKET_OUTGOING = 0x4 ++ PACKET_QDISC_BYPASS = 0x14 ++ PACKET_RECV_OUTPUT = 0x3 ++ PACKET_RESERVE = 0xc ++ PACKET_ROLLOVER_STATS = 0x15 ++ PACKET_RX_RING = 0x5 ++ PACKET_STATISTICS = 0x6 ++ PACKET_TIMESTAMP = 0x11 ++ PACKET_TX_HAS_OFF = 0x13 ++ PACKET_TX_RING = 0xd ++ PACKET_TX_TIMESTAMP = 0x10 ++ PACKET_USER = 0x6 ++ PACKET_VERSION = 0xa ++ PACKET_VNET_HDR = 0xf ++ PARENB = 0x100 ++ PARITY_CRC16_PR0 = 0x2 ++ PARITY_CRC16_PR0_CCITT = 0x4 ++ PARITY_CRC16_PR1 = 0x3 ++ PARITY_CRC16_PR1_CCITT = 0x5 ++ PARITY_CRC32_PR0_CCITT = 0x6 ++ PARITY_CRC32_PR1_CCITT = 0x7 ++ 
PARITY_DEFAULT = 0x0 ++ PARITY_NONE = 0x1 ++ PARMRK = 0x8 ++ PARODD = 0x200 ++ PENDIN = 0x4000 ++ PRIO_PGRP = 0x1 ++ PRIO_PROCESS = 0x0 ++ PRIO_USER = 0x2 ++ PROT_EXEC = 0x4 ++ PROT_GROWSDOWN = 0x1000000 ++ PROT_GROWSUP = 0x2000000 ++ PROT_NONE = 0x0 ++ PROT_READ = 0x1 ++ PROT_WRITE = 0x2 ++ PR_CAPBSET_DROP = 0x18 ++ PR_CAPBSET_READ = 0x17 ++ PR_CAP_AMBIENT = 0x2f ++ PR_CAP_AMBIENT_CLEAR_ALL = 0x4 ++ PR_CAP_AMBIENT_IS_SET = 0x1 ++ PR_CAP_AMBIENT_LOWER = 0x3 ++ PR_CAP_AMBIENT_RAISE = 0x2 ++ PR_ENDIAN_BIG = 0x0 ++ PR_ENDIAN_LITTLE = 0x1 ++ PR_ENDIAN_PPC_LITTLE = 0x2 ++ PR_FPEMU_NOPRINT = 0x1 ++ PR_FPEMU_SIGFPE = 0x2 ++ PR_FP_EXC_ASYNC = 0x2 ++ PR_FP_EXC_DISABLED = 0x0 ++ PR_FP_EXC_DIV = 0x10000 ++ PR_FP_EXC_INV = 0x100000 ++ PR_FP_EXC_NONRECOV = 0x1 ++ PR_FP_EXC_OVF = 0x20000 ++ PR_FP_EXC_PRECISE = 0x3 ++ PR_FP_EXC_RES = 0x80000 ++ PR_FP_EXC_SW_ENABLE = 0x80 ++ PR_FP_EXC_UND = 0x40000 ++ PR_FP_MODE_FR = 0x1 ++ PR_FP_MODE_FRE = 0x2 ++ PR_GET_CHILD_SUBREAPER = 0x25 ++ PR_GET_DUMPABLE = 0x3 ++ PR_GET_ENDIAN = 0x13 ++ PR_GET_FPEMU = 0x9 ++ PR_GET_FPEXC = 0xb ++ PR_GET_FP_MODE = 0x2e ++ PR_GET_KEEPCAPS = 0x7 ++ PR_GET_NAME = 0x10 ++ PR_GET_NO_NEW_PRIVS = 0x27 ++ PR_GET_PDEATHSIG = 0x2 ++ PR_GET_SECCOMP = 0x15 ++ PR_GET_SECUREBITS = 0x1b ++ PR_GET_SPECULATION_CTRL = 0x34 ++ PR_GET_THP_DISABLE = 0x2a ++ PR_GET_TID_ADDRESS = 0x28 ++ PR_GET_TIMERSLACK = 0x1e ++ PR_GET_TIMING = 0xd ++ PR_GET_TSC = 0x19 ++ PR_GET_UNALIGN = 0x5 ++ PR_MCE_KILL = 0x21 ++ PR_MCE_KILL_CLEAR = 0x0 ++ PR_MCE_KILL_DEFAULT = 0x2 ++ PR_MCE_KILL_EARLY = 0x1 ++ PR_MCE_KILL_GET = 0x22 ++ PR_MCE_KILL_LATE = 0x0 ++ PR_MCE_KILL_SET = 0x1 ++ PR_MPX_DISABLE_MANAGEMENT = 0x2c ++ PR_MPX_ENABLE_MANAGEMENT = 0x2b ++ PR_SET_CHILD_SUBREAPER = 0x24 ++ PR_SET_DUMPABLE = 0x4 ++ PR_SET_ENDIAN = 0x14 ++ PR_SET_FPEMU = 0xa ++ PR_SET_FPEXC = 0xc ++ PR_SET_FP_MODE = 0x2d ++ PR_SET_KEEPCAPS = 0x8 ++ PR_SET_MM = 0x23 ++ PR_SET_MM_ARG_END = 0x9 ++ PR_SET_MM_ARG_START = 0x8 ++ PR_SET_MM_AUXV = 0xc ++ PR_SET_MM_BRK = 0x7 ++ PR_SET_MM_END_CODE = 0x2 ++ PR_SET_MM_END_DATA = 0x4 ++ PR_SET_MM_ENV_END = 0xb ++ PR_SET_MM_ENV_START = 0xa ++ PR_SET_MM_EXE_FILE = 0xd ++ PR_SET_MM_MAP = 0xe ++ PR_SET_MM_MAP_SIZE = 0xf ++ PR_SET_MM_START_BRK = 0x6 ++ PR_SET_MM_START_CODE = 0x1 ++ PR_SET_MM_START_DATA = 0x3 ++ PR_SET_MM_START_STACK = 0x5 ++ PR_SET_NAME = 0xf ++ PR_SET_NO_NEW_PRIVS = 0x26 ++ PR_SET_PDEATHSIG = 0x1 ++ PR_SET_PTRACER = 0x59616d61 ++ PR_SET_PTRACER_ANY = 0xffffffffffffffff ++ PR_SET_SECCOMP = 0x16 ++ PR_SET_SECUREBITS = 0x1c ++ PR_SET_SPECULATION_CTRL = 0x35 ++ PR_SET_THP_DISABLE = 0x29 ++ PR_SET_TIMERSLACK = 0x1d ++ PR_SET_TIMING = 0xe ++ PR_SET_TSC = 0x1a ++ PR_SET_UNALIGN = 0x6 ++ PR_SPEC_DISABLE = 0x4 ++ PR_SPEC_ENABLE = 0x2 ++ PR_SPEC_FORCE_DISABLE = 0x8 ++ PR_SPEC_INDIRECT_BRANCH = 0x1 ++ PR_SPEC_NOT_AFFECTED = 0x0 ++ PR_SPEC_PRCTL = 0x1 ++ PR_SPEC_STORE_BYPASS = 0x0 ++ PR_SVE_GET_VL = 0x33 ++ PR_SVE_SET_VL = 0x32 ++ PR_SVE_SET_VL_ONEXEC = 0x40000 ++ PR_SVE_VL_INHERIT = 0x20000 ++ PR_SVE_VL_LEN_MASK = 0xffff ++ PR_TASK_PERF_EVENTS_DISABLE = 0x1f ++ PR_TASK_PERF_EVENTS_ENABLE = 0x20 ++ PR_TIMING_STATISTICAL = 0x0 ++ PR_TIMING_TIMESTAMP = 0x1 ++ PR_TSC_ENABLE = 0x1 ++ PR_TSC_SIGSEGV = 0x2 ++ PR_UNALIGN_NOPRINT = 0x1 ++ PR_UNALIGN_SIGBUS = 0x2 ++ PTRACE_ATTACH = 0x10 ++ PTRACE_CONT = 0x7 ++ PTRACE_DETACH = 0x11 ++ PTRACE_EVENT_CLONE = 0x3 ++ PTRACE_EVENT_EXEC = 0x4 ++ PTRACE_EVENT_EXIT = 0x6 ++ PTRACE_EVENT_FORK = 0x1 ++ PTRACE_EVENT_SECCOMP = 0x7 ++ PTRACE_EVENT_STOP = 0x80 ++ PTRACE_EVENT_VFORK = 0x2 ++ PTRACE_EVENT_VFORK_DONE = 0x5 ++ 
PTRACE_GETEVENTMSG = 0x4201 ++ PTRACE_GETREGS = 0xc ++ PTRACE_GETREGSET = 0x4204 ++ PTRACE_GETSIGINFO = 0x4202 ++ PTRACE_GETSIGMASK = 0x420a ++ PTRACE_INTERRUPT = 0x4207 ++ PTRACE_KILL = 0x8 ++ PTRACE_LISTEN = 0x4208 ++ PTRACE_O_EXITKILL = 0x100000 ++ PTRACE_O_MASK = 0x3000ff ++ PTRACE_O_SUSPEND_SECCOMP = 0x200000 ++ PTRACE_O_TRACECLONE = 0x8 ++ PTRACE_O_TRACEEXEC = 0x10 ++ PTRACE_O_TRACEEXIT = 0x40 ++ PTRACE_O_TRACEFORK = 0x2 ++ PTRACE_O_TRACESECCOMP = 0x80 ++ PTRACE_O_TRACESYSGOOD = 0x1 ++ PTRACE_O_TRACEVFORK = 0x4 ++ PTRACE_O_TRACEVFORKDONE = 0x20 ++ PTRACE_PEEKDATA = 0x2 ++ PTRACE_PEEKSIGINFO = 0x4209 ++ PTRACE_PEEKSIGINFO_SHARED = 0x1 ++ PTRACE_PEEKTEXT = 0x1 ++ PTRACE_PEEKUSR = 0x3 ++ PTRACE_POKEDATA = 0x5 ++ PTRACE_POKETEXT = 0x4 ++ PTRACE_POKEUSR = 0x6 ++ PTRACE_SECCOMP_GET_FILTER = 0x420c ++ PTRACE_SECCOMP_GET_METADATA = 0x420d ++ PTRACE_SEIZE = 0x4206 ++ PTRACE_SETOPTIONS = 0x4200 ++ PTRACE_SETREGS = 0xd ++ PTRACE_SETREGSET = 0x4205 ++ PTRACE_SETSIGINFO = 0x4203 ++ PTRACE_SETSIGMASK = 0x420b ++ PTRACE_SINGLESTEP = 0x9 ++ PTRACE_SYSCALL = 0x18 ++ PTRACE_TRACEME = 0x0 ++ RLIMIT_AS = 0x9 ++ RLIMIT_CORE = 0x4 ++ RLIMIT_CPU = 0x0 ++ RLIMIT_DATA = 0x2 ++ RLIMIT_FSIZE = 0x1 ++ RLIMIT_NOFILE = 0x7 ++ RLIMIT_STACK = 0x3 ++ RLIM_INFINITY = -0x1 ++ RTAX_ADVMSS = 0x8 ++ RTAX_CC_ALGO = 0x10 ++ RTAX_CWND = 0x7 ++ RTAX_FASTOPEN_NO_COOKIE = 0x11 ++ RTAX_FEATURES = 0xc ++ RTAX_FEATURE_ALLFRAG = 0x8 ++ RTAX_FEATURE_ECN = 0x1 ++ RTAX_FEATURE_MASK = 0xf ++ RTAX_FEATURE_SACK = 0x2 ++ RTAX_FEATURE_TIMESTAMP = 0x4 ++ RTAX_HOPLIMIT = 0xa ++ RTAX_INITCWND = 0xb ++ RTAX_INITRWND = 0xe ++ RTAX_LOCK = 0x1 ++ RTAX_MAX = 0x11 ++ RTAX_MTU = 0x2 ++ RTAX_QUICKACK = 0xf ++ RTAX_REORDERING = 0x9 ++ RTAX_RTO_MIN = 0xd ++ RTAX_RTT = 0x4 ++ RTAX_RTTVAR = 0x5 ++ RTAX_SSTHRESH = 0x6 ++ RTAX_UNSPEC = 0x0 ++ RTAX_WINDOW = 0x3 ++ RTA_ALIGNTO = 0x4 ++ RTA_MAX = 0x1d ++ RTCF_DIRECTSRC = 0x4000000 ++ RTCF_DOREDIRECT = 0x1000000 ++ RTCF_LOG = 0x2000000 ++ RTCF_MASQ = 0x400000 ++ RTCF_NAT = 0x800000 ++ RTCF_VALVE = 0x200000 ++ RTF_ADDRCLASSMASK = 0xf8000000 ++ RTF_ADDRCONF = 0x40000 ++ RTF_ALLONLINK = 0x20000 ++ RTF_BROADCAST = 0x10000000 ++ RTF_CACHE = 0x1000000 ++ RTF_DEFAULT = 0x10000 ++ RTF_DYNAMIC = 0x10 ++ RTF_FLOW = 0x2000000 ++ RTF_GATEWAY = 0x2 ++ RTF_HOST = 0x4 ++ RTF_INTERFACE = 0x40000000 ++ RTF_IRTT = 0x100 ++ RTF_LINKRT = 0x100000 ++ RTF_LOCAL = 0x80000000 ++ RTF_MODIFIED = 0x20 ++ RTF_MSS = 0x40 ++ RTF_MTU = 0x40 ++ RTF_MULTICAST = 0x20000000 ++ RTF_NAT = 0x8000000 ++ RTF_NOFORWARD = 0x1000 ++ RTF_NONEXTHOP = 0x200000 ++ RTF_NOPMTUDISC = 0x4000 ++ RTF_POLICY = 0x4000000 ++ RTF_REINSTATE = 0x8 ++ RTF_REJECT = 0x200 ++ RTF_STATIC = 0x400 ++ RTF_THROW = 0x2000 ++ RTF_UP = 0x1 ++ RTF_WINDOW = 0x80 ++ RTF_XRESOLVE = 0x800 ++ RTM_BASE = 0x10 ++ RTM_DELACTION = 0x31 ++ RTM_DELADDR = 0x15 ++ RTM_DELADDRLABEL = 0x49 ++ RTM_DELCHAIN = 0x65 ++ RTM_DELLINK = 0x11 ++ RTM_DELMDB = 0x55 ++ RTM_DELNEIGH = 0x1d ++ RTM_DELNETCONF = 0x51 ++ RTM_DELNSID = 0x59 ++ RTM_DELQDISC = 0x25 ++ RTM_DELROUTE = 0x19 ++ RTM_DELRULE = 0x21 ++ RTM_DELTCLASS = 0x29 ++ RTM_DELTFILTER = 0x2d ++ RTM_F_CLONED = 0x200 ++ RTM_F_EQUALIZE = 0x400 ++ RTM_F_FIB_MATCH = 0x2000 ++ RTM_F_LOOKUP_TABLE = 0x1000 ++ RTM_F_NOTIFY = 0x100 ++ RTM_F_PREFIX = 0x800 ++ RTM_GETACTION = 0x32 ++ RTM_GETADDR = 0x16 ++ RTM_GETADDRLABEL = 0x4a ++ RTM_GETANYCAST = 0x3e ++ RTM_GETCHAIN = 0x66 ++ RTM_GETDCB = 0x4e ++ RTM_GETLINK = 0x12 ++ RTM_GETMDB = 0x56 ++ RTM_GETMULTICAST = 0x3a ++ RTM_GETNEIGH = 0x1e ++ RTM_GETNEIGHTBL = 0x42 ++ RTM_GETNETCONF = 0x52 ++ RTM_GETNSID = 0x5a 
++ RTM_GETQDISC = 0x26 ++ RTM_GETROUTE = 0x1a ++ RTM_GETRULE = 0x22 ++ RTM_GETSTATS = 0x5e ++ RTM_GETTCLASS = 0x2a ++ RTM_GETTFILTER = 0x2e ++ RTM_MAX = 0x67 ++ RTM_NEWACTION = 0x30 ++ RTM_NEWADDR = 0x14 ++ RTM_NEWADDRLABEL = 0x48 ++ RTM_NEWCACHEREPORT = 0x60 ++ RTM_NEWCHAIN = 0x64 ++ RTM_NEWLINK = 0x10 ++ RTM_NEWMDB = 0x54 ++ RTM_NEWNDUSEROPT = 0x44 ++ RTM_NEWNEIGH = 0x1c ++ RTM_NEWNEIGHTBL = 0x40 ++ RTM_NEWNETCONF = 0x50 ++ RTM_NEWNSID = 0x58 ++ RTM_NEWPREFIX = 0x34 ++ RTM_NEWQDISC = 0x24 ++ RTM_NEWROUTE = 0x18 ++ RTM_NEWRULE = 0x20 ++ RTM_NEWSTATS = 0x5c ++ RTM_NEWTCLASS = 0x28 ++ RTM_NEWTFILTER = 0x2c ++ RTM_NR_FAMILIES = 0x16 ++ RTM_NR_MSGTYPES = 0x58 ++ RTM_SETDCB = 0x4f ++ RTM_SETLINK = 0x13 ++ RTM_SETNEIGHTBL = 0x43 ++ RTNH_ALIGNTO = 0x4 ++ RTNH_COMPARE_MASK = 0x19 ++ RTNH_F_DEAD = 0x1 ++ RTNH_F_LINKDOWN = 0x10 ++ RTNH_F_OFFLOAD = 0x8 ++ RTNH_F_ONLINK = 0x4 ++ RTNH_F_PERVASIVE = 0x2 ++ RTNH_F_UNRESOLVED = 0x20 ++ RTN_MAX = 0xb ++ RTPROT_BABEL = 0x2a ++ RTPROT_BGP = 0xba ++ RTPROT_BIRD = 0xc ++ RTPROT_BOOT = 0x3 ++ RTPROT_DHCP = 0x10 ++ RTPROT_DNROUTED = 0xd ++ RTPROT_EIGRP = 0xc0 ++ RTPROT_GATED = 0x8 ++ RTPROT_ISIS = 0xbb ++ RTPROT_KERNEL = 0x2 ++ RTPROT_MROUTED = 0x11 ++ RTPROT_MRT = 0xa ++ RTPROT_NTK = 0xf ++ RTPROT_OSPF = 0xbc ++ RTPROT_RA = 0x9 ++ RTPROT_REDIRECT = 0x1 ++ RTPROT_RIP = 0xbd ++ RTPROT_STATIC = 0x4 ++ RTPROT_UNSPEC = 0x0 ++ RTPROT_XORP = 0xe ++ RTPROT_ZEBRA = 0xb ++ RT_CLASS_DEFAULT = 0xfd ++ RT_CLASS_LOCAL = 0xff ++ RT_CLASS_MAIN = 0xfe ++ RT_CLASS_MAX = 0xff ++ RT_CLASS_UNSPEC = 0x0 ++ RUSAGE_CHILDREN = -0x1 ++ RUSAGE_SELF = 0x0 ++ RUSAGE_THREAD = 0x1 ++ SCM_CREDENTIALS = 0x2 ++ SCM_RIGHTS = 0x1 ++ SCM_TIMESTAMP = 0x1d ++ SCM_TIMESTAMPING = 0x25 ++ SCM_TIMESTAMPING_OPT_STATS = 0x36 ++ SCM_TIMESTAMPING_PKTINFO = 0x3a ++ SCM_TIMESTAMPNS = 0x23 ++ SCM_TXTIME = 0x3d ++ SCM_WIFI_STATUS = 0x29 ++ SHUT_RD = 0x0 ++ SHUT_RDWR = 0x2 ++ SHUT_WR = 0x1 ++ SIOCADDDLCI = 0x8980 ++ SIOCADDMULTI = 0x8931 ++ SIOCADDRT = 0x890b ++ SIOCATMARK = 0x8905 ++ SIOCDARP = 0x8953 ++ SIOCDELDLCI = 0x8981 ++ SIOCDELMULTI = 0x8932 ++ SIOCDELRT = 0x890c ++ SIOCDEVPRIVATE = 0x89f0 ++ SIOCDIFADDR = 0x8936 ++ SIOCDRARP = 0x8960 ++ SIOCGARP = 0x8954 ++ SIOCGIFADDR = 0x8915 ++ SIOCGIFBR = 0x8940 ++ SIOCGIFBRDADDR = 0x8919 ++ SIOCGIFCONF = 0x8912 ++ SIOCGIFCOUNT = 0x8938 ++ SIOCGIFDSTADDR = 0x8917 ++ SIOCGIFENCAP = 0x8925 ++ SIOCGIFFLAGS = 0x8913 ++ SIOCGIFHWADDR = 0x8927 ++ SIOCGIFINDEX = 0x8933 ++ SIOCGIFMAP = 0x8970 ++ SIOCGIFMEM = 0x891f ++ SIOCGIFMETRIC = 0x891d ++ SIOCGIFMTU = 0x8921 ++ SIOCGIFNAME = 0x8910 ++ SIOCGIFNETMASK = 0x891b ++ SIOCGIFPFLAGS = 0x8935 ++ SIOCGIFSLAVE = 0x8929 ++ SIOCGIFTXQLEN = 0x8942 ++ SIOCGPGRP = 0x8904 ++ SIOCGRARP = 0x8961 ++ SIOCGSTAMP = 0x8906 ++ SIOCGSTAMPNS = 0x8907 ++ SIOCPROTOPRIVATE = 0x89e0 ++ SIOCRTMSG = 0x890d ++ SIOCSARP = 0x8955 ++ SIOCSIFADDR = 0x8916 ++ SIOCSIFBR = 0x8941 ++ SIOCSIFBRDADDR = 0x891a ++ SIOCSIFDSTADDR = 0x8918 ++ SIOCSIFENCAP = 0x8926 ++ SIOCSIFFLAGS = 0x8914 ++ SIOCSIFHWADDR = 0x8924 ++ SIOCSIFHWBROADCAST = 0x8937 ++ SIOCSIFLINK = 0x8911 ++ SIOCSIFMAP = 0x8971 ++ SIOCSIFMEM = 0x8920 ++ SIOCSIFMETRIC = 0x891e ++ SIOCSIFMTU = 0x8922 ++ SIOCSIFNAME = 0x8923 ++ SIOCSIFNETMASK = 0x891c ++ SIOCSIFPFLAGS = 0x8934 ++ SIOCSIFSLAVE = 0x8930 ++ SIOCSIFTXQLEN = 0x8943 ++ SIOCSPGRP = 0x8902 ++ SIOCSRARP = 0x8962 ++ SOCK_CLOEXEC = 0x80000 ++ SOCK_DCCP = 0x6 ++ SOCK_DGRAM = 0x2 ++ SOCK_NONBLOCK = 0x800 ++ SOCK_PACKET = 0xa ++ SOCK_RAW = 0x3 ++ SOCK_RDM = 0x4 ++ SOCK_SEQPACKET = 0x5 ++ SOCK_STREAM = 0x1 ++ SOL_AAL = 0x109 ++ SOL_ALG = 0x117 ++ 
SOL_ATM = 0x108 ++ SOL_BLUETOOTH = 0x112 ++ SOL_CAIF = 0x116 ++ SOL_DCCP = 0x10d ++ SOL_DECNET = 0x105 ++ SOL_ICMPV6 = 0x3a ++ SOL_IP = 0x0 ++ SOL_IPV6 = 0x29 ++ SOL_IRDA = 0x10a ++ SOL_IUCV = 0x115 ++ SOL_KCM = 0x119 ++ SOL_LLC = 0x10c ++ SOL_NETBEUI = 0x10b ++ SOL_NETLINK = 0x10e ++ SOL_NFC = 0x118 ++ SOL_PACKET = 0x107 ++ SOL_PNPIPE = 0x113 ++ SOL_PPPOL2TP = 0x111 ++ SOL_RAW = 0xff ++ SOL_RDS = 0x114 ++ SOL_RXRPC = 0x110 ++ SOL_SOCKET = 0x1 ++ SOL_TCP = 0x6 ++ SOL_TIPC = 0x10f ++ SOL_TLS = 0x11a ++ SOL_X25 = 0x106 ++ SOMAXCONN = 0x80 ++ SO_ACCEPTCONN = 0x1e ++ SO_ATTACH_BPF = 0x32 ++ SO_ATTACH_FILTER = 0x1a ++ SO_ATTACH_REUSEPORT_CBPF = 0x33 ++ SO_ATTACH_REUSEPORT_EBPF = 0x34 ++ SO_BINDTODEVICE = 0x19 ++ SO_BPF_EXTENSIONS = 0x30 ++ SO_BROADCAST = 0x6 ++ SO_BSDCOMPAT = 0xe ++ SO_BUSY_POLL = 0x2e ++ SO_CNX_ADVICE = 0x35 ++ SO_COOKIE = 0x39 ++ SO_DEBUG = 0x1 ++ SO_DETACH_BPF = 0x1b ++ SO_DETACH_FILTER = 0x1b ++ SO_DOMAIN = 0x27 ++ SO_DONTROUTE = 0x5 ++ SO_ERROR = 0x4 ++ SO_GET_FILTER = 0x1a ++ SO_INCOMING_CPU = 0x31 ++ SO_INCOMING_NAPI_ID = 0x38 ++ SO_KEEPALIVE = 0x9 ++ SO_LINGER = 0xd ++ SO_LOCK_FILTER = 0x2c ++ SO_MARK = 0x24 ++ SO_MAX_PACING_RATE = 0x2f ++ SO_MEMINFO = 0x37 ++ SO_NOFCS = 0x2b ++ SO_NO_CHECK = 0xb ++ SO_OOBINLINE = 0xa ++ SO_PASSCRED = 0x10 ++ SO_PASSSEC = 0x22 ++ SO_PEEK_OFF = 0x2a ++ SO_PEERCRED = 0x11 ++ SO_PEERGROUPS = 0x3b ++ SO_PEERNAME = 0x1c ++ SO_PEERSEC = 0x1f ++ SO_PRIORITY = 0xc ++ SO_PROTOCOL = 0x26 ++ SO_RCVBUF = 0x8 ++ SO_RCVBUFFORCE = 0x21 ++ SO_RCVLOWAT = 0x12 ++ SO_RCVTIMEO = 0x14 ++ SO_REUSEADDR = 0x2 ++ SO_REUSEPORT = 0xf ++ SO_RXQ_OVFL = 0x28 ++ SO_SECURITY_AUTHENTICATION = 0x16 ++ SO_SECURITY_ENCRYPTION_NETWORK = 0x18 ++ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 ++ SO_SELECT_ERR_QUEUE = 0x2d ++ SO_SNDBUF = 0x7 ++ SO_SNDBUFFORCE = 0x20 ++ SO_SNDLOWAT = 0x13 ++ SO_SNDTIMEO = 0x15 ++ SO_TIMESTAMP = 0x1d ++ SO_TIMESTAMPING = 0x25 ++ SO_TIMESTAMPNS = 0x23 ++ SO_TXTIME = 0x3d ++ SO_TYPE = 0x3 ++ SO_WIFI_STATUS = 0x29 ++ SO_ZEROCOPY = 0x3c ++ S_BLKSIZE = 0x200 ++ S_IEXEC = 0x40 ++ S_IFBLK = 0x6000 ++ S_IFCHR = 0x2000 ++ S_IFDIR = 0x4000 ++ S_IFIFO = 0x1000 ++ S_IFLNK = 0xa000 ++ S_IFMT = 0xf000 ++ S_IFREG = 0x8000 ++ S_IFSOCK = 0xc000 ++ S_IREAD = 0x100 ++ S_IRGRP = 0x20 ++ S_IROTH = 0x4 ++ S_IRUSR = 0x100 ++ S_IRWXG = 0x38 ++ S_IRWXO = 0x7 ++ S_IRWXU = 0x1c0 ++ S_ISGID = 0x400 ++ S_ISUID = 0x800 ++ S_ISVTX = 0x200 ++ S_IWGRP = 0x10 ++ S_IWOTH = 0x2 ++ S_IWRITE = 0x80 ++ S_IWUSR = 0x80 ++ S_IXGRP = 0x8 ++ S_IXOTH = 0x1 ++ S_IXUSR = 0x40 ++ TCFLSH = 0x540b ++ TCIFLUSH = 0x0 ++ TCIOFLUSH = 0x2 ++ TCOFLUSH = 0x1 ++ TCP_CC_INFO = 0x1a ++ TCP_CONGESTION = 0xd ++ TCP_COOKIE_IN_ALWAYS = 0x1 ++ TCP_COOKIE_MAX = 0x10 ++ TCP_COOKIE_MIN = 0x8 ++ TCP_COOKIE_OUT_NEVER = 0x2 ++ TCP_COOKIE_PAIR_SIZE = 0x20 ++ TCP_COOKIE_TRANSACTIONS = 0xf ++ TCP_CORK = 0x3 ++ TCP_DEFER_ACCEPT = 0x9 ++ TCP_FASTOPEN = 0x17 ++ TCP_FASTOPEN_CONNECT = 0x1e ++ TCP_FASTOPEN_KEY = 0x21 ++ TCP_FASTOPEN_NO_COOKIE = 0x22 ++ TCP_INFO = 0xb ++ TCP_KEEPCNT = 0x6 ++ TCP_KEEPIDLE = 0x4 ++ TCP_KEEPINTVL = 0x5 ++ TCP_LINGER2 = 0x8 ++ TCP_MAXSEG = 0x2 ++ TCP_MAXWIN = 0xffff ++ TCP_MAX_WINSHIFT = 0xe ++ TCP_MD5SIG = 0xe ++ TCP_MD5SIG_EXT = 0x20 ++ TCP_MD5SIG_FLAG_PREFIX = 0x1 ++ TCP_MD5SIG_MAXKEYLEN = 0x50 ++ TCP_MSS = 0x200 ++ TCP_MSS_DEFAULT = 0x218 ++ TCP_MSS_DESIRED = 0x4c4 ++ TCP_NODELAY = 0x1 ++ TCP_NOTSENT_LOWAT = 0x19 ++ TCP_QUEUE_SEQ = 0x15 ++ TCP_QUICKACK = 0xc ++ TCP_REPAIR = 0x13 ++ TCP_REPAIR_OPTIONS = 0x16 ++ TCP_REPAIR_QUEUE = 0x14 ++ TCP_REPAIR_WINDOW = 0x1d ++ TCP_SAVED_SYN = 0x1c ++ 
TCP_SAVE_SYN = 0x1b ++ TCP_SYNCNT = 0x7 ++ TCP_S_DATA_IN = 0x4 ++ TCP_S_DATA_OUT = 0x8 ++ TCP_THIN_DUPACK = 0x11 ++ TCP_THIN_LINEAR_TIMEOUTS = 0x10 ++ TCP_TIMESTAMP = 0x18 ++ TCP_ULP = 0x1f ++ TCP_USER_TIMEOUT = 0x12 ++ TCP_WINDOW_CLAMP = 0xa ++ TCSAFLUSH = 0x2 ++ TIOCCBRK = 0x5428 ++ TIOCCONS = 0x541d ++ TIOCEXCL = 0x540c ++ TIOCGDEV = 0x80045432 ++ TIOCGETD = 0x5424 ++ TIOCGEXCL = 0x80045440 ++ TIOCGICOUNT = 0x545d ++ TIOCGLCKTRMIOS = 0x5456 ++ TIOCGPGRP = 0x540f ++ TIOCGPKT = 0x80045438 ++ TIOCGPTLCK = 0x80045439 ++ TIOCGPTN = 0x80045430 ++ TIOCGPTPEER = 0x5441 ++ TIOCGRS485 = 0x542e ++ TIOCGSERIAL = 0x541e ++ TIOCGSID = 0x5429 ++ TIOCGSOFTCAR = 0x5419 ++ TIOCGWINSZ = 0x5413 ++ TIOCINQ = 0x541b ++ TIOCLINUX = 0x541c ++ TIOCMBIC = 0x5417 ++ TIOCMBIS = 0x5416 ++ TIOCMGET = 0x5415 ++ TIOCMIWAIT = 0x545c ++ TIOCMSET = 0x5418 ++ TIOCM_CAR = 0x40 ++ TIOCM_CD = 0x40 ++ TIOCM_CTS = 0x20 ++ TIOCM_DSR = 0x100 ++ TIOCM_DTR = 0x2 ++ TIOCM_LE = 0x1 ++ TIOCM_RI = 0x80 ++ TIOCM_RNG = 0x80 ++ TIOCM_RTS = 0x4 ++ TIOCM_SR = 0x10 ++ TIOCM_ST = 0x8 ++ TIOCNOTTY = 0x5422 ++ TIOCNXCL = 0x540d ++ TIOCOUTQ = 0x5411 ++ TIOCPKT = 0x5420 ++ TIOCPKT_DATA = 0x0 ++ TIOCPKT_DOSTOP = 0x20 ++ TIOCPKT_FLUSHREAD = 0x1 ++ TIOCPKT_FLUSHWRITE = 0x2 ++ TIOCPKT_IOCTL = 0x40 ++ TIOCPKT_NOSTOP = 0x10 ++ TIOCPKT_START = 0x8 ++ TIOCPKT_STOP = 0x4 ++ TIOCSBRK = 0x5427 ++ TIOCSCTTY = 0x540e ++ TIOCSERCONFIG = 0x5453 ++ TIOCSERGETLSR = 0x5459 ++ TIOCSERGETMULTI = 0x545a ++ TIOCSERGSTRUCT = 0x5458 ++ TIOCSERGWILD = 0x5454 ++ TIOCSERSETMULTI = 0x545b ++ TIOCSERSWILD = 0x5455 ++ TIOCSER_TEMT = 0x1 ++ TIOCSETD = 0x5423 ++ TIOCSIG = 0x40045436 ++ TIOCSLCKTRMIOS = 0x5457 ++ TIOCSPGRP = 0x5410 ++ TIOCSPTLCK = 0x40045431 ++ TIOCSRS485 = 0x542f ++ TIOCSSERIAL = 0x541f ++ TIOCSSOFTCAR = 0x541a ++ TIOCSTI = 0x5412 ++ TIOCSWINSZ = 0x5414 ++ TIOCVHANGUP = 0x5437 ++ TOSTOP = 0x100 ++ TUNATTACHFILTER = 0x401054d5 ++ TUNDETACHFILTER = 0x401054d6 ++ TUNGETFEATURES = 0x800454cf ++ TUNGETFILTER = 0x801054db ++ TUNGETIFF = 0x800454d2 ++ TUNGETSNDBUF = 0x800454d3 ++ TUNGETVNETBE = 0x800454df ++ TUNGETVNETHDRSZ = 0x800454d7 ++ TUNGETVNETLE = 0x800454dd ++ TUNSETDEBUG = 0x400454c9 ++ TUNSETFILTEREBPF = 0x800454e1 ++ TUNSETGROUP = 0x400454ce ++ TUNSETIFF = 0x400454ca ++ TUNSETIFINDEX = 0x400454da ++ TUNSETLINK = 0x400454cd ++ TUNSETNOCSUM = 0x400454c8 ++ TUNSETOFFLOAD = 0x400454d0 ++ TUNSETOWNER = 0x400454cc ++ TUNSETPERSIST = 0x400454cb ++ TUNSETQUEUE = 0x400454d9 ++ TUNSETSNDBUF = 0x400454d4 ++ TUNSETSTEERINGEBPF = 0x800454e0 ++ TUNSETTXFILTER = 0x400454d1 ++ TUNSETVNETBE = 0x400454de ++ TUNSETVNETHDRSZ = 0x400454d8 ++ TUNSETVNETLE = 0x400454dc ++ VDISCARD = 0xd ++ VEOF = 0x4 ++ VEOL = 0xb ++ VEOL2 = 0x10 ++ VERASE = 0x2 ++ VINTR = 0x0 ++ VKILL = 0x3 ++ VLNEXT = 0xf ++ VMIN = 0x6 ++ VQUIT = 0x1 ++ VREPRINT = 0xc ++ VSTART = 0x8 ++ VSTOP = 0x9 ++ VSUSP = 0xa ++ VSWTC = 0x7 ++ VT0 = 0x0 ++ VT1 = 0x4000 ++ VTDLY = 0x4000 ++ VTIME = 0x5 ++ VWERASE = 0xe ++ WALL = 0x40000000 ++ WCLONE = 0x80000000 ++ WCONTINUED = 0x8 ++ WEXITED = 0x4 ++ WNOHANG = 0x1 ++ WNOTHREAD = 0x20000000 ++ WNOWAIT = 0x1000000 ++ WORDSIZE = 0x40 ++ WSTOPPED = 0x2 ++ WUNTRACED = 0x2 ++) ++ ++// Errors ++const ( ++ E2BIG = Errno(0x7) ++ EACCES = Errno(0xd) ++ EADDRINUSE = Errno(0x62) ++ EADDRNOTAVAIL = Errno(0x63) ++ EADV = Errno(0x44) ++ EAFNOSUPPORT = Errno(0x61) ++ EAGAIN = Errno(0xb) ++ EALREADY = Errno(0x72) ++ EBADE = Errno(0x34) ++ EBADF = Errno(0x9) ++ EBADFD = Errno(0x4d) ++ EBADMSG = Errno(0x4a) ++ EBADR = Errno(0x35) ++ EBADRQC = Errno(0x38) ++ EBADSLT = Errno(0x39) ++ EBFONT = 
Errno(0x3b) ++ EBUSY = Errno(0x10) ++ ECANCELED = Errno(0x7d) ++ ECHILD = Errno(0xa) ++ ECHRNG = Errno(0x2c) ++ ECOMM = Errno(0x46) ++ ECONNABORTED = Errno(0x67) ++ ECONNREFUSED = Errno(0x6f) ++ ECONNRESET = Errno(0x68) ++ EDEADLK = Errno(0x23) ++ EDEADLOCK = Errno(0x23) ++ EDESTADDRREQ = Errno(0x59) ++ EDOM = Errno(0x21) ++ EDOTDOT = Errno(0x49) ++ EDQUOT = Errno(0x7a) ++ EEXIST = Errno(0x11) ++ EFAULT = Errno(0xe) ++ EFBIG = Errno(0x1b) ++ EHOSTDOWN = Errno(0x70) ++ EHOSTUNREACH = Errno(0x71) ++ EHWPOISON = Errno(0x85) ++ EIDRM = Errno(0x2b) ++ EILSEQ = Errno(0x54) ++ EINPROGRESS = Errno(0x73) ++ EINTR = Errno(0x4) ++ EINVAL = Errno(0x16) ++ EIO = Errno(0x5) ++ EISCONN = Errno(0x6a) ++ EISDIR = Errno(0x15) ++ EISNAM = Errno(0x78) ++ EKEYEXPIRED = Errno(0x7f) ++ EKEYREJECTED = Errno(0x81) ++ EKEYREVOKED = Errno(0x80) ++ EL2HLT = Errno(0x33) ++ EL2NSYNC = Errno(0x2d) ++ EL3HLT = Errno(0x2e) ++ EL3RST = Errno(0x2f) ++ ELIBACC = Errno(0x4f) ++ ELIBBAD = Errno(0x50) ++ ELIBEXEC = Errno(0x53) ++ ELIBMAX = Errno(0x52) ++ ELIBSCN = Errno(0x51) ++ ELNRNG = Errno(0x30) ++ ELOOP = Errno(0x28) ++ EMEDIUMTYPE = Errno(0x7c) ++ EMFILE = Errno(0x18) ++ EMLINK = Errno(0x1f) ++ EMSGSIZE = Errno(0x5a) ++ EMULTIHOP = Errno(0x48) ++ ENAMETOOLONG = Errno(0x24) ++ ENAVAIL = Errno(0x77) ++ ENETDOWN = Errno(0x64) ++ ENETRESET = Errno(0x66) ++ ENETUNREACH = Errno(0x65) ++ ENFILE = Errno(0x17) ++ ENOANO = Errno(0x37) ++ ENOBUFS = Errno(0x69) ++ ENOCSI = Errno(0x32) ++ ENODATA = Errno(0x3d) ++ ENODEV = Errno(0x13) ++ ENOENT = Errno(0x2) ++ ENOEXEC = Errno(0x8) ++ ENOKEY = Errno(0x7e) ++ ENOLCK = Errno(0x25) ++ ENOLINK = Errno(0x43) ++ ENOMEDIUM = Errno(0x7b) ++ ENOMEM = Errno(0xc) ++ ENOMSG = Errno(0x2a) ++ ENONET = Errno(0x40) ++ ENOPKG = Errno(0x41) ++ ENOPROTOOPT = Errno(0x5c) ++ ENOSPC = Errno(0x1c) ++ ENOSR = Errno(0x3f) ++ ENOSTR = Errno(0x3c) ++ ENOSYS = Errno(0x26) ++ ENOTBLK = Errno(0xf) ++ ENOTCONN = Errno(0x6b) ++ ENOTDIR = Errno(0x14) ++ ENOTEMPTY = Errno(0x27) ++ ENOTNAM = Errno(0x76) ++ ENOTRECOVERABLE = Errno(0x83) ++ ENOTSOCK = Errno(0x58) ++ ENOTSUP = Errno(0x5f) ++ ENOTTY = Errno(0x19) ++ ENOTUNIQ = Errno(0x4c) ++ ENXIO = Errno(0x6) ++ EOPNOTSUPP = Errno(0x5f) ++ EOVERFLOW = Errno(0x4b) ++ EOWNERDEAD = Errno(0x82) ++ EPERM = Errno(0x1) ++ EPFNOSUPPORT = Errno(0x60) ++ EPIPE = Errno(0x20) ++ EPROTO = Errno(0x47) ++ EPROTONOSUPPORT = Errno(0x5d) ++ EPROTOTYPE = Errno(0x5b) ++ ERANGE = Errno(0x22) ++ EREMCHG = Errno(0x4e) ++ EREMOTE = Errno(0x42) ++ EREMOTEIO = Errno(0x79) ++ ERESTART = Errno(0x55) ++ ERFKILL = Errno(0x84) ++ EROFS = Errno(0x1e) ++ ESHUTDOWN = Errno(0x6c) ++ ESOCKTNOSUPPORT = Errno(0x5e) ++ ESPIPE = Errno(0x1d) ++ ESRCH = Errno(0x3) ++ ESRMNT = Errno(0x45) ++ ESTALE = Errno(0x74) ++ ESTRPIPE = Errno(0x56) ++ ETIME = Errno(0x3e) ++ ETIMEDOUT = Errno(0x6e) ++ ETOOMANYREFS = Errno(0x6d) ++ ETXTBSY = Errno(0x1a) ++ EUCLEAN = Errno(0x75) ++ EUNATCH = Errno(0x31) ++ EUSERS = Errno(0x57) ++ EWOULDBLOCK = Errno(0xb) ++ EXDEV = Errno(0x12) ++ EXFULL = Errno(0x36) ++) ++ ++// Signals ++const ( ++ SIGABRT = Signal(0x6) ++ SIGALRM = Signal(0xe) ++ SIGBUS = Signal(0x7) ++ SIGCHLD = Signal(0x11) ++ SIGCLD = Signal(0x11) ++ SIGCONT = Signal(0x12) ++ SIGFPE = Signal(0x8) ++ SIGHUP = Signal(0x1) ++ SIGILL = Signal(0x4) ++ SIGINT = Signal(0x2) ++ SIGIO = Signal(0x1d) ++ SIGIOT = Signal(0x6) ++ SIGKILL = Signal(0x9) ++ SIGPIPE = Signal(0xd) ++ SIGPOLL = Signal(0x1d) ++ SIGPROF = Signal(0x1b) ++ SIGPWR = Signal(0x1e) ++ SIGQUIT = Signal(0x3) ++ SIGSEGV = Signal(0xb) ++ SIGSTKFLT = Signal(0x10) ++ 
SIGSTOP = Signal(0x13) ++ SIGSYS = Signal(0x1f) ++ SIGTERM = Signal(0xf) ++ SIGTRAP = Signal(0x5) ++ SIGTSTP = Signal(0x14) ++ SIGTTIN = Signal(0x15) ++ SIGTTOU = Signal(0x16) ++ SIGURG = Signal(0x17) ++ SIGUSR1 = Signal(0xa) ++ SIGUSR2 = Signal(0xc) ++ SIGVTALRM = Signal(0x1a) ++ SIGWINCH = Signal(0x1c) ++ SIGXCPU = Signal(0x18) ++ SIGXFSZ = Signal(0x19) ++) ++ ++// Error table ++var errors = [...]string{ ++ 1: "operation not permitted", ++ 2: "no such file or directory", ++ 3: "no such process", ++ 4: "interrupted system call", ++ 5: "input/output error", ++ 6: "no such device or address", ++ 7: "argument list too long", ++ 8: "exec format error", ++ 9: "bad file descriptor", ++ 10: "no child processes", ++ 11: "resource temporarily unavailable", ++ 12: "cannot allocate memory", ++ 13: "permission denied", ++ 14: "bad address", ++ 15: "block device required", ++ 16: "device or resource busy", ++ 17: "file exists", ++ 18: "invalid cross-device link", ++ 19: "no such device", ++ 20: "not a directory", ++ 21: "is a directory", ++ 22: "invalid argument", ++ 23: "too many open files in system", ++ 24: "too many open files", ++ 25: "inappropriate ioctl for device", ++ 26: "text file busy", ++ 27: "file too large", ++ 28: "no space left on device", ++ 29: "illegal seek", ++ 30: "read-only file system", ++ 31: "too many links", ++ 32: "broken pipe", ++ 33: "numerical argument out of domain", ++ 34: "numerical result out of range", ++ 35: "resource deadlock avoided", ++ 36: "file name too long", ++ 37: "no locks available", ++ 38: "function not implemented", ++ 39: "directory not empty", ++ 40: "too many levels of symbolic links", ++ 42: "no message of desired type", ++ 43: "identifier removed", ++ 44: "channel number out of range", ++ 45: "level 2 not synchronized", ++ 46: "level 3 halted", ++ 47: "level 3 reset", ++ 48: "link number out of range", ++ 49: "protocol driver not attached", ++ 50: "no CSI structure available", ++ 51: "level 2 halted", ++ 52: "invalid exchange", ++ 53: "invalid request descriptor", ++ 54: "exchange full", ++ 55: "no anode", ++ 56: "invalid request code", ++ 57: "invalid slot", ++ 59: "bad font file format", ++ 60: "device not a stream", ++ 61: "no data available", ++ 62: "timer expired", ++ 63: "out of streams resources", ++ 64: "machine is not on the network", ++ 65: "package not installed", ++ 66: "object is remote", ++ 67: "link has been severed", ++ 68: "advertise error", ++ 69: "srmount error", ++ 70: "communication error on send", ++ 71: "protocol error", ++ 72: "multihop attempted", ++ 73: "RFS specific error", ++ 74: "bad message", ++ 75: "value too large for defined data type", ++ 76: "name not unique on network", ++ 77: "file descriptor in bad state", ++ 78: "remote address changed", ++ 79: "can not access a needed shared library", ++ 80: "accessing a corrupted shared library", ++ 81: ".lib section in a.out corrupted", ++ 82: "attempting to link in too many shared libraries", ++ 83: "cannot exec a shared library directly", ++ 84: "invalid or incomplete multibyte or wide character", ++ 85: "interrupted system call should be restarted", ++ 86: "streams pipe error", ++ 87: "too many users", ++ 88: "socket operation on non-socket", ++ 89: "destination address required", ++ 90: "message too long", ++ 91: "protocol wrong type for socket", ++ 92: "protocol not available", ++ 93: "protocol not supported", ++ 94: "socket type not supported", ++ 95: "operation not supported", ++ 96: "protocol family not supported", ++ 97: "address family not supported by protocol", 
++ 98: "address already in use", ++ 99: "cannot assign requested address", ++ 100: "network is down", ++ 101: "network is unreachable", ++ 102: "network dropped connection on reset", ++ 103: "software caused connection abort", ++ 104: "connection reset by peer", ++ 105: "no buffer space available", ++ 106: "transport endpoint is already connected", ++ 107: "transport endpoint is not connected", ++ 108: "cannot send after transport endpoint shutdown", ++ 109: "too many references: cannot splice", ++ 110: "connection timed out", ++ 111: "connection refused", ++ 112: "host is down", ++ 113: "no route to host", ++ 114: "operation already in progress", ++ 115: "operation now in progress", ++ 116: "stale file handle", ++ 117: "structure needs cleaning", ++ 118: "not a XENIX named type file", ++ 119: "no XENIX semaphores available", ++ 120: "is a named type file", ++ 121: "remote I/O error", ++ 122: "disk quota exceeded", ++ 123: "no medium found", ++ 124: "wrong medium type", ++ 125: "operation canceled", ++ 126: "required key not available", ++ 127: "key has expired", ++ 128: "key has been revoked", ++ 129: "key was rejected by service", ++ 130: "owner died", ++ 131: "state not recoverable", ++ 132: "operation not possible due to RF-kill", ++ 133: "memory page has hardware error", ++} ++ ++// Signal table ++var signals = [...]string{ ++ 1: "hangup", ++ 2: "interrupt", ++ 3: "quit", ++ 4: "illegal instruction", ++ 5: "trace/breakpoint trap", ++ 6: "aborted", ++ 7: "bus error", ++ 8: "floating point exception", ++ 9: "killed", ++ 10: "user defined signal 1", ++ 11: "segmentation fault", ++ 12: "user defined signal 2", ++ 13: "broken pipe", ++ 14: "alarm clock", ++ 15: "terminated", ++ 16: "stack fault", ++ 17: "child exited", ++ 18: "continued", ++ 19: "stopped (signal)", ++ 20: "stopped", ++ 21: "stopped (tty input)", ++ 22: "stopped (tty output)", ++ 23: "urgent I/O condition", ++ 24: "CPU time limit exceeded", ++ 25: "file size limit exceeded", ++ 26: "virtual timer expired", ++ 27: "profiling timer expired", ++ 28: "window changed", ++ 29: "I/O possible", ++ 30: "power failure", ++ 31: "bad system call", ++} +diff --git a/src/syscall/zsyscall_linux_loong64.go b/src/syscall/zsyscall_linux_loong64.go +new file mode 100644 +index 0000000..48c58f3 +--- /dev/null ++++ b/src/syscall/zsyscall_linux_loong64.go +@@ -0,0 +1,1564 @@ ++// mksyscall.pl -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go ++// Code generated by the command above; DO NOT EDIT. 
++ ++//go:build linux && loong64 ++ ++package syscall ++ ++import "unsafe" ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func faccessat(dirfd int, path string, mode uint32) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fchmodat(dirfd int, path string, mode uint32) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(oldpath) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(newpath) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pipe2(p *[2]_C_int, flags int) (err error) { ++ _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func readlinkat(dirfd int, path string, buf []byte) (n int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 unsafe.Pointer ++ if len(buf) > 0 { ++ _p1 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p1 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func symlinkat(oldpath string, newdirfd int, newpath string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(oldpath) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(newpath) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func unlinkat(dirfd int, path string, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ 
} ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getcwd(buf []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { ++ r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) ++ wpid = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { ++ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(arg) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(source) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(target) ++ if err != nil { ++ return ++ } ++ var _p2 *byte ++ _p2, err = BytePtrFromString(fstype) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Acct(path string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Adjtimex(buf *Timex) (state int, err error) { ++ r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) ++ state = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Chdir(path string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ 
return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Chroot(path string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Close(fd int) (err error) { ++ _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Dup(oldfd int) (fd int, err error) { ++ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Dup3(oldfd int, newfd int, flags int) (err error) { ++ _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func EpollCreate1(flag int) (fd int, err error) { ++ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { ++ _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { ++ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fchdir(fd int) (err error) { ++ _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fchmod(fd int, mode uint32) (err error) { ++ _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fcntl(fd int, cmd int, arg int) (val int, err error) { ++ r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) ++ val = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fdatasync(fd int) (err error) { ++ _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Flock(fd int, how int) (err error) { ++ _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), 
uintptr(how), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fsync(fd int) (err error) { ++ _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getdents(fd int, buf []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getpgid(pid int) (pgid int, err error) { ++ r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) ++ pgid = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getpid() (pid int) { ++ r0, _ := rawSyscallNoError(SYS_GETPID, 0, 0, 0) ++ pid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getppid() (ppid int) { ++ r0, _ := rawSyscallNoError(SYS_GETPPID, 0, 0, 0) ++ ppid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getpriority(which int, who int) (prio int, err error) { ++ r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) ++ prio = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getrusage(who int, rusage *Rusage) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Gettid() (tid int) { ++ r0, _ := rawSyscallNoError(SYS_GETTID, 0, 0, 0) ++ tid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getxattr(path string, attr string, dest []byte) (sz int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(attr) ++ if err != nil { ++ return ++ } ++ var _p2 unsafe.Pointer ++ if len(dest) > 0 { ++ _p2 = unsafe.Pointer(&dest[0]) ++ } else { ++ _p2 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) ++ sz = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(pathname) ++ if err != nil { ++ return ++ } ++ r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) ++ watchdesc = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func InotifyInit1(flags int) (fd int, err error) { ++ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func 
InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { ++ r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) ++ success = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Kill(pid int, sig Signal) (err error) { ++ _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Klogctl(typ int, buf []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Listxattr(path string, dest []byte) (sz int, err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 unsafe.Pointer ++ if len(dest) > 0 { ++ _p1 = unsafe.Pointer(&dest[0]) ++ } else { ++ _p1 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) ++ sz = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mkdirat(dirfd int, path string, mode uint32) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Nanosleep(time *Timespec, leftover *Timespec) (err error) { ++ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func PivotRoot(newroot string, putold string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(newroot) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(putold) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { ++ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func read(fd int, p []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = 
unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Removexattr(path string, attr string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(attr) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setdomainname(p []byte) (err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Sethostname(p []byte) (err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setpgid(pid int, pgid int) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setsid() (pid int, err error) { ++ r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) ++ pid = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Settimeofday(tv *Timeval) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setpriority(which int, who int, prio int) (err error) { ++ _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setxattr(path string, attr string, data []byte, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(attr) ++ if err != nil { ++ return ++ } ++ var _p2 unsafe.Pointer ++ if len(data) > 0 { ++ _p2 = unsafe.Pointer(&data[0]) ++ } else { ++ _p2 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Sync() { ++ Syscall(SYS_SYNC, 0, 0, 0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Sysinfo(info *Sysinfo_t) (err error) { ++ _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT ++ ++func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { ++ r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) ++ n = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Tgkill(tgid int, tid int, sig Signal) (err error) { ++ _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Times(tms *Tms) (ticks uintptr, err error) { ++ r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) ++ ticks = uintptr(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Umask(mask int) (oldmask int) { ++ r0, _ := rawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) ++ oldmask = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Uname(buf *Utsname) (err error) { ++ _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Unmount(target string, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(target) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Unshare(flags int) (err error) { ++ _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func write(fd int, p []byte) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func exitThread(code int) (err error) { ++ _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func readlen(fd int, p *byte, np int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func writelen(fd int, p *byte, np int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func munmap(addr uintptr, length uintptr) (err error) { ++ _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Madvise(b []byte, advice int) (err error) { ++ var _p0 unsafe.Pointer ++ if len(b) > 0 { ++ _p0 = unsafe.Pointer(&b[0]) ++ } else 
{ ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mprotect(b []byte, prot int) (err error) { ++ var _p0 unsafe.Pointer ++ if len(b) > 0 { ++ _p0 = unsafe.Pointer(&b[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mlock(b []byte) (err error) { ++ var _p0 unsafe.Pointer ++ if len(b) > 0 { ++ _p0 = unsafe.Pointer(&b[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Munlock(b []byte) (err error) { ++ var _p0 unsafe.Pointer ++ if len(b) > 0 { ++ _p0 = unsafe.Pointer(&b[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Mlockall(flags int) (err error) { ++ _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Munlockall() (err error) { ++ _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(events) > 0 { ++ _p0 = unsafe.Pointer(&events[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fchown(fd int, uid int, gid int) (err error) { ++ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstat(fd int, stat *Stat_t) (err error) { ++ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = 
errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Fstatfs(fd int, buf *Statfs_t) (err error) { ++ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Ftruncate(fd int, length int64) (err error) { ++ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getegid() (egid int) { ++ r0, _ := rawSyscallNoError(SYS_GETEGID, 0, 0, 0) ++ egid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Geteuid() (euid int) { ++ r0, _ := rawSyscallNoError(SYS_GETEUID, 0, 0, 0) ++ euid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getgid() (gid int) { ++ r0, _ := rawSyscallNoError(SYS_GETGID, 0, 0, 0) ++ gid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Getuid() (uid int) { ++ r0, _ := rawSyscallNoError(SYS_GETUID, 0, 0, 0) ++ uid = int(r0) ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Listen(s int, n int) (err error) { ++ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Pread(fd int, p []byte, offset int64) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Pwrite(fd int, p []byte, offset int64) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(oldpath) ++ if err != nil { ++ return ++ } ++ var _p1 *byte ++ _p1, err = BytePtrFromString(newpath) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Seek(fd int, offset int64, whence int) (off int64, err error) { ++ r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) ++ off = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { ++ r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) ++ 
written = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setfsgid(gid int) (err error) { ++ _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Setfsuid(uid int) (err error) { ++ _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Shutdown(fd int, how int) (err error) { ++ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { ++ r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) ++ n = int64(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Statfs(path string, buf *Statfs_t) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { ++ _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Truncate(path string, length int64) (err error) { ++ var _p0 *byte ++ _p0, err = BytePtrFromString(path) ++ if err != nil { ++ return ++ } ++ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { ++ r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { ++ r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { ++ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { ++ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT ++ ++func getgroups(n int, list *_Gid_t) (nn int, err error) { ++ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) ++ nn = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setgroups(n int, list *_Gid_t) (err error) { ++ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { ++ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { ++ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func socket(domain int, typ int, proto int) (fd int, err error) { ++ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) ++ fd = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { ++ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { ++ var _p0 unsafe.Pointer ++ if len(p) > 0 { ++ _p0 = unsafe.Pointer(&p[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { ++ var _p0 unsafe.Pointer ++ if len(buf) > 0 { ++ _p0 = unsafe.Pointer(&buf[0]) ++ } else { ++ _p0 = unsafe.Pointer(&_zero) ++ } ++ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { ++ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { ++ r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) ++ xaddr = uintptr(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_t) (n int, err error) { ++ r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func Gettimeofday(tv *Timeval) (err error) { ++ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} ++ ++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT ++ ++func ppoll(fds *pollFd, nfds int, timeout *Timespec, sigmask *sigset_t) (n int, err error) { ++ r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) ++ n = int(r0) ++ if e1 != 0 { ++ err = errnoErr(e1) ++ } ++ return ++} +diff --git a/src/syscall/zsysnum_linux_loong64.go b/src/syscall/zsysnum_linux_loong64.go +new file mode 100644 +index 0000000..0821777 +--- /dev/null ++++ b/src/syscall/zsysnum_linux_loong64.go +@@ -0,0 +1,308 @@ ++// mksysnum_linux.pl /usr/include/asm-generic/unistd.h ++// Code generated by the command above; DO NOT EDIT. 
++ ++package syscall ++ ++const ( ++ SYS_IO_SETUP = 0 ++ SYS_IO_DESTROY = 1 ++ SYS_IO_SUBMIT = 2 ++ SYS_IO_CANCEL = 3 ++ SYS_IO_GETEVENTS = 4 ++ SYS_SETXATTR = 5 ++ SYS_LSETXATTR = 6 ++ SYS_FSETXATTR = 7 ++ SYS_GETXATTR = 8 ++ SYS_LGETXATTR = 9 ++ SYS_FGETXATTR = 10 ++ SYS_LISTXATTR = 11 ++ SYS_LLISTXATTR = 12 ++ SYS_FLISTXATTR = 13 ++ SYS_REMOVEXATTR = 14 ++ SYS_LREMOVEXATTR = 15 ++ SYS_FREMOVEXATTR = 16 ++ SYS_GETCWD = 17 ++ SYS_LOOKUP_DCOOKIE = 18 ++ SYS_EVENTFD2 = 19 ++ SYS_EPOLL_CREATE1 = 20 ++ SYS_EPOLL_CTL = 21 ++ SYS_EPOLL_PWAIT = 22 ++ SYS_DUP = 23 ++ SYS_DUP3 = 24 ++ SYS_FCNTL = 25 ++ SYS_INOTIFY_INIT1 = 26 ++ SYS_INOTIFY_ADD_WATCH = 27 ++ SYS_INOTIFY_RM_WATCH = 28 ++ SYS_IOCTL = 29 ++ SYS_IOPRIO_SET = 30 ++ SYS_IOPRIO_GET = 31 ++ SYS_FLOCK = 32 ++ SYS_MKNODAT = 33 ++ SYS_MKDIRAT = 34 ++ SYS_UNLINKAT = 35 ++ SYS_SYMLINKAT = 36 ++ SYS_LINKAT = 37 ++ SYS_UMOUNT2 = 39 ++ SYS_MOUNT = 40 ++ SYS_PIVOT_ROOT = 41 ++ SYS_NFSSERVCTL = 42 ++ SYS_STATFS = 43 ++ SYS_FSTATFS = 44 ++ SYS_TRUNCATE = 45 ++ SYS_FTRUNCATE = 46 ++ SYS_FALLOCATE = 47 ++ SYS_FACCESSAT = 48 ++ SYS_CHDIR = 49 ++ SYS_FCHDIR = 50 ++ SYS_CHROOT = 51 ++ SYS_FCHMOD = 52 ++ SYS_FCHMODAT = 53 ++ SYS_FCHOWNAT = 54 ++ SYS_FCHOWN = 55 ++ SYS_OPENAT = 56 ++ SYS_CLOSE = 57 ++ SYS_VHANGUP = 58 ++ SYS_PIPE2 = 59 ++ SYS_QUOTACTL = 60 ++ SYS_GETDENTS64 = 61 ++ SYS_LSEEK = 62 ++ SYS_READ = 63 ++ SYS_WRITE = 64 ++ SYS_READV = 65 ++ SYS_WRITEV = 66 ++ SYS_PREAD64 = 67 ++ SYS_PWRITE64 = 68 ++ SYS_PREADV = 69 ++ SYS_PWRITEV = 70 ++ SYS_SENDFILE = 71 ++ SYS_PSELECT6 = 72 ++ SYS_PPOLL = 73 ++ SYS_SIGNALFD4 = 74 ++ SYS_VMSPLICE = 75 ++ SYS_SPLICE = 76 ++ SYS_TEE = 77 ++ SYS_READLINKAT = 78 ++ SYS_FSTATAT = 79 ++ SYS_FSTAT = 80 ++ SYS_SYNC = 81 ++ SYS_FSYNC = 82 ++ SYS_FDATASYNC = 83 ++ SYS_SYNC_FILE_RANGE = 84 ++ SYS_TIMERFD_CREATE = 85 ++ SYS_TIMERFD_SETTIME = 86 ++ SYS_TIMERFD_GETTIME = 87 ++ SYS_UTIMENSAT = 88 ++ SYS_ACCT = 89 ++ SYS_CAPGET = 90 ++ SYS_CAPSET = 91 ++ SYS_PERSONALITY = 92 ++ SYS_EXIT = 93 ++ SYS_EXIT_GROUP = 94 ++ SYS_WAITID = 95 ++ SYS_SET_TID_ADDRESS = 96 ++ SYS_UNSHARE = 97 ++ SYS_FUTEX = 98 ++ SYS_SET_ROBUST_LIST = 99 ++ SYS_GET_ROBUST_LIST = 100 ++ SYS_NANOSLEEP = 101 ++ SYS_GETITIMER = 102 ++ SYS_SETITIMER = 103 ++ SYS_KEXEC_LOAD = 104 ++ SYS_INIT_MODULE = 105 ++ SYS_DELETE_MODULE = 106 ++ SYS_TIMER_CREATE = 107 ++ SYS_TIMER_GETTIME = 108 ++ SYS_TIMER_GETOVERRUN = 109 ++ SYS_TIMER_SETTIME = 110 ++ SYS_TIMER_DELETE = 111 ++ SYS_CLOCK_SETTIME = 112 ++ SYS_CLOCK_GETTIME = 113 ++ SYS_CLOCK_GETRES = 114 ++ SYS_CLOCK_NANOSLEEP = 115 ++ SYS_SYSLOG = 116 ++ SYS_PTRACE = 117 ++ SYS_SCHED_SETPARAM = 118 ++ SYS_SCHED_SETSCHEDULER = 119 ++ SYS_SCHED_GETSCHEDULER = 120 ++ SYS_SCHED_GETPARAM = 121 ++ SYS_SCHED_SETAFFINITY = 122 ++ SYS_SCHED_GETAFFINITY = 123 ++ SYS_SCHED_YIELD = 124 ++ SYS_SCHED_GET_PRIORITY_MAX = 125 ++ SYS_SCHED_GET_PRIORITY_MIN = 126 ++ SYS_SCHED_RR_GET_INTERVAL = 127 ++ SYS_RESTART_SYSCALL = 128 ++ SYS_KILL = 129 ++ SYS_TKILL = 130 ++ SYS_TGKILL = 131 ++ SYS_SIGALTSTACK = 132 ++ SYS_RT_SIGSUSPEND = 133 ++ SYS_RT_SIGACTION = 134 ++ SYS_RT_SIGPROCMASK = 135 ++ SYS_RT_SIGPENDING = 136 ++ SYS_RT_SIGTIMEDWAIT = 137 ++ SYS_RT_SIGQUEUEINFO = 138 ++ SYS_RT_SIGRETURN = 139 ++ SYS_SETPRIORITY = 140 ++ SYS_GETPRIORITY = 141 ++ SYS_REBOOT = 142 ++ SYS_SETREGID = 143 ++ SYS_SETGID = 144 ++ SYS_SETREUID = 145 ++ SYS_SETUID = 146 ++ SYS_SETRESUID = 147 ++ SYS_GETRESUID = 148 ++ SYS_SETRESGID = 149 ++ SYS_GETRESGID = 150 ++ SYS_SETFSUID = 151 ++ SYS_SETFSGID = 152 ++ SYS_TIMES = 153 ++ SYS_SETPGID = 154 ++ SYS_GETPGID = 155 ++ 
SYS_GETSID = 156 ++ SYS_SETSID = 157 ++ SYS_GETGROUPS = 158 ++ SYS_SETGROUPS = 159 ++ SYS_UNAME = 160 ++ SYS_SETHOSTNAME = 161 ++ SYS_SETDOMAINNAME = 162 ++ SYS_GETRUSAGE = 165 ++ SYS_UMASK = 166 ++ SYS_PRCTL = 167 ++ SYS_GETCPU = 168 ++ SYS_GETTIMEOFDAY = 169 ++ SYS_SETTIMEOFDAY = 170 ++ SYS_ADJTIMEX = 171 ++ SYS_GETPID = 172 ++ SYS_GETPPID = 173 ++ SYS_GETUID = 174 ++ SYS_GETEUID = 175 ++ SYS_GETGID = 176 ++ SYS_GETEGID = 177 ++ SYS_GETTID = 178 ++ SYS_SYSINFO = 179 ++ SYS_MQ_OPEN = 180 ++ SYS_MQ_UNLINK = 181 ++ SYS_MQ_TIMEDSEND = 182 ++ SYS_MQ_TIMEDRECEIVE = 183 ++ SYS_MQ_NOTIFY = 184 ++ SYS_MQ_GETSETATTR = 185 ++ SYS_MSGGET = 186 ++ SYS_MSGCTL = 187 ++ SYS_MSGRCV = 188 ++ SYS_MSGSND = 189 ++ SYS_SEMGET = 190 ++ SYS_SEMCTL = 191 ++ SYS_SEMTIMEDOP = 192 ++ SYS_SEMOP = 193 ++ SYS_SHMGET = 194 ++ SYS_SHMCTL = 195 ++ SYS_SHMAT = 196 ++ SYS_SHMDT = 197 ++ SYS_SOCKET = 198 ++ SYS_SOCKETPAIR = 199 ++ SYS_BIND = 200 ++ SYS_LISTEN = 201 ++ SYS_ACCEPT = 202 ++ SYS_CONNECT = 203 ++ SYS_GETSOCKNAME = 204 ++ SYS_GETPEERNAME = 205 ++ SYS_SENDTO = 206 ++ SYS_RECVFROM = 207 ++ SYS_SETSOCKOPT = 208 ++ SYS_GETSOCKOPT = 209 ++ SYS_SHUTDOWN = 210 ++ SYS_SENDMSG = 211 ++ SYS_RECVMSG = 212 ++ SYS_READAHEAD = 213 ++ SYS_BRK = 214 ++ SYS_MUNMAP = 215 ++ SYS_MREMAP = 216 ++ SYS_ADD_KEY = 217 ++ SYS_REQUEST_KEY = 218 ++ SYS_KEYCTL = 219 ++ SYS_CLONE = 220 ++ SYS_EXECVE = 221 ++ SYS_MMAP = 222 ++ SYS_FADVISE64 = 223 ++ SYS_SWAPON = 224 ++ SYS_SWAPOFF = 225 ++ SYS_MPROTECT = 226 ++ SYS_MSYNC = 227 ++ SYS_MLOCK = 228 ++ SYS_MUNLOCK = 229 ++ SYS_MLOCKALL = 230 ++ SYS_MUNLOCKALL = 231 ++ SYS_MINCORE = 232 ++ SYS_MADVISE = 233 ++ SYS_REMAP_FILE_PAGES = 234 ++ SYS_MBIND = 235 ++ SYS_GET_MEMPOLICY = 236 ++ SYS_SET_MEMPOLICY = 237 ++ SYS_MIGRATE_PAGES = 238 ++ SYS_MOVE_PAGES = 239 ++ SYS_RT_TGSIGQUEUEINFO = 240 ++ SYS_PERF_EVENT_OPEN = 241 ++ SYS_ACCEPT4 = 242 ++ SYS_RECVMMSG = 243 ++ SYS_ARCH_SPECIFIC_SYSCALL = 244 ++ SYS_WAIT4 = 260 ++ SYS_PRLIMIT64 = 261 ++ SYS_FANOTIFY_INIT = 262 ++ SYS_FANOTIFY_MARK = 263 ++ SYS_NAME_TO_HANDLE_AT = 264 ++ SYS_OPEN_BY_HANDLE_AT = 265 ++ SYS_CLOCK_ADJTIME = 266 ++ SYS_SYNCFS = 267 ++ SYS_SETNS = 268 ++ SYS_SENDMMSG = 269 ++ SYS_PROCESS_VM_READV = 270 ++ SYS_PROCESS_VM_WRITEV = 271 ++ SYS_KCMP = 272 ++ SYS_FINIT_MODULE = 273 ++ SYS_SCHED_SETATTR = 274 ++ SYS_SCHED_GETATTR = 275 ++ SYS_RENAMEAT2 = 276 ++ SYS_SECCOMP = 277 ++ SYS_GETRANDOM = 278 ++ SYS_MEMFD_CREATE = 279 ++ SYS_BPF = 280 ++ SYS_EXECVEAT = 281 ++ SYS_USERFAULTFD = 282 ++ SYS_MEMBARRIER = 283 ++ SYS_MLOCK2 = 284 ++ SYS_COPY_FILE_RANGE = 285 ++ SYS_PREADV2 = 286 ++ SYS_PWRITEV2 = 287 ++ SYS_PKEY_MPROTECT = 288 ++ SYS_PKEY_ALLOC = 289 ++ SYS_PKEY_FREE = 290 ++ SYS_STATX = 291 ++ SYS_IO_PGETEVENTS = 292 ++ SYS_RSEQ = 293 ++ SYS_KEXEC_FILE_LOAD = 294 ++ SYS_PIDFD_SEND_SIGNAL = 424 ++ SYS_IO_URING_SETUP = 425 ++ SYS_IO_URING_ENTER = 426 ++ SYS_IO_URING_REGISTER = 427 ++ SYS_OPEN_TREE = 428 ++ SYS_MOVE_MOUNT = 429 ++ SYS_FSOPEN = 430 ++ SYS_FSCONFIG = 431 ++ SYS_FSMOUNT = 432 ++ SYS_FSPICK = 433 ++ SYS_PIDFD_OPEN = 434 ++ SYS_CLOSE_RANGE = 436 ++ SYS_OPENAT2 = 437 ++ SYS_PIDFD_GETFD = 438 ++ SYS_FACCESSAT2 = 439 ++ SYS_PROCESS_MADVISE = 440 ++ SYS_EPOLL_PWAIT2 = 441 ++ SYS_MOUNT_SETATTR = 442 ++ SYS_QUOTACTL_FD = 443 ++ SYS_LANDLOCK_CREATE_RULESET = 444 ++ SYS_LANDLOCK_ADD_RULE = 445 ++ SYS_LANDLOCK_RESTRICT_SELF = 446 ++ SYS_PROCESS_MRELEASE = 448 ++ SYS_FUTEX_WAITV = 449 ++) +diff --git a/src/syscall/ztypes_linux_loong64.go b/src/syscall/ztypes_linux_loong64.go +new file mode 100644 +index 0000000..ab2d721 +--- /dev/null ++++ 
b/src/syscall/ztypes_linux_loong64.go +@@ -0,0 +1,599 @@ ++// Code generated by cmd/cgo -godefs; DO NOT EDIT. ++// cgo -godefs -- types_linux.go ++ ++package syscall ++ ++const ( ++ sizeofPtr = 0x8 ++ sizeofShort = 0x2 ++ sizeofInt = 0x4 ++ sizeofLong = 0x8 ++ sizeofLongLong = 0x8 ++ PathMax = 0x1000 ++) ++ ++type ( ++ _C_short int16 ++ _C_int int32 ++ _C_long int64 ++ _C_long_long int64 ++) ++ ++type Timespec struct { ++ Sec int64 ++ Nsec int64 ++} ++ ++type Timeval struct { ++ Sec int64 ++ Usec int64 ++} ++ ++type Timex struct { ++ Modes uint32 ++ Offset int64 ++ Freq int64 ++ Maxerror int64 ++ Esterror int64 ++ Status int32 ++ Constant int64 ++ Precision int64 ++ Tolerance int64 ++ Time Timeval ++ Tick int64 ++ Ppsfreq int64 ++ Jitter int64 ++ Shift int32 ++ Stabil int64 ++ Jitcnt int64 ++ Calcnt int64 ++ Errcnt int64 ++ Stbcnt int64 ++ Tai int32 ++ Pad_cgo_0 [44]byte ++} ++ ++type Time_t int64 ++ ++type Tms struct { ++ Utime int64 ++ Stime int64 ++ Cutime int64 ++ Cstime int64 ++} ++ ++type Utimbuf struct { ++ Actime int64 ++ Modtime int64 ++} ++ ++type Rusage struct { ++ Utime Timeval ++ Stime Timeval ++ Maxrss int64 ++ Ixrss int64 ++ Idrss int64 ++ Isrss int64 ++ Minflt int64 ++ Majflt int64 ++ Nswap int64 ++ Inblock int64 ++ Oublock int64 ++ Msgsnd int64 ++ Msgrcv int64 ++ Nsignals int64 ++ Nvcsw int64 ++ Nivcsw int64 ++} ++ ++type Rlimit struct { ++ Cur uint64 ++ Max uint64 ++} ++ ++type _Gid_t uint32 ++ ++type Stat_t struct { ++ Dev uint64 ++ Ino uint64 ++ Mode uint32 ++ Nlink uint32 ++ Uid uint32 ++ Gid uint32 ++ Rdev uint64 ++ X__pad1 uint64 ++ Size int64 ++ Blksize int32 ++ X__pad2 int32 ++ Blocks int64 ++ Atim Timespec ++ Mtim Timespec ++ Ctim Timespec ++ X__glibc_reserved [2]int32 ++} ++ ++type Statfs_t struct { ++ Type int64 ++ Bsize int64 ++ Blocks uint64 ++ Bfree uint64 ++ Bavail uint64 ++ Files uint64 ++ Ffree uint64 ++ Fsid Fsid ++ Namelen int64 ++ Frsize int64 ++ Flags int64 ++ Spare [4]int64 ++} ++ ++type Dirent struct { ++ Ino uint64 ++ Off int64 ++ Reclen uint16 ++ Type uint8 ++ Name [256]int8 ++ Pad_cgo_0 [5]byte ++} ++ ++type Fsid struct { ++ X__val [2]int32 ++} ++ ++type Flock_t struct { ++ Type int16 ++ Whence int16 ++ Start int64 ++ Len int64 ++ Pid int32 ++ Pad_cgo_0 [4]byte ++} ++ ++type RawSockaddrInet4 struct { ++ Family uint16 ++ Port uint16 ++ Addr [4]byte /* in_addr */ ++ Zero [8]uint8 ++} ++ ++type RawSockaddrInet6 struct { ++ Family uint16 ++ Port uint16 ++ Flowinfo uint32 ++ Addr [16]byte /* in6_addr */ ++ Scope_id uint32 ++} ++ ++type RawSockaddrUnix struct { ++ Family uint16 ++ Path [108]int8 ++} ++ ++type RawSockaddrLinklayer struct { ++ Family uint16 ++ Protocol uint16 ++ Ifindex int32 ++ Hatype uint16 ++ Pkttype uint8 ++ Halen uint8 ++ Addr [8]uint8 ++} ++ ++type RawSockaddrNetlink struct { ++ Family uint16 ++ Pad uint16 ++ Pid uint32 ++ Groups uint32 ++} ++ ++type RawSockaddr struct { ++ Family uint16 ++ Data [14]int8 ++} ++ ++type RawSockaddrAny struct { ++ Addr RawSockaddr ++ Pad [96]int8 ++} ++ ++type _Socklen uint32 ++ ++type Linger struct { ++ Onoff int32 ++ Linger int32 ++} ++ ++type Iovec struct { ++ Base *byte ++ Len uint64 ++} ++ ++type IPMreq struct { ++ Multiaddr [4]byte /* in_addr */ ++ Interface [4]byte /* in_addr */ ++} ++ ++type IPMreqn struct { ++ Multiaddr [4]byte /* in_addr */ ++ Address [4]byte /* in_addr */ ++ Ifindex int32 ++} ++ ++type IPv6Mreq struct { ++ Multiaddr [16]byte /* in6_addr */ ++ Interface uint32 ++} ++ ++type Msghdr struct { ++ Name *byte ++ Namelen uint32 ++ Iov *Iovec ++ Iovlen uint64 ++ Control *byte ++ 
Controllen uint64 ++ Flags int32 ++ Pad_cgo_0 [4]byte ++} ++ ++type Cmsghdr struct { ++ Len uint64 ++ Level int32 ++ Type int32 ++} ++ ++type Inet4Pktinfo struct { ++ Ifindex int32 ++ Spec_dst [4]byte /* in_addr */ ++ Addr [4]byte /* in_addr */ ++} ++ ++type Inet6Pktinfo struct { ++ Addr [16]byte /* in6_addr */ ++ Ifindex uint32 ++} ++ ++type IPv6MTUInfo struct { ++ Addr RawSockaddrInet6 ++ Mtu uint32 ++} ++ ++type ICMPv6Filter struct { ++ Data [8]uint32 ++} ++ ++type Ucred struct { ++ Pid int32 ++ Uid uint32 ++ Gid uint32 ++} ++ ++type TCPInfo struct { ++ State uint8 ++ Ca_state uint8 ++ Retransmits uint8 ++ Probes uint8 ++ Backoff uint8 ++ Options uint8 ++ Rto uint32 ++ Ato uint32 ++ Snd_mss uint32 ++ Rcv_mss uint32 ++ Unacked uint32 ++ Sacked uint32 ++ Lost uint32 ++ Retrans uint32 ++ Fackets uint32 ++ Last_data_sent uint32 ++ Last_ack_sent uint32 ++ Last_data_recv uint32 ++ Last_ack_recv uint32 ++ Pmtu uint32 ++ Rcv_ssthresh uint32 ++ Rtt uint32 ++ Rttvar uint32 ++ Snd_ssthresh uint32 ++ Snd_cwnd uint32 ++ Advmss uint32 ++ Reordering uint32 ++ Rcv_rtt uint32 ++ Rcv_space uint32 ++ Total_retrans uint32 ++} ++ ++const ( ++ SizeofSockaddrInet4 = 0x10 ++ SizeofSockaddrInet6 = 0x1c ++ SizeofSockaddrAny = 0x70 ++ SizeofSockaddrUnix = 0x6e ++ SizeofSockaddrLinklayer = 0x14 ++ SizeofSockaddrNetlink = 0xc ++ SizeofLinger = 0x8 ++ SizeofIPMreq = 0x8 ++ SizeofIPMreqn = 0xc ++ SizeofIPv6Mreq = 0x14 ++ SizeofMsghdr = 0x38 ++ SizeofCmsghdr = 0x10 ++ SizeofInet4Pktinfo = 0xc ++ SizeofInet6Pktinfo = 0x14 ++ SizeofIPv6MTUInfo = 0x20 ++ SizeofICMPv6Filter = 0x20 ++ SizeofUcred = 0xc ++ SizeofTCPInfo = 0x68 ++) ++ ++const ( ++ IFA_UNSPEC = 0x0 ++ IFA_ADDRESS = 0x1 ++ IFA_LOCAL = 0x2 ++ IFA_LABEL = 0x3 ++ IFA_BROADCAST = 0x4 ++ IFA_ANYCAST = 0x5 ++ IFA_CACHEINFO = 0x6 ++ IFA_MULTICAST = 0x7 ++ IFLA_UNSPEC = 0x0 ++ IFLA_ADDRESS = 0x1 ++ IFLA_BROADCAST = 0x2 ++ IFLA_IFNAME = 0x3 ++ IFLA_MTU = 0x4 ++ IFLA_LINK = 0x5 ++ IFLA_QDISC = 0x6 ++ IFLA_STATS = 0x7 ++ IFLA_COST = 0x8 ++ IFLA_PRIORITY = 0x9 ++ IFLA_MASTER = 0xa ++ IFLA_WIRELESS = 0xb ++ IFLA_PROTINFO = 0xc ++ IFLA_TXQLEN = 0xd ++ IFLA_MAP = 0xe ++ IFLA_WEIGHT = 0xf ++ IFLA_OPERSTATE = 0x10 ++ IFLA_LINKMODE = 0x11 ++ IFLA_LINKINFO = 0x12 ++ IFLA_NET_NS_PID = 0x13 ++ IFLA_IFALIAS = 0x14 ++ IFLA_MAX = 0x39 ++ RT_SCOPE_UNIVERSE = 0x0 ++ RT_SCOPE_SITE = 0xc8 ++ RT_SCOPE_LINK = 0xfd ++ RT_SCOPE_HOST = 0xfe ++ RT_SCOPE_NOWHERE = 0xff ++ RT_TABLE_UNSPEC = 0x0 ++ RT_TABLE_COMPAT = 0xfc ++ RT_TABLE_DEFAULT = 0xfd ++ RT_TABLE_MAIN = 0xfe ++ RT_TABLE_LOCAL = 0xff ++ RT_TABLE_MAX = 0xffffffff ++ RTA_UNSPEC = 0x0 ++ RTA_DST = 0x1 ++ RTA_SRC = 0x2 ++ RTA_IIF = 0x3 ++ RTA_OIF = 0x4 ++ RTA_GATEWAY = 0x5 ++ RTA_PRIORITY = 0x6 ++ RTA_PREFSRC = 0x7 ++ RTA_METRICS = 0x8 ++ RTA_MULTIPATH = 0x9 ++ RTA_FLOW = 0xb ++ RTA_CACHEINFO = 0xc ++ RTA_TABLE = 0xf ++ RTN_UNSPEC = 0x0 ++ RTN_UNICAST = 0x1 ++ RTN_LOCAL = 0x2 ++ RTN_BROADCAST = 0x3 ++ RTN_ANYCAST = 0x4 ++ RTN_MULTICAST = 0x5 ++ RTN_BLACKHOLE = 0x6 ++ RTN_UNREACHABLE = 0x7 ++ RTN_PROHIBIT = 0x8 ++ RTN_THROW = 0x9 ++ RTN_NAT = 0xa ++ RTN_XRESOLVE = 0xb ++ RTNLGRP_NONE = 0x0 ++ RTNLGRP_LINK = 0x1 ++ RTNLGRP_NOTIFY = 0x2 ++ RTNLGRP_NEIGH = 0x3 ++ RTNLGRP_TC = 0x4 ++ RTNLGRP_IPV4_IFADDR = 0x5 ++ RTNLGRP_IPV4_MROUTE = 0x6 ++ RTNLGRP_IPV4_ROUTE = 0x7 ++ RTNLGRP_IPV4_RULE = 0x8 ++ RTNLGRP_IPV6_IFADDR = 0x9 ++ RTNLGRP_IPV6_MROUTE = 0xa ++ RTNLGRP_IPV6_ROUTE = 0xb ++ RTNLGRP_IPV6_IFINFO = 0xc ++ RTNLGRP_IPV6_PREFIX = 0x12 ++ RTNLGRP_IPV6_RULE = 0x13 ++ RTNLGRP_ND_USEROPT = 0x14 ++ SizeofNlMsghdr = 0x10 ++ SizeofNlMsgerr = 0x14 ++ 
SizeofRtGenmsg = 0x1 ++ SizeofNlAttr = 0x4 ++ SizeofRtAttr = 0x4 ++ SizeofIfInfomsg = 0x10 ++ SizeofIfAddrmsg = 0x8 ++ SizeofRtMsg = 0xc ++ SizeofRtNexthop = 0x8 ++) ++ ++type NlMsghdr struct { ++ Len uint32 ++ Type uint16 ++ Flags uint16 ++ Seq uint32 ++ Pid uint32 ++} ++ ++type NlMsgerr struct { ++ Error int32 ++ Msg NlMsghdr ++} ++ ++type RtGenmsg struct { ++ Family uint8 ++} ++ ++type NlAttr struct { ++ Len uint16 ++ Type uint16 ++} ++ ++type RtAttr struct { ++ Len uint16 ++ Type uint16 ++} ++ ++type IfInfomsg struct { ++ Family uint8 ++ X__ifi_pad uint8 ++ Type uint16 ++ Index int32 ++ Flags uint32 ++ Change uint32 ++} ++ ++type IfAddrmsg struct { ++ Family uint8 ++ Prefixlen uint8 ++ Flags uint8 ++ Scope uint8 ++ Index uint32 ++} ++ ++type RtMsg struct { ++ Family uint8 ++ Dst_len uint8 ++ Src_len uint8 ++ Tos uint8 ++ Table uint8 ++ Protocol uint8 ++ Scope uint8 ++ Type uint8 ++ Flags uint32 ++} ++ ++type RtNexthop struct { ++ Len uint16 ++ Flags uint8 ++ Hops uint8 ++ Ifindex int32 ++} ++ ++const ( ++ SizeofSockFilter = 0x8 ++ SizeofSockFprog = 0x10 ++) ++ ++type SockFilter struct { ++ Code uint16 ++ Jt uint8 ++ Jf uint8 ++ K uint32 ++} ++ ++type SockFprog struct { ++ Len uint16 ++ Filter *SockFilter ++} ++ ++type InotifyEvent struct { ++ Wd int32 ++ Mask uint32 ++ Cookie uint32 ++ Len uint32 ++} ++ ++const SizeofInotifyEvent = 0x10 ++ ++type PtraceRegs struct { ++ Regs [32]uint64 ++ Era uint64 ++ Badv uint64 ++ Reserved [11]uint64 ++} ++ ++type ptracePsw struct { ++} ++ ++type ptraceFpregs struct { ++} ++ ++type ptracePer struct { ++} ++ ++type FdSet struct { ++ Bits [16]int64 ++} ++ ++type Sysinfo_t struct { ++ Uptime int64 ++ Loads [3]uint64 ++ Totalram uint64 ++ Freeram uint64 ++ Sharedram uint64 ++ Bufferram uint64 ++ Totalswap uint64 ++ Freeswap uint64 ++ Procs uint16 ++ Pad uint16 ++ Totalhigh uint64 ++ Freehigh uint64 ++ Unit uint32 ++ X_f [0]int8 ++ Pad_cgo_0 [4]byte ++} ++ ++type Utsname struct { ++ Sysname [65]int8 ++ Nodename [65]int8 ++ Release [65]int8 ++ Version [65]int8 ++ Machine [65]int8 ++ Domainname [65]int8 ++} ++ ++type Ustat_t struct { ++ Tfree int32 ++ Tinode uint64 ++ Fname [6]int8 ++ Fpack [6]int8 ++ Pad_cgo_0 [4]byte ++} ++ ++type EpollEvent struct { ++ Events uint32 ++ _ int32 ++ Fd int32 ++ Pad int32 ++} ++ ++const ( ++ _AT_FDCWD = -0x64 ++ _AT_REMOVEDIR = 0x200 ++ _AT_SYMLINK_NOFOLLOW = 0x100 ++ _AT_EACCESS = 0x200 ++) ++ ++type pollFd struct { ++ Fd int32 ++ Events int16 ++ Revents int16 ++} ++ ++type Termios struct { ++ Iflag uint32 ++ Oflag uint32 ++ Cflag uint32 ++ Lflag uint32 ++ Line uint8 ++ Cc [32]uint8 ++ Ispeed uint32 ++ Ospeed uint32 ++} ++ ++const ( ++ IUCLC = 0x200 ++ OLCUC = 0x2 ++ TCGETS = 0x5401 ++ TCSETS = 0x5402 ++ XCASE = 0x4 ++) +-- +2.27.0 + diff --git a/0037-internal-syscall-unix-loong64-use-generic-syscall.patch b/0037-internal-syscall-unix-loong64-use-generic-syscall.patch new file mode 100644 index 0000000..ad72adf --- /dev/null +++ b/0037-internal-syscall-unix-loong64-use-generic-syscall.patch @@ -0,0 +1,45 @@ +From 99acdd2aa2b9b31571ca426a183684dcda7e7465 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 15 Nov 2021 20:59:24 +0800 +Subject: [PATCH 37/56] internal/syscall/unix: loong64 use generic syscall + +Change-Id: I5988bf3efed37b03b9193f1089dfece060ccba99 +--- + src/internal/syscall/unix/at_sysnum_fstatat_linux.go | 4 ++-- + src/internal/syscall/unix/sysnum_linux_generic.go | 4 ++-- + 2 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/src/internal/syscall/unix/at_sysnum_fstatat_linux.go 
b/src/internal/syscall/unix/at_sysnum_fstatat_linux.go +index e53a2d1..5f7ea12 100644 +--- a/src/internal/syscall/unix/at_sysnum_fstatat_linux.go ++++ b/src/internal/syscall/unix/at_sysnum_fstatat_linux.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build arm64 || riscv64 +-// +build arm64 riscv64 ++//go:build arm64 || riscv64 || loong64 ++// +build arm64 riscv64 loong64 + + package unix + +diff --git a/src/internal/syscall/unix/sysnum_linux_generic.go b/src/internal/syscall/unix/sysnum_linux_generic.go +index a760254..3ec0712 100644 +--- a/src/internal/syscall/unix/sysnum_linux_generic.go ++++ b/src/internal/syscall/unix/sysnum_linux_generic.go +@@ -2,9 +2,9 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build linux && (arm64 || riscv64) ++//go:build linux && (arm64 || riscv64 || loong64) + // +build linux +-// +build arm64 riscv64 ++// +build arm64 riscv64 loong64 + + package unix + +-- +2.27.0 + diff --git a/0038-misc-test-fix-test-error-for-loong64.patch b/0038-misc-test-fix-test-error-for-loong64.patch new file mode 100644 index 0000000..3544a30 --- /dev/null +++ b/0038-misc-test-fix-test-error-for-loong64.patch @@ -0,0 +1,93 @@ +From a48598dbd74341cf14761bf90e916b468e6c440d Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 17:13:29 +0800 +Subject: [PATCH 38/56] misc, test: fix test error for loong64 + +Change-Id: I6760b4a7e51646773cd0f48baa1baba01b213b7d +--- + .../cgo/test/testdata/issue9400/asm_loong64.s | 33 +++++++++++++++++++ + test/intrinsic_atomic.go | 2 +- + test/nosplit.go | 3 ++ + test/run.go | 1 + + 4 files changed, 38 insertions(+), 1 deletion(-) + create mode 100644 misc/cgo/test/testdata/issue9400/asm_loong64.s + +diff --git a/misc/cgo/test/testdata/issue9400/asm_loong64.s b/misc/cgo/test/testdata/issue9400/asm_loong64.s +new file mode 100644 +index 0000000..25132fa +--- /dev/null ++++ b/misc/cgo/test/testdata/issue9400/asm_loong64.s +@@ -0,0 +1,33 @@ ++// Copyright 2021 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// +build loong64 ++// +build !gccgo ++ ++#include "textflag.h" ++ ++#define DBAR WORD $0x38720000 ++ ++TEXT ·RewindAndSetgid(SB),NOSPLIT|NOFRAME,$0-0 ++ // Rewind stack pointer so anything that happens on the stack ++ // will clobber the test pattern created by the caller ++ ADDV $(1024*8), R3 ++ ++ // Ask signaller to setgid ++ MOVW $1, R12 ++ DBAR ++ MOVW R12, ·Baton(SB) ++ DBAR ++ ++ // Wait for setgid completion ++loop: ++ DBAR ++ MOVW ·Baton(SB), R12 ++ OR R13, R13, R13 // hint that we're in a spin loop ++ BNE R12, loop ++ DBAR ++ ++ // Restore stack ++ ADDV $(-1024*8), R3 ++ RET +diff --git a/test/intrinsic_atomic.go b/test/intrinsic_atomic.go +index 61911b7..a1004c8 100644 +--- a/test/intrinsic_atomic.go ++++ b/test/intrinsic_atomic.go +@@ -1,5 +1,5 @@ + // errorcheck -0 -d=ssa/intrinsics/debug +-// +build amd64 arm64 mips mipsle mips64 mips64le ppc64 ppc64le riscv64 s390x ++// +build amd64 arm64 loong64 mips mipsle mips64 mips64le ppc64 ppc64le riscv64 s390x + + // Copyright 2016 The Go Authors. All rights reserved. 
+ // Use of this source code is governed by a BSD-style +diff --git a/test/nosplit.go b/test/nosplit.go +index 7c7e1bf..8e1ebff 100644 +--- a/test/nosplit.go ++++ b/test/nosplit.go +@@ -263,6 +263,9 @@ TestCases: + case "mips64", "mips64le": + ptrSize = 8 + fmt.Fprintf(&buf, "#define REGISTER (R0)\n") ++ case "loong64": ++ ptrSize = 8 ++ fmt.Fprintf(&buf, "#define REGISTER (R0)\n") + case "ppc64", "ppc64le": + ptrSize = 8 + fmt.Fprintf(&buf, "#define REGISTER (CTR)\n") +diff --git a/test/run.go b/test/run.go +index d7f5d02..4b94538 100644 +--- a/test/run.go ++++ b/test/run.go +@@ -1588,6 +1588,7 @@ var ( + "amd64": {}, + "arm": {"GOARM", "5", "6", "7"}, + "arm64": {}, ++ "loong64": {}, + "mips": {"GOMIPS", "hardfloat", "softfloat"}, + "mips64": {"GOMIPS64", "hardfloat", "softfloat"}, + "ppc64": {"GOPPC64", "power8", "power9"}, +-- +2.27.0 + diff --git a/0039-copyright-add-Loongson-into-AUTHORS.patch b/0039-copyright-add-Loongson-into-AUTHORS.patch new file mode 100644 index 0000000..abeb68d --- /dev/null +++ b/0039-copyright-add-Loongson-into-AUTHORS.patch @@ -0,0 +1,25 @@ +From 203a3a63029031a64fdffec9725f7737c35c7808 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 15 Aug 2021 14:57:00 +0800 +Subject: [PATCH 39/56] copyright: add Loongson into AUTHORS + +Change-Id: I23fb430f1f6e8a587f13e2f020721cbd3a45d4ed +--- + AUTHORS | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/AUTHORS b/AUTHORS +index 95d3158..c0bad0c 100644 +--- a/AUTHORS ++++ b/AUTHORS +@@ -824,6 +824,7 @@ Liberty Fund Inc + Linaro Limited + Lion Yang + Lloyd Dewolf ++Loongson Inc. + Lorenzo Masini + Lorenzo Stoakes + Luan Santos +-- +2.27.0 + diff --git a/0040-api-fix-check-errors-for-loong64.patch b/0040-api-fix-check-errors-for-loong64.patch new file mode 100644 index 0000000..a909e45 --- /dev/null +++ b/0040-api-fix-check-errors-for-loong64.patch @@ -0,0 +1,126 @@ +From d40474825614810e7a160738186a864ce783715b Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 23 Jun 2022 15:07:21 +0800 +Subject: [PATCH 40/56] api: fix check errors for loong64. 
+ +Change-Id: I36775fadfb0a538136b119fe0350dcbb536d5ec4 +--- + api/go1.17.txt | 105 +++++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 105 insertions(+) + +diff --git a/api/go1.17.txt b/api/go1.17.txt +index 4850538..61b38c3 100644 +--- a/api/go1.17.txt ++++ b/api/go1.17.txt +@@ -193,3 +193,108 @@ pkg time, method (Time) GoString() string + pkg time, method (Time) IsDST() bool + pkg time, method (Time) UnixMicro() int64 + pkg time, method (Time) UnixMilli() int64 ++pkg debug/elf, const EM_LOONGARCH = 258 ++pkg debug/elf, const EM_LOONGARCH Machine ++pkg debug/elf, const R_LARCH_32 = 1 ++pkg debug/elf, const R_LARCH_32 R_LARCH ++pkg debug/elf, const R_LARCH_64 = 2 ++pkg debug/elf, const R_LARCH_64 R_LARCH ++pkg debug/elf, const R_LARCH_ADD16 = 48 ++pkg debug/elf, const R_LARCH_ADD16 R_LARCH ++pkg debug/elf, const R_LARCH_ADD24 = 49 ++pkg debug/elf, const R_LARCH_ADD24 R_LARCH ++pkg debug/elf, const R_LARCH_ADD32 = 50 ++pkg debug/elf, const R_LARCH_ADD32 R_LARCH ++pkg debug/elf, const R_LARCH_ADD64 = 51 ++pkg debug/elf, const R_LARCH_ADD64 R_LARCH ++pkg debug/elf, const R_LARCH_ADD8 = 47 ++pkg debug/elf, const R_LARCH_ADD8 R_LARCH ++pkg debug/elf, const R_LARCH_COPY = 4 ++pkg debug/elf, const R_LARCH_COPY R_LARCH ++pkg debug/elf, const R_LARCH_IRELATIVE = 12 ++pkg debug/elf, const R_LARCH_IRELATIVE R_LARCH ++pkg debug/elf, const R_LARCH_JUMP_SLOT = 5 ++pkg debug/elf, const R_LARCH_JUMP_SLOT R_LARCH ++pkg debug/elf, const R_LARCH_MARK_LA = 20 ++pkg debug/elf, const R_LARCH_MARK_LA R_LARCH ++pkg debug/elf, const R_LARCH_MARK_PCREL = 21 ++pkg debug/elf, const R_LARCH_MARK_PCREL R_LARCH ++pkg debug/elf, const R_LARCH_NONE = 0 ++pkg debug/elf, const R_LARCH_NONE R_LARCH ++pkg debug/elf, const R_LARCH_RELATIVE = 3 ++pkg debug/elf, const R_LARCH_RELATIVE R_LARCH ++pkg debug/elf, const R_LARCH_SOP_ADD = 35 ++pkg debug/elf, const R_LARCH_SOP_ADD R_LARCH ++pkg debug/elf, const R_LARCH_SOP_AND = 36 ++pkg debug/elf, const R_LARCH_SOP_AND R_LARCH ++pkg debug/elf, const R_LARCH_SOP_ASSERT = 30 ++pkg debug/elf, const R_LARCH_SOP_ASSERT R_LARCH ++pkg debug/elf, const R_LARCH_SOP_IF_ELSE = 37 ++pkg debug/elf, const R_LARCH_SOP_IF_ELSE R_LARCH ++pkg debug/elf, const R_LARCH_SOP_NOT = 31 ++pkg debug/elf, const R_LARCH_SOP_NOT R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 = 45 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 = 44 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 = 40 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 = 41 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 = 42 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 = 38 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 = 43 ++pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_U = 46 ++pkg debug/elf, const R_LARCH_SOP_POP_32_U R_LARCH ++pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 = 39 ++pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE = 23 ++pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_DUP = 24 ++pkg debug/elf, const R_LARCH_SOP_PUSH_DUP R_LARCH ++pkg debug/elf, const 
R_LARCH_SOP_PUSH_GPREL = 25 ++pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL = 22 ++pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL = 29 ++pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD = 28 ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT = 27 ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT R_LARCH ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL = 26 ++pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_SL = 33 ++pkg debug/elf, const R_LARCH_SOP_SL R_LARCH ++pkg debug/elf, const R_LARCH_SOP_SR = 34 ++pkg debug/elf, const R_LARCH_SOP_SR R_LARCH ++pkg debug/elf, const R_LARCH_SOP_SUB = 32 ++pkg debug/elf, const R_LARCH_SOP_SUB R_LARCH ++pkg debug/elf, const R_LARCH_SUB16 = 53 ++pkg debug/elf, const R_LARCH_SUB16 R_LARCH ++pkg debug/elf, const R_LARCH_SUB24 = 54 ++pkg debug/elf, const R_LARCH_SUB24 R_LARCH ++pkg debug/elf, const R_LARCH_SUB32 = 55 ++pkg debug/elf, const R_LARCH_SUB32 R_LARCH ++pkg debug/elf, const R_LARCH_SUB64 = 56 ++pkg debug/elf, const R_LARCH_SUB64 R_LARCH ++pkg debug/elf, const R_LARCH_SUB8 = 52 ++pkg debug/elf, const R_LARCH_SUB8 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_DTPMOD32 = 6 ++pkg debug/elf, const R_LARCH_TLS_DTPMOD32 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_DTPMOD64 = 7 ++pkg debug/elf, const R_LARCH_TLS_DTPMOD64 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_DTPREL32 = 8 ++pkg debug/elf, const R_LARCH_TLS_DTPREL32 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_DTPREL64 = 9 ++pkg debug/elf, const R_LARCH_TLS_DTPREL64 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_TPREL32 = 10 ++pkg debug/elf, const R_LARCH_TLS_TPREL32 R_LARCH ++pkg debug/elf, const R_LARCH_TLS_TPREL64 = 11 ++pkg debug/elf, const R_LARCH_TLS_TPREL64 R_LARCH ++pkg debug/elf, method (R_LARCH) GoString() string ++pkg debug/elf, method (R_LARCH) String() string ++pkg debug/elf, type R_LARCH int +-- +2.27.0 + diff --git a/0041-fixup-fix-misc-cgo-test-sigaltstack-size-on-loong64.patch b/0041-fixup-fix-misc-cgo-test-sigaltstack-size-on-loong64.patch new file mode 100644 index 0000000..e229d0a --- /dev/null +++ b/0041-fixup-fix-misc-cgo-test-sigaltstack-size-on-loong64.patch @@ -0,0 +1,54 @@ +From 0353fca4e7ed2c66b0aa2e65d9dd6ae7f7197d07 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 9 Jan 2022 18:30:01 +0800 +Subject: [PATCH 41/56] fixup: fix misc/cgo/test sigaltstack size on loong64. + +Change-Id: I75ec0536f43e3d23ed366cbfaf6d5af8794a36ee +--- + misc/cgo/test/sigaltstack.go | 2 +- + misc/cgo/testcarchive/testdata/main4.c | 2 +- + src/runtime/testdata/testprogcgo/sigstack.go | 2 +- + 3 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/misc/cgo/test/sigaltstack.go b/misc/cgo/test/sigaltstack.go +index 034cc4b..3bab233 100644 +--- a/misc/cgo/test/sigaltstack.go ++++ b/misc/cgo/test/sigaltstack.go +@@ -14,7 +14,7 @@ package cgotest + #include + #include + +-#ifdef _AIX ++#if defined(_AIX) || defined(__loongarch64) + // On AIX, SIGSTKSZ is too small to handle Go sighandler. 
+ #define CSIGSTKSZ 0x4000 + #else +diff --git a/misc/cgo/testcarchive/testdata/main4.c b/misc/cgo/testcarchive/testdata/main4.c +index 04f7740..ffd6612 100644 +--- a/misc/cgo/testcarchive/testdata/main4.c ++++ b/misc/cgo/testcarchive/testdata/main4.c +@@ -14,7 +14,7 @@ + + #include "libgo4.h" + +-#ifdef _AIX ++#if defined(_AIX) || defined(__loongarch64) + // On AIX, CSIGSTKSZ is too small to handle Go sighandler. + #define CSIGSTKSZ 0x4000 + #else +diff --git a/src/runtime/testdata/testprogcgo/sigstack.go b/src/runtime/testdata/testprogcgo/sigstack.go +index 21b668d..ae3899f 100644 +--- a/src/runtime/testdata/testprogcgo/sigstack.go ++++ b/src/runtime/testdata/testprogcgo/sigstack.go +@@ -17,7 +17,7 @@ package main + #include + #include + +-#ifdef _AIX ++#if defined(_AIX) || defined(__loongarch64) + // On AIX, SIGSTKSZ is too small to handle Go sighandler. + #define CSIGSTKSZ 0x4000 + #else +-- +2.27.0 + diff --git a/0042-fixup-fix-mabi-to-lp64-for-loong64.patch b/0042-fixup-fix-mabi-to-lp64-for-loong64.patch new file mode 100644 index 0000000..5a3e70a --- /dev/null +++ b/0042-fixup-fix-mabi-to-lp64-for-loong64.patch @@ -0,0 +1,82 @@ +From 76a3bd4567e0dfb4b46c67e185a1bc41bff0ce97 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Sun, 9 Jan 2022 18:33:02 +0800 +Subject: [PATCH 42/56] fixup: fix mabi to lp64 for loong64. + +Change-Id: I80852638547f05470edf335d033211979c1e86af +--- + src/cmd/cgo/gcc.go | 2 +- + src/cmd/go/internal/work/exec.go | 2 +- + src/cmd/link/internal/ld/elf.go | 2 +- + src/cmd/link/internal/ld/lib.go | 2 +- + src/runtime/cgo/gcc_loong64.S | 2 +- + 5 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go +index b9250d5..7428a1d 100644 +--- a/src/cmd/cgo/gcc.go ++++ b/src/cmd/cgo/gcc.go +@@ -1594,7 +1594,7 @@ func (p *Package) gccMachine() []string { + return []string{"-mabi=32", "-msoft-float"} + } + case "loong64": +- return []string{"-mabi=lp64d"} ++ return []string{"-mabi=lp64"} + } + return nil + } +diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go +index 83d4161..6477c54 100644 +--- a/src/cmd/go/internal/work/exec.go ++++ b/src/cmd/go/internal/work/exec.go +@@ -2649,7 +2649,7 @@ func (b *Builder) gccArchArgs() []string { + return append(args, "-msoft-float") + } + case "loong64": +- return []string{"-mabi=lp64d"} ++ return []string{"-mabi=lp64"} + case "ppc64": + if cfg.Goos == "aix" { + return []string{"-maix64"} +diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go +index 5248ad2..1c86a63 100644 +--- a/src/cmd/link/internal/ld/elf.go ++++ b/src/cmd/link/internal/ld/elf.go +@@ -228,7 +228,7 @@ func Elfinit(ctxt *Link) { + ehdr.Flags = 0x20000004 /* MIPS 3 CPIC */ + } + if ctxt.Arch.Family == sys.Loong64 { +- ehdr.Flags = 0x3 /* LoongArch lp64d */ ++ ehdr.Flags = 0x3 /* LoongArch lp64 */ + } + if ctxt.Arch.Family == sys.RISCV64 { + ehdr.Flags = 0x4 /* RISCV Float ABI Double */ +diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go +index c9f14f1..8789cd5 100644 +--- a/src/cmd/link/internal/ld/lib.go ++++ b/src/cmd/link/internal/ld/lib.go +@@ -1779,7 +1779,7 @@ func hostlinkArchArgs(arch *sys.Arch) []string { + return []string{"-arch", "arm64"} + } + case sys.Loong64: +- return []string{"-mabi=lp64d"} ++ return []string{"-mabi=lp64"} + case sys.MIPS64: + return []string{"-mabi=64"} + case sys.MIPS: +diff --git a/src/runtime/cgo/gcc_loong64.S b/src/runtime/cgo/gcc_loong64.S +index e294164..a761104 100644 +--- 
a/src/runtime/cgo/gcc_loong64.S ++++ b/src/runtime/cgo/gcc_loong64.S +@@ -8,7 +8,7 @@ + * void crosscall1(void (*fn)(void), void (*setg_gcc)(void *g), void *g) + * + * Calling into the gc tool chain, where all registers are caller save. +- * Called from standard lp64d ABI, where $r1, $r3, $r23-$r30, and $f24-$f31 ++ * Called from standard lp64 ABI, where $r1, $r3, $r23-$r30, and $f24-$f31 + * are callee-save, so they must be saved explicitly, along with $r1 (LR). + */ + .globl crosscall1 +-- +2.27.0 + diff --git a/0043-fixup-fix-runtime-defs_linux_loong64.patch b/0043-fixup-fix-runtime-defs_linux_loong64.patch new file mode 100644 index 0000000..9ff75ed --- /dev/null +++ b/0043-fixup-fix-runtime-defs_linux_loong64.patch @@ -0,0 +1,200 @@ +From 9bb97de9a2e40a07a26aeaf2d91e5d11ccecc6ef Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 6 Apr 2022 20:08:46 +0800 +Subject: [PATCH 43/56] fixup: fix runtime defs_linux_loong64. + +Change-Id: If16a43949836f223971d6d4c7bf1bfd8ec35ed98 +--- + src/runtime/defs_linux_loong64.go | 88 +++++++++++++++++-------------- + src/runtime/os_linux_generic.go | 4 +- + src/runtime/os_linux_loong64.go | 27 ++++++++++ + 3 files changed, 78 insertions(+), 41 deletions(-) + +diff --git a/src/runtime/defs_linux_loong64.go b/src/runtime/defs_linux_loong64.go +index 3e0fac0..7e32230 100644 +--- a/src/runtime/defs_linux_loong64.go ++++ b/src/runtime/defs_linux_loong64.go +@@ -28,8 +28,8 @@ const ( + + _SA_RESTART = 0x10000000 + _SA_ONSTACK = 0x8000000 ++ _SA_RESTORER = 0x0 // Only used on intel + _SA_SIGINFO = 0x4 +- _SA_RESTORER = 0x0 + + _SI_KERNEL = 0x80 + _SI_TIMER = -0x2 +@@ -133,6 +133,29 @@ type itimerval struct { + it_value timeval + } + ++type sigactiont struct { ++ sa_handler uintptr ++ sa_flags uint64 ++ sa_mask [2]uint64 ++ sa_restorer uintptr ++} ++ ++type siginfoFields struct { ++ si_signo int32 ++ si_errno int32 ++ si_code int32 ++ __pad0 [1]int32 ++ // below here is a union; si_addr is the only field we use ++ si_addr uint64 ++} ++ ++type siginfo struct { ++ siginfoFields ++ ++ // Pad struct to the max size in the kernel. ++ _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte ++} ++ + type sigeventFields struct { + value uintptr + signo int32 +@@ -143,14 +166,15 @@ type sigeventFields struct { + + type sigevent struct { + sigeventFields ++ + // Pad struct to the max size in the kernel. + _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte + } + + type epollevent struct { +- events uint32 +- pad_cgo_0 [4]byte +- data [8]byte // unaligned uintptr ++ events uint32 ++ _pad uint32 ++ data [8]byte // to match amd64 + } + + const ( +@@ -159,30 +183,6 @@ const ( + _O_CLOEXEC = 0x80000 + ) + +-type sigactiont struct { +- sa_handler uintptr +- sa_flags uint64 +- sa_mask uint64 +- // Linux on loong64 does not have the sa_restorer field, but the setsig +- // function references it (for x86). Not much harm to include it at the end. +- sa_restorer uintptr +-} +- +-type siginfoFields struct { +- si_signo int32 +- si_errno int32 +- si_code int32 +- __pad0 [1]int32 +- // below here is a union; si_addr is the only field we use +- si_addr uint64 +-} +- +-type siginfo struct { +- siginfoFields +- // Pad struct to the max size in the kernel. 
+- _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte +-} +- + type usigset struct { + val [16]uint64 + } +@@ -194,19 +194,29 @@ type stackt struct { + ss_size uintptr + } + ++type user_fpregs struct { ++ fpr [32]byte ++} ++ + type sigcontext struct { +- sc_pc uint64 +- sc_regs [32]uint64 +- sc_flags uint32 +- sc_extcontext [0]uint64 ++ sc_pc uint64 ++ sc_regs [32]uint64 ++ sc_flags uint32 ++ sc_fcsr uint32 ++ sc_vcsr uint32 ++ _pad0 [4]byte ++ sc_fcc uint64 ++ sc_scr [4]uint64 ++ sc_fpregs [32]user_fpregs ++ sc_reserved [4096]byte + } + + type ucontext struct { +- uc_flags uint64 +- uc_link *ucontext +- uc_stack stackt +- uc_sigmask usigset +- uc_x_unused [0]uint8 +- uc_pad_cgo_0 [8]byte +- uc_mcontext sigcontext ++ uc_flags uint64 ++ uc_link *ucontext ++ uc_stack stackt ++ _pad0 [24]byte ++ uc_mcontext sigcontext ++ uc_sigmask uint64 ++ _pad1 [120]byte + } +diff --git a/src/runtime/os_linux_generic.go b/src/runtime/os_linux_generic.go +index fe1973d..5b1b9a0 100644 +--- a/src/runtime/os_linux_generic.go ++++ b/src/runtime/os_linux_generic.go +@@ -2,8 +2,8 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && linux +-// +build !mips,!mipsle,!mips64,!mips64le,!s390x,!ppc64,linux ++//go:build !loong64 && !mips && !mipsle && !mips64 && !mips64le && !s390x && !ppc64 && linux ++// +build !loong64,!mips,!mipsle,!mips64,!mips64le,!s390x,!ppc64,linux + + package runtime + +diff --git a/src/runtime/os_linux_loong64.go b/src/runtime/os_linux_loong64.go +index e9a8728..4cabd20 100644 +--- a/src/runtime/os_linux_loong64.go ++++ b/src/runtime/os_linux_loong64.go +@@ -17,3 +17,30 @@ func cputicks() int64 { + // nanotime() is a poor approximation of CPU ticks that is enough for the profiler. + return nanotime() + } ++ ++const ( ++ _SS_DISABLE = 2 ++ _NSIG = 65 ++ _SI_USER = 0 ++ _SIG_BLOCK = 0 ++ _SIG_UNBLOCK = 1 ++ _SIG_SETMASK = 2 ++) ++ ++type sigset [2]uint64 ++ ++var sigset_all = sigset{^uint64(0), ^uint64(0)} ++ ++//go:nosplit ++//go:nowritebarrierrec ++func sigaddset(mask *sigset, i int) { ++ (*mask)[(i-1)/64] |= 1 << ((uint32(i) - 1) & 63) ++} ++ ++func sigdelset(mask *sigset, i int) { ++ (*mask)[(i-1)/64] &^= 1 << ((uint32(i) - 1) & 63) ++} ++ ++func sigfillset(mask *[2]uint64) { ++ (*mask)[0], (*mask)[1] = ^uint64(0), ^uint64(0) ++} +-- +2.27.0 + diff --git a/0044-fixup-fix-test-issue11656-for-loong64.patch b/0044-fixup-fix-test-issue11656-for-loong64.patch new file mode 100644 index 0000000..5c6b8e6 --- /dev/null +++ b/0044-fixup-fix-test-issue11656-for-loong64.patch @@ -0,0 +1,26 @@ +From 432e1fe985e9114e00cbf16839f49a7030dd05f3 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 7 Apr 2022 09:07:59 +0800 +Subject: [PATCH 44/56] fixup: fix test issue11656 for loong64. + +Change-Id: I60575b3e114a484d05f1f3802b342b1738d3abaa +--- + test/fixedbugs/issue11656.go | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/test/fixedbugs/issue11656.go b/test/fixedbugs/issue11656.go +index 85fe720..9032e7d 100644 +--- a/test/fixedbugs/issue11656.go ++++ b/test/fixedbugs/issue11656.go +@@ -78,6 +78,8 @@ func f(n int) { + ill = append(ill, 0xe3, 0x00, 0x00, 0x00, 0x00, 0x24) // MOVD R0, (R0) + case "riscv64": + binary.LittleEndian.PutUint32(ill, 0x00003023) // MOV X0, (X0) ++ case "loong64": ++ binary.LittleEndian.PutUint32(ill, 0x29c00000) // MOVV R0, (R0) + default: + // Just leave it as 0 and hope for the best. 
+ } +-- +2.27.0 + diff --git a/0045-runtime-fixed-func-breakpoint-implementation-on-loon.patch b/0045-runtime-fixed-func-breakpoint-implementation-on-loon.patch new file mode 100644 index 0000000..4bfc0a2 --- /dev/null +++ b/0045-runtime-fixed-func-breakpoint-implementation-on-loon.patch @@ -0,0 +1,30 @@ +From 26951d233dd2efe845dadc2b4affeada3cbb31b9 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 24 Mar 2022 21:11:16 +0800 +Subject: [PATCH 45/56] runtime: fixed func breakpoint implementation on + loong64. + +use the break instruction to implement the function of the func breakpoint. + +Signed-off-by: Guoqi Chen +Change-Id: Iad0274d110f39f0ca2b330df103fd73f8ebbb358 +--- + src/runtime/asm_loong64.s | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index cfc270f..9541f6e 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -79,7 +79,7 @@ DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) + GLOBL runtime·mainPC(SB),RODATA,$8 + + TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 +- MOVV R0, 2(R0) // TODO: TD ++ BREAK + RET + + TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 +-- +2.27.0 + diff --git a/0046-update-vendor-golang.org-x-sys-for-byteorder-fix.patch b/0046-update-vendor-golang.org-x-sys-for-byteorder-fix.patch new file mode 100644 index 0000000..c3c9dfb --- /dev/null +++ b/0046-update-vendor-golang.org-x-sys-for-byteorder-fix.patch @@ -0,0 +1,26 @@ +From dc5ca1aeb90d64fb47f74a1b71fe27b76d1acd11 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Tue, 26 Apr 2022 17:43:15 +0800 +Subject: [PATCH 46/56] update vendor golang.org/x/sys for byteorder fix. + +Signed-off-by: Guoqi Chen +Change-Id: I3a192b1af2e254d7c0e4edce64ccf5460e1f1168 +--- + src/vendor/golang.org/x/sys/cpu/byteorder.go | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/src/vendor/golang.org/x/sys/cpu/byteorder.go b/src/vendor/golang.org/x/sys/cpu/byteorder.go +index dcbb14e..271055b 100644 +--- a/src/vendor/golang.org/x/sys/cpu/byteorder.go ++++ b/src/vendor/golang.org/x/sys/cpu/byteorder.go +@@ -46,6 +46,7 @@ func hostByteOrder() byteOrder { + case "386", "amd64", "amd64p32", + "alpha", + "arm", "arm64", ++ "loong64", + "mipsle", "mips64le", "mips64p32le", + "nios2", + "ppc64le", +-- +2.27.0 + diff --git a/0047-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch b/0047-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch new file mode 100644 index 0000000..06584ab --- /dev/null +++ b/0047-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch @@ -0,0 +1,68 @@ +From ea6516afec835062415a02001f9f9c5a1ad6bd59 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Wed, 11 May 2022 07:49:35 +0800 +Subject: [PATCH 47/56] cmd/compile: remove atomic Cas Xchg and Xadd intrinsics + on loong64 + +Change-Id: Id182b0e39845d55668a92b252a36cae6b83bb018 +--- + src/cmd/compile/internal/ssagen/ssa.go | 6 +++--- + test/inline_sync.go | 2 +- + test/intrinsic_atomic.go | 2 +- + 3 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go +index 1b0452d..b6acc7b 100644 +--- a/src/cmd/compile/internal/ssagen/ssa.go ++++ b/src/cmd/compile/internal/ssagen/ssa.go +@@ -3945,7 +3945,7 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.MIPS, 
sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Xchg64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) +@@ -4010,7 +4010,7 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) + }, +- sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Xadd64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) +@@ -4032,7 +4032,7 @@ func InitTables() { + s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) + return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) + }, +- sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) ++ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) + addF("runtime/internal/atomic", "Cas64", + func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) +diff --git a/test/inline_sync.go b/test/inline_sync.go +index 30b436a..de39343 100644 +--- a/test/inline_sync.go ++++ b/test/inline_sync.go +@@ -1,4 +1,4 @@ +-// +build !nacl,!386,!wasm,!arm,!gcflags_noopt ++// +build !nacl,!386,!wasm,!arm,!gcflags_noopt,!loong64 + // errorcheck -0 -m + + // Copyright 2019 The Go Authors. All rights reserved. +diff --git a/test/intrinsic_atomic.go b/test/intrinsic_atomic.go +index a1004c8..61911b7 100644 +--- a/test/intrinsic_atomic.go ++++ b/test/intrinsic_atomic.go +@@ -1,5 +1,5 @@ + // errorcheck -0 -d=ssa/intrinsics/debug +-// +build amd64 arm64 loong64 mips mipsle mips64 mips64le ppc64 ppc64le riscv64 s390x ++// +build amd64 arm64 mips mipsle mips64 mips64le ppc64 ppc64le riscv64 s390x + + // Copyright 2016 The Go Authors. All rights reserved. 
+ // Use of this source code is governed by a BSD-style +-- +2.27.0 + diff --git a/0048-runtime-fix-asyncPreempt-implementation-for-errors-o.patch b/0048-runtime-fix-asyncPreempt-implementation-for-errors-o.patch new file mode 100644 index 0000000..d9c20eb --- /dev/null +++ b/0048-runtime-fix-asyncPreempt-implementation-for-errors-o.patch @@ -0,0 +1,101 @@ +From c6008eec79cab6ffb243f16bd68e83ebee938098 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 19 May 2022 10:26:36 +0800 +Subject: [PATCH 48/56] runtime: fix asyncPreempt implementation for errors on + loong64 + +Change-Id: If93ff18167adec2783503cf428e07da121b1c918 +--- + src/runtime/mkpreempt.go | 28 ++++++++++------------------ + src/runtime/preempt.go | 2 +- + 2 files changed, 11 insertions(+), 19 deletions(-) + +diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go +index 1bad20d..7f74d54 100644 +--- a/src/runtime/mkpreempt.go ++++ b/src/runtime/mkpreempt.go +@@ -81,7 +81,7 @@ var arches = map[string]func(){ + "amd64": genAMD64, + "arm": genARM, + "arm64": genARM64, +- "loong64": genLOONG64, ++ "loong64": genLoong64, + "mips64x": func() { genMIPS(true) }, + "mipsx": func() { genMIPS(false) }, + "ppc64x": genPPC64, +@@ -450,7 +450,7 @@ func genMIPS(_64bit bool) { + p("JMP (R23)") + } + +-func genLOONG64() { ++func genLoong64() { + mov := "MOVV" + movf := "MOVD" + add := "ADDV" +@@ -459,42 +459,34 @@ func genLOONG64() { + regsize := 8 + + // Add integer registers r4-r21 r23-r29 r31 +- // R0 (zero), R30 (REGTMP), R2(tp),R3 (SP), R22 (g), R1 (LR) are special, ++ // R0 (zero), R30 (REGTMP), R2 (tp), R3 (SP), R22 (g), R1 (LR) are special, + var l = layout{sp: "R3", stack: regsize} // add slot to save PC of interrupted instruction (in LR) + for i := 4; i <= 29; i++ { + if i == 22 { +- continue //R3 is REGSP R22 is g ++ continue // R3 is REGSP R22 is g + } + reg := fmt.Sprintf("R%d", i) + l.add(mov, reg, regsize) + } + l.add(mov, r31, regsize) + +- // Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant) +- var lfp = layout{sp: "R3", stack: l.stack} +- // lfp.addSpecial( +- // mov+" FCR31, R1\n"+mov+" R1, %d(R29)", +- // mov+" %d(R29), R1\n"+mov+" R1, FCR31", +- // regsize) + // Add floating point registers F0-F31. + for i := 0; i <= 31; i++ { + reg := fmt.Sprintf("F%d", i) +- lfp.add(movf, reg, regsize) ++ l.add(movf, reg, regsize) + } + + // allocate frame, save PC of interrupted instruction (in LR) +- p(mov+" R1, -%d(R3)", lfp.stack) +- p(sub+" $%d, R3", lfp.stack) ++ p(mov+" R1, -%d(R3)", l.stack) ++ p(sub+" $%d, R3", l.stack) + + l.save() +- lfp.save() + p("CALL ·asyncPreempt2(SB)") +- lfp.restore() + l.restore() + +- p(mov+" %d(R3), R1", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it +- p(mov + " (R3), R30") // load PC to REGTMP +- p(add+" $%d, R3", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) ++ p(mov+" %d(R3), R1", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it ++ p(mov + " (R3), R30") // load PC to REGTMP ++ p(add+" $%d, R3", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) + p("JMP (R30)") + } + +diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go +index 92a05d2..1d5aae1 100644 +--- a/src/runtime/preempt.go ++++ b/src/runtime/preempt.go +@@ -386,7 +386,7 @@ func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) { + // Not Go code. 
+ return false, 0 + } +- if (GOARCH == "loong64" || GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 { ++ if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le") && lr == pc+8 && funcspdelta(f, pc, nil) == 0 { + // We probably stopped at a half-executed CALL instruction, + // where the LR is updated but the PC has not. If we preempt + // here we'll see a seemingly self-recursive call, which is in +-- +2.27.0 + diff --git a/0049-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch b/0049-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch new file mode 100644 index 0000000..a249242 --- /dev/null +++ b/0049-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch @@ -0,0 +1,50 @@ +From 9f6af9533a7a6e44c666ed3f1ad6949d4b6d7d94 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 23 May 2022 08:32:35 +0800 +Subject: [PATCH 49/56] cmd/internal/obj: add FuncInfo SPWRITE flag for + linux/loong64 + +Co-authored-by: limeidan +Change-Id: Iac4f706fc9ff1047f1fa02f7178ebfbe38ad1ec3 +--- + src/cmd/internal/obj/loong64/obj.go | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go +index 36036e5..abfe67d 100644 +--- a/src/cmd/internal/obj/loong64/obj.go ++++ b/src/cmd/internal/obj/loong64/obj.go +@@ -8,7 +8,7 @@ import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/sys" +- ++ "log" + "math" + ) + +@@ -460,6 +460,21 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { + p.From.Reg = REGSP + } + } ++ ++ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 { ++ f := c.cursym.Func() ++ if f.FuncFlag&objabi.FuncFlag_SPWRITE == 0 { ++ c.cursym.Func().FuncFlag |= objabi.FuncFlag_SPWRITE ++ if ctxt.Debugvlog || !ctxt.IsAsm { ++ ctxt.Logf("auto-SPWRITE: %s %v\n", c.cursym.Name, p) ++ if !ctxt.IsAsm { ++ ctxt.Diag("invalid auto-SPWRITE in non-assembly") ++ ctxt.DiagFlush() ++ log.Fatalf("bad SPWRITE") ++ } ++ } ++ } ++ } + } + } + +-- +2.27.0 + diff --git a/0050-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch b/0050-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch new file mode 100644 index 0000000..2614e0b --- /dev/null +++ b/0050-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch @@ -0,0 +1,48 @@ +From 978c7638900b764d5a91088c305a97951cce1744 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 23 May 2022 08:58:51 +0800 +Subject: [PATCH 50/56] runtime: add missing {TOPFRAME,NOFRAME} flag for + linux/loong64 + +Co-authored-by: limeidan +Change-Id: I0011a10f831e6c2b0da96265682212b0747f0e2a +--- + src/runtime/asm_loong64.s | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index 9541f6e..85df3ed 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -11,7 +11,7 @@ + + #define REGCTXT R29 + +-TEXT runtime·rt0_go(SB),NOSPLIT,$0 ++TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 + // R3 = stack; R4 = argc; R5 = argv + + ADDV $-24, R3 +@@ -127,12 +127,16 @@ TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 + + // void gogo(Gobuf*) + // restore state from Gobuf; longjmp +-TEXT runtime·gogo(SB), NOSPLIT, $16-8 ++TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 + MOVV buf+0(FP), R4 +- MOVV gobuf_g(R4), g // make sure g is not nil ++ MOVV gobuf_g(R4), R5 ++ MOVV 0(R5), R0 // make sure g != nil ++ JMP 
gogo<>(SB) ++ ++TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 ++ MOVV R5, g + JAL runtime·save_g(SB) + +- MOVV 0(g), R5 + MOVV gobuf_sp(R4), R3 + MOVV gobuf_lr(R4), R1 + MOVV gobuf_ret(R4), R19 +-- +2.27.0 + diff --git a/0051-cmd-compile-fix-loong64-constant-folding-in-division.patch b/0051-cmd-compile-fix-loong64-constant-folding-in-division.patch new file mode 100644 index 0000000..4c4793e --- /dev/null +++ b/0051-cmd-compile-fix-loong64-constant-folding-in-division.patch @@ -0,0 +1,148 @@ +From 367d5ec4dbf9c3142f9499181a01bdfbc8416b2d Mon Sep 17 00:00:00 2001 +From: Cuong Manh Le +Date: Sat, 21 May 2022 19:00:18 +0700 +Subject: [PATCH 51/56] cmd/compile: fix loong64 constant folding in division + rules + +The divisor must be non-zero for the rule to be triggered. + +Fixes #53018 + +Change-Id: Id56b8d986945bbb66e13131d11264ee438de5cb2 +--- + .../compile/internal/ssa/gen/LOONG64.rules | 8 ++--- + .../compile/internal/ssa/rewriteLOONG64.go | 16 ++++++++++ + test/fixedbugs/issue53018.go | 30 +++++++++++++++++++ + 3 files changed, 50 insertions(+), 4 deletions(-) + create mode 100644 test/fixedbugs/issue53018.go + +diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64.rules b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +index 3fd4552..4237aea 100644 +--- a/src/cmd/compile/internal/ssa/gen/LOONG64.rules ++++ b/src/cmd/compile/internal/ssa/gen/LOONG64.rules +@@ -617,10 +617,10 @@ + (SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))]) + (SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)]) + (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d]) +-(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c/d]) +-(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)/uint64(d))]) +-(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c%d]) // mod +-(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod ++(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c/d]) ++(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))]) ++(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c%d]) // mod ++(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod + (ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d]) + (ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) + (ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d]) +diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +index 463a045..e0f0df0 100644 +--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go ++++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go +@@ -6825,6 +6825,7 @@ func rewriteValueLOONG64_OpSelect0(v *Value) bool { + return true + } + // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) ++ // cond: d != 0 + // result: (MOVVconst [c%d]) + for { + if v_0.Op != OpLOONG64DIVV { +@@ -6841,11 +6842,15 @@ func rewriteValueLOONG64_OpSelect0(v *Value) bool { + break + } + d := auxIntToInt64(v_0_1.AuxInt) ++ if !(d != 0) { ++ break ++ } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c % d) + return true + } + // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) ++ // cond: d != 0 + // result: (MOVVconst [int64(uint64(c)%uint64(d))]) + for { + if v_0.Op != OpLOONG64DIVVU { +@@ -6862,6 +6867,9 @@ func rewriteValueLOONG64_OpSelect0(v *Value) bool { + break + } + d := auxIntToInt64(v_0_1.AuxInt) ++ if !(d != 0) 
{ ++ break ++ } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) + return true +@@ -7018,6 +7026,7 @@ func rewriteValueLOONG64_OpSelect1(v *Value) bool { + return true + } + // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) ++ // cond: d != 0 + // result: (MOVVconst [c/d]) + for { + if v_0.Op != OpLOONG64DIVV { +@@ -7034,11 +7043,15 @@ func rewriteValueLOONG64_OpSelect1(v *Value) bool { + break + } + d := auxIntToInt64(v_0_1.AuxInt) ++ if !(d != 0) { ++ break ++ } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c / d) + return true + } + // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) ++ // cond: d != 0 + // result: (MOVVconst [int64(uint64(c)/uint64(d))]) + for { + if v_0.Op != OpLOONG64DIVVU { +@@ -7055,6 +7068,9 @@ func rewriteValueLOONG64_OpSelect1(v *Value) bool { + break + } + d := auxIntToInt64(v_0_1.AuxInt) ++ if !(d != 0) { ++ break ++ } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) + return true +diff --git a/test/fixedbugs/issue53018.go b/test/fixedbugs/issue53018.go +new file mode 100644 +index 0000000..439d9d5 +--- /dev/null ++++ b/test/fixedbugs/issue53018.go +@@ -0,0 +1,30 @@ ++// compile ++ ++// Copyright 2022 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package p ++ ++var V []int ++ ++func f(i int, c chan int) int { ++ arr := []int{0, 1} ++ for range c { ++ for a2 := range arr { ++ var a []int ++ V = V[:1/a2] ++ a[i] = 0 ++ } ++ return func() int { ++ arr = []int{} ++ return func() int { ++ return func() int { ++ return func() int { return 4 }() ++ }() ++ }() ++ }() ++ } ++ ++ return 0 ++} +-- +2.27.0 + diff --git a/0052-runtime-fix-the-vDSO-symbol-version-on-loong64.patch b/0052-runtime-fix-the-vDSO-symbol-version-on-loong64.patch new file mode 100644 index 0000000..484cc07 --- /dev/null +++ b/0052-runtime-fix-the-vDSO-symbol-version-on-loong64.patch @@ -0,0 +1,39 @@ +From 6e3ca7931d62fe11a21f2558ce9c39b628ace5ba Mon Sep 17 00:00:00 2001 +From: WANG Xuerui +Date: Mon, 23 May 2022 11:02:57 +0800 +Subject: [PATCH 52/56] runtime: fix the vDSO symbol version on loong64 + +The current value is appropriate for an early in-house version of +Linux/LoongArch, but for the upstream version it is very likely +"LINUX_5.10" instead, per the latest upstream submission [1]. + +[1]: https://lore.kernel.org/all/20220518095709.1313120-3-chenhuacai@loongson.cn/ + +Change-Id: Ia97e5cae82a5b306bd3eea86b9e442441da07973 +--- + src/runtime/vdso_linux_loong64.go | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/src/runtime/vdso_linux_loong64.go b/src/runtime/vdso_linux_loong64.go +index e8afdd4..00cdb17 100644 +--- a/src/runtime/vdso_linux_loong64.go ++++ b/src/runtime/vdso_linux_loong64.go +@@ -13,11 +13,11 @@ const ( + vdsoArrayMax = 1<<50 - 1 + ) + +-// see man 7 vdso : loong64 +-var vdsoLinuxVersion = vdsoVersionKey{"LINUX_2.6", 0x3ae75f6} ++// not currently described in manpages as of May 2022, but will eventually ++// appear ++// when that happens, see man 7 vdso : loongarch ++var vdsoLinuxVersion = vdsoVersionKey{"LINUX_5.10", 0xae78f70} + +-// The symbol name is not __kernel_clock_gettime as suggested by the manpage; +-// according to Linux source code it should be __vdso_clock_gettime instead. 
+ var vdsoSymbolKeys = []vdsoSymbolKey{ + {"__vdso_clock_gettime", 0xd35ec75, 0x6e43a318, &vdsoClockgettimeSym}, + } +-- +2.27.0 + diff --git a/0053-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch b/0053-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch new file mode 100644 index 0000000..9a5d2a6 --- /dev/null +++ b/0053-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch @@ -0,0 +1,36 @@ +From 8e3af9a94ded88f57737f8e7156e6f5ca4fae6af Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Thu, 26 May 2022 19:38:02 +0800 +Subject: [PATCH 53/56] internal/cpu: fix cpu cacheLineSize for loong64 + +We choose 64 because the L1 Dcache of Loongson 3A5000 CPU is +4-way 256-line 64-byte-per-line. + +Change-Id: Ifb9a9f993dd6f75b5adb4ff6e4d93e945b1b2a98 +Reviewed-on: https://go-review.googlesource.com/c/go/+/408854 +Run-TryBot: Ian Lance Taylor +TryBot-Result: Gopher Robot +Auto-Submit: Ian Lance Taylor +Reviewed-by: Alex Rakoczy +Reviewed-by: Ian Lance Taylor +--- + src/internal/cpu/cpu_loong64.go | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/src/internal/cpu/cpu_loong64.go b/src/internal/cpu/cpu_loong64.go +index d0ff934..7e7701d 100644 +--- a/src/internal/cpu/cpu_loong64.go ++++ b/src/internal/cpu/cpu_loong64.go +@@ -7,6 +7,8 @@ + + package cpu + +-const CacheLinePadSize = 32 ++// CacheLinePadSize is used to prevent false sharing of cache lines. ++// We choose 64 because Loongson 3A5000 the L1 Dcache is 4-way 256-line 64-byte-per-line. ++const CacheLinePadSize = 64 + + func doinit() {} +-- +2.27.0 + diff --git a/0054-syscall-runtime-internal-syscall-always-zero-the-hig.patch b/0054-syscall-runtime-internal-syscall-always-zero-the-hig.patch new file mode 100644 index 0000000..cb40f41 --- /dev/null +++ b/0054-syscall-runtime-internal-syscall-always-zero-the-hig.patch @@ -0,0 +1,68 @@ +From 533efc27ca0aa1330a55a316cf8e797896821cb2 Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Fri, 10 Jun 2022 19:08:14 +0800 +Subject: [PATCH 54/56] syscall, runtime/internal/syscall: always zero the + higher bits of return value on linux/loong64 + +All loong64 syscalls return values only via R4/A0, and R5/A1 may contain unrelated +content. Always zero the second return value. + +Change-Id: I62af59369bece5bd8028b937c74f4694150f7a55 +Reviewed-on: https://go-review.googlesource.com/c/go/+/411615 +Run-TryBot: Ian Lance Taylor +TryBot-Result: Gopher Robot +Auto-Submit: Ian Lance Taylor +Reviewed-by: Ian Lance Taylor +Reviewed-by: Austin Clements +--- + src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s | 4 ++-- + src/runtime/internal/syscall/asm_linux_loong64.s | 2 +- + src/syscall/asm_linux_loong64.s | 2 +- + 3 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +index 1ccfa5d..dccf8ac 100644 +--- a/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s ++++ b/src/cmd/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +@@ -30,7 +30,7 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) +- MOVV R5, r2+40(FP) ++ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 + JAL runtime·exitsyscall(SB) + RET + +@@ -50,5 +50,5 @@ TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) +- MOVV R5, r2+40(FP) ++ MOVV R0, r2+40(FP) // r2 is not used. 
Always set to 0 + RET +diff --git a/src/runtime/internal/syscall/asm_linux_loong64.s b/src/runtime/internal/syscall/asm_linux_loong64.s +index 39bf5b1..ccddeee 100644 +--- a/src/runtime/internal/syscall/asm_linux_loong64.s ++++ b/src/runtime/internal/syscall/asm_linux_loong64.s +@@ -24,6 +24,6 @@ TEXT ·Syscall6(SB),NOSPLIT,$0-80 + RET + ok: + MOVV R4, r1+56(FP) // r1 +- MOVV R5, r2+64(FP) // r2 ++ MOVV R0, r2+64(FP) // r2 is not used. Always set to 0. + MOVV R0, err+72(FP) // errno + RET +diff --git a/src/syscall/asm_linux_loong64.s b/src/syscall/asm_linux_loong64.s +index 09f3f97..0ec9b3d 100644 +--- a/src/syscall/asm_linux_loong64.s ++++ b/src/syscall/asm_linux_loong64.s +@@ -143,5 +143,5 @@ TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) +- MOVV R5, r2+40(FP) ++ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0. + RET +-- +2.27.0 + diff --git a/0055-runtime-clean-up-unused-function-gosave-on-loong64.patch b/0055-runtime-clean-up-unused-function-gosave-on-loong64.patch new file mode 100644 index 0000000..fc0afe8 --- /dev/null +++ b/0055-runtime-clean-up-unused-function-gosave-on-loong64.patch @@ -0,0 +1,39 @@ +From 0d5621c29995e466471c08743eed307613f5f06d Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 30 May 2022 18:44:57 +0800 +Subject: [PATCH 55/56] runtime: clean up unused function gosave on loong64 + +Change-Id: I28960a33d251a36e5e364fa6e27c5b2e13349f6b +--- + src/runtime/asm_loong64.s | 15 --------------- + 1 file changed, 15 deletions(-) + +diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s +index 85df3ed..0cb1c41 100644 +--- a/src/runtime/asm_loong64.s ++++ b/src/runtime/asm_loong64.s +@@ -110,21 +110,6 @@ TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 + * go-routine + */ + +-// void gosave(Gobuf*) +-// save state in Gobuf; setjmp +-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 +- MOVV buf+0(FP), R19 +- MOVV R3, gobuf_sp(R19) +- MOVV R1, gobuf_pc(R19) +- MOVV g, gobuf_g(R19) +- MOVV R0, gobuf_lr(R19) +- MOVV R0, gobuf_ret(R19) +- // Assert ctxt is zero. See func save. 
+- MOVV gobuf_ctxt(R19), R19 +- BEQ R19, 2(PC) +- JAL runtime·badctxt(SB) +- RET +- + // void gogo(Gobuf*) + // restore state from Gobuf; longjmp + TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 +-- +2.27.0 + diff --git a/0056-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch b/0056-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch new file mode 100644 index 0000000..b58c9b6 --- /dev/null +++ b/0056-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch @@ -0,0 +1,85 @@ +From 0c9d664e2a01e5e8c85c2c0a32fd36c7b1dc85cb Mon Sep 17 00:00:00 2001 +From: Guoqi Chen +Date: Mon, 27 Jun 2022 14:51:19 +0800 +Subject: [PATCH 56/56] debug/pe: add IMAGE_FILE_MACHINE_LOONGARCH{64,32} + +Related: https://github.com/MicrosoftDocs/win32/pull/1067 + +Change-Id: I946253f217a5c616ae4a19be44634000cba5020e +--- + api/go1.17.txt | 4 ++++ + src/debug/pe/pe.go | 46 ++++++++++++++++++++++++---------------------- + 2 files changed, 28 insertions(+), 22 deletions(-) + +diff --git a/api/go1.17.txt b/api/go1.17.txt +index 61b38c3..0e7fe3c 100644 +--- a/api/go1.17.txt ++++ b/api/go1.17.txt +@@ -298,3 +298,7 @@ pkg debug/elf, const R_LARCH_TLS_TPREL64 R_LARCH + pkg debug/elf, method (R_LARCH) GoString() string + pkg debug/elf, method (R_LARCH) String() string + pkg debug/elf, type R_LARCH int ++pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH32 = 25138 ++pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH32 ideal-int ++pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH64 = 25188 ++pkg debug/pe, const IMAGE_FILE_MACHINE_LOONGARCH64 ideal-int +diff --git a/src/debug/pe/pe.go b/src/debug/pe/pe.go +index db112fe..9d55c40 100644 +--- a/src/debug/pe/pe.go ++++ b/src/debug/pe/pe.go +@@ -87,28 +87,30 @@ type OptionalHeader64 struct { + } + + const ( +- IMAGE_FILE_MACHINE_UNKNOWN = 0x0 +- IMAGE_FILE_MACHINE_AM33 = 0x1d3 +- IMAGE_FILE_MACHINE_AMD64 = 0x8664 +- IMAGE_FILE_MACHINE_ARM = 0x1c0 +- IMAGE_FILE_MACHINE_ARMNT = 0x1c4 +- IMAGE_FILE_MACHINE_ARM64 = 0xaa64 +- IMAGE_FILE_MACHINE_EBC = 0xebc +- IMAGE_FILE_MACHINE_I386 = 0x14c +- IMAGE_FILE_MACHINE_IA64 = 0x200 +- IMAGE_FILE_MACHINE_M32R = 0x9041 +- IMAGE_FILE_MACHINE_MIPS16 = 0x266 +- IMAGE_FILE_MACHINE_MIPSFPU = 0x366 +- IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466 +- IMAGE_FILE_MACHINE_POWERPC = 0x1f0 +- IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1 +- IMAGE_FILE_MACHINE_R4000 = 0x166 +- IMAGE_FILE_MACHINE_SH3 = 0x1a2 +- IMAGE_FILE_MACHINE_SH3DSP = 0x1a3 +- IMAGE_FILE_MACHINE_SH4 = 0x1a6 +- IMAGE_FILE_MACHINE_SH5 = 0x1a8 +- IMAGE_FILE_MACHINE_THUMB = 0x1c2 +- IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169 ++ IMAGE_FILE_MACHINE_UNKNOWN = 0x0 ++ IMAGE_FILE_MACHINE_AM33 = 0x1d3 ++ IMAGE_FILE_MACHINE_AMD64 = 0x8664 ++ IMAGE_FILE_MACHINE_ARM = 0x1c0 ++ IMAGE_FILE_MACHINE_ARMNT = 0x1c4 ++ IMAGE_FILE_MACHINE_ARM64 = 0xaa64 ++ IMAGE_FILE_MACHINE_EBC = 0xebc ++ IMAGE_FILE_MACHINE_I386 = 0x14c ++ IMAGE_FILE_MACHINE_IA64 = 0x200 ++ IMAGE_FILE_MACHINE_LOONGARCH32 = 0x6232 ++ IMAGE_FILE_MACHINE_LOONGARCH64 = 0x6264 ++ IMAGE_FILE_MACHINE_M32R = 0x9041 ++ IMAGE_FILE_MACHINE_MIPS16 = 0x266 ++ IMAGE_FILE_MACHINE_MIPSFPU = 0x366 ++ IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466 ++ IMAGE_FILE_MACHINE_POWERPC = 0x1f0 ++ IMAGE_FILE_MACHINE_POWERPCFP = 0x1f1 ++ IMAGE_FILE_MACHINE_R4000 = 0x166 ++ IMAGE_FILE_MACHINE_SH3 = 0x1a2 ++ IMAGE_FILE_MACHINE_SH3DSP = 0x1a3 ++ IMAGE_FILE_MACHINE_SH4 = 0x1a6 ++ IMAGE_FILE_MACHINE_SH5 = 0x1a8 ++ IMAGE_FILE_MACHINE_THUMB = 0x1c2 ++ IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169 + ) + + // IMAGE_DIRECTORY_ENTRY constants +-- +2.27.0 + diff --git a/golang.spec b/golang.spec index c62da57..4130f73 100644 --- 
a/golang.spec +++ b/golang.spec @@ -29,20 +29,20 @@ # Define GOROOT macros %global goroot %{_prefix}/lib/%{name} %global gopath %{_datadir}/gocode -%global golang_arches x86_64 aarch64 ppc64le s390x +%global golang_arches x86_64 aarch64 ppc64le s390x loongarch64 %global golibdir %{_libdir}/%{name} # Golang build options. # Build golang using external/internal(close to cgo disabled) linking. -%ifarch x86_64 ppc64le %{arm} aarch64 s390x +%ifarch x86_64 ppc64le %{arm} aarch64 s390x loongarch64 %global external_linker 1 %else %global external_linker 0 %endif # Build golang with cgo enabled/disabled(later equals more or less to internal linking). -%ifarch x86_64 ppc64le %{arm} aarch64 s390x +%ifarch x86_64 ppc64le %{arm} aarch64 s390x loongarch64 %global cgo_enabled 1 %else %global cgo_enabled 0 @@ -56,7 +56,7 @@ %endif # Controls what ever we fail on failed tests -%ifarch x86_64 %{arm} aarch64 ppc64le +%ifarch x86_64 %{arm} aarch64 ppc64le loongarch64 %global fail_on_tests 1 %else %global fail_on_tests 0 @@ -94,14 +94,18 @@ %ifarch s390x %global gohostarch s390x %endif +%ifarch loongarch64 +%global gohostarch loong64 +%endif %global go_api 1.17 %global go_version 1.17.10 %global pkg_release 1 +%global anolis_release .0.1 Name: golang Version: %{go_version} -Release: 1%{?dist} +Release: 1%{anolis_release}%{?dist} Summary: The Go Programming Language # source tree includes several copies of Mark.Twain-Tom.Sawyer.txt under Public Domain License: BSD and Public Domain @@ -143,6 +147,64 @@ Patch221: fix_TestScript_list_std.patch Patch1939923: skip_test_rhbz1939923.patch +# Port to loongarch64 patches +Patch0001: 0001-cmd-internal-sys-declare-loong64-arch.patch +Patch0002: 0002-cmd-internal-sys-fix-placement-of-loong64-definition.patch +Patch0003: 0003-internal-add-loong64-constant-definition.patch +Patch0004: 0004-cmd-go-internal-configure-go-tool-workflow-for-loong.patch +Patch0005: 0005-cmd-compile-register-loong64.Init-function-for-compi.patch +Patch0006: 0006-cmd-compile-internal-loong64-implement-Init-function.patch +Patch0007: 0007-cmd-compile-internal-ssa-config-lower-pass-function-.patch +Patch0008: 0008-cmd-compile-internal-ssa-increase-the-bit-width-of-B.patch +Patch0009: 0009-cmd-compile-internal-ssa-gen-define-rules-and-operat.patch +Patch0010: 0010-cmd-compile-internal-ssa-inline-memmove-with-known-s.patch +Patch0011: 0011-cmd-compile-internal-ssa-add-support-on-loong64-for-.patch +Patch0012: 0012-cmd-compile-internal-ssagen-enable-intrinsic-operati.patch +Patch0013: 0013-cmd-compile-internal-fix-test-error-on-loong64.patch +Patch0014: 0014-cmd-internal-obj-instructions-and-registers-for-loon.patch +Patch0015: 0015-cmd-asm-internal-helper-function-and-end-to-end-test.patch +Patch0016: 0016-cmd-internal-objabi-cmd-link-support-linker-for-linu.patch +Patch0017: 0017-runtime-bootstrap-for-linux-loong64-and-implement-ru.patch +Patch0018: 0018-runtime-load-save-TLS-variable-g-on-loong64.patch +Patch0019: 0019-runtime-implement-signal-for-linux-loong64.patch +Patch0020: 0020-runtime-support-vdso-for-linux-loong64.patch +Patch0021: 0021-runtime-implement-duffzero-duffcopy-for-linux-loong6.patch +Patch0022: 0022-runtime-implement-asyncPreempt-for-linux-loong64.patch +Patch0023: 0023-runtime-support-memclr-memmove-for-linux-loong64.patch +Patch0024: 0024-runtime-implement-syscalls-for-runtime-bootstrap-on-.patch +Patch0025: 0025-runtime-add-build-tag-for-common-support-on-linux-lo.patch +Patch0026: 0026-runtime-fix-runtime-test-error-for-loong64.patch +Patch0027: 
0027-runtime-internal-add-atomic-support-for-loong64.patch +Patch0028: 0028-cmd-cgo-configure-cgo-tool-for-loong64.patch +Patch0029: 0029-runtime-cgo-add-cgo-function-call-support-for-loong6.patch +Patch0030: 0030-cmd-nm-cmd-objdump-cmd-pprof-disassembly-is-not-supp.patch +Patch0031: 0031-cmd-dist-support-dist-tool-for-loong64.patch +Patch0032: 0032-cmd-vendor-update-vendored-golang.org-x-sys-to-suppo.patch +Patch0033: 0033-cmd-vendor-update-vendored-golang.org-x-tools-to-sup.patch +Patch0034: 0034-internal-bytealg-support-basic-byte-operation-on-loo.patch +Patch0035: 0035-debug-go-math-os-reflect-vendor-support-standard-lib.patch +Patch0036: 0036-syscall-add-syscall-support-for-linux-loong64.patch +Patch0037: 0037-internal-syscall-unix-loong64-use-generic-syscall.patch +Patch0038: 0038-misc-test-fix-test-error-for-loong64.patch +Patch0039: 0039-copyright-add-Loongson-into-AUTHORS.patch +Patch0040: 0040-api-fix-check-errors-for-loong64.patch +Patch0041: 0041-fixup-fix-misc-cgo-test-sigaltstack-size-on-loong64.patch +Patch0042: 0042-fixup-fix-mabi-to-lp64-for-loong64.patch +Patch0043: 0043-fixup-fix-runtime-defs_linux_loong64.patch +Patch0044: 0044-fixup-fix-test-issue11656-for-loong64.patch +Patch0045: 0045-runtime-fixed-func-breakpoint-implementation-on-loon.patch +Patch0046: 0046-update-vendor-golang.org-x-sys-for-byteorder-fix.patch +Patch0047: 0047-cmd-compile-remove-atomic-Cas-Xchg-and-Xadd-intrinsi.patch +Patch0048: 0048-runtime-fix-asyncPreempt-implementation-for-errors-o.patch +Patch0049: 0049-cmd-internal-obj-add-FuncInfo-SPWRITE-flag-for-linux.patch +Patch0050: 0050-runtime-add-missing-TOPFRAME-NOFRAME-flag-for-linux-.patch +Patch0051: 0051-cmd-compile-fix-loong64-constant-folding-in-division.patch +Patch0052: 0052-runtime-fix-the-vDSO-symbol-version-on-loong64.patch +Patch0053: 0053-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch +Patch0054: 0054-syscall-runtime-internal-syscall-always-zero-the-hig.patch +Patch0055: 0055-runtime-clean-up-unused-function-gosave-on-loong64.patch +Patch0056: 0056-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch + # Having documentation separate was broken Obsoletes: %{name}-docs < 1.1-4 @@ -230,13 +292,7 @@ Requires: %{name} = %{version}-%{release} %endif %prep -%setup -q -n go-go%{go_version}-%{pkg_release}-openssl-fips - -%patch215 -p1 - -%patch221 -p1 - -%patch1939923 -p1 +%autosetup -p1 -n go-go%{go_version}-%{pkg_release}-openssl-fips cp %{SOURCE1} ./src/runtime/ @@ -511,6 +567,9 @@ cd .. %endif %changelog +* Tue Jun 28 2022 Guoqi Chen - 1.17.10-1.0.1 +- Add loongarch support (Guoqi Chen) + * Fri May 27 2022 David Benoit - 1.17.10-1 - Rebase to Go 1.17.10 - Resolves: rhbz#2091077 -- Gitee
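As a side note on 0043-fixup-fix-runtime-defs_linux_loong64.patch: the sigaddset/sigdelset helpers it adds treat the two-word mask as 128 bits, mapping signal i to bit (i-1)&63 of word (i-1)/64. The standalone sketch below only mirrors that arithmetic for illustration; the package, type and function names here are hypothetical and not part of the patch or the Go runtime.

package main

import "fmt"

// sigset mirrors the two-word signal-mask layout used on linux/loong64
// (_NSIG = 65 signals fit in 2 x 64 bits). Illustrative only.
type sigset [2]uint64

// sigaddset sets the bit for signal i: word (i-1)/64, bit (i-1)&63.
func sigaddset(mask *sigset, i int) {
	(*mask)[(i-1)/64] |= 1 << ((uint32(i) - 1) & 63)
}

// sigdelset clears the bit for signal i.
func sigdelset(mask *sigset, i int) {
	(*mask)[(i-1)/64] &^= 1 << ((uint32(i) - 1) & 63)
}

func main() {
	var m sigset
	sigaddset(&m, 2)  // SIGINT -> word 0, bit 1
	sigaddset(&m, 65) // highest signal -> word 1, bit 0
	fmt.Printf("%#x %#x\n", m[0], m[1]) // 0x2 0x1
	sigdelset(&m, 2)
	fmt.Printf("%#x %#x\n", m[0], m[1]) // 0x0 0x1
}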
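0053-internal-cpu-fix-cpu-cacheLineSize-for-loong64.patch raises CacheLinePadSize to 64 to match the 64-byte cache lines of the Loongson 3A5000 and avoid false sharing. The minimal sketch below shows how such a pad keeps two hot fields on separate cache lines; the counters type and the local constant are illustrative, not taken from the runtime.

package main

import (
	"fmt"
	"unsafe"
)

const cacheLinePadSize = 64 // the loong64 value chosen in patch 0053

// counters places two frequently written fields on different cache lines
// so concurrent writers do not invalidate each other's line.
type counters struct {
	a uint64
	_ [cacheLinePadSize - unsafe.Sizeof(uint64(0))]byte // pad to the next line
	b uint64
}

func main() {
	var c counters
	fmt.Println("offset of a:", unsafe.Offsetof(c.a)) // 0
	fmt.Println("offset of b:", unsafe.Offsetof(c.b)) // 64
}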
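0056-debug-pe-add-IMAGE_FILE_MACHINE_LOONGARCH-64-32.patch defines the LoongArch PE machine IDs 0x6232 and 0x6264. A small sketch for checking a PE object's machine field against the 64-bit value; the local constant mirrors IMAGE_FILE_MACHINE_LOONGARCH64 so the sketch also builds on an unpatched toolchain, and the input path is whatever PE file you have at hand.

package main

import (
	"debug/pe"
	"fmt"
	"log"
	"os"
)

// Mirrors debug/pe IMAGE_FILE_MACHINE_LOONGARCH64 as added by patch 0056.
const imageFileMachineLoongarch64 = 0x6264

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: pecheck <file.exe or .obj>")
	}
	f, err := pe.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	fmt.Printf("machine: %#x (loongarch64: %v)\n",
		f.FileHeader.Machine,
		f.FileHeader.Machine == imageFileMachineLoongarch64)
}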