PeiServicesTablePointerLib|MdePkg/Library/PeiServicesTablePointerLib/PeiServicesTablePointerLib.inf\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
# Add support for GCC stack protector\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
ArmPkg/Library/ArmCacheMaintenanceLib/ArmCacheMaintenanceLib.inf\r
ArmPkg/Library/ArmDisassemblerLib/ArmDisassemblerLib.inf\r
ArmPkg/Library/ArmPsciResetSystemLib/ArmPsciResetSystemLib.inf\r
- ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
ArmPkg/Library/DebugAgentSymbolsBaseLib/DebugAgentSymbolsBaseLib.inf\r
ArmPkg/Library/DebugPeCoffExtraActionLib/DebugPeCoffExtraActionLib.inf\r
ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLib.inf\r
UefiLib|MdePkg/Library/UefiLib/UefiLib.inf\r
UefiRuntimeServicesTableLib|MdePkg/Library/UefiRuntimeServicesTableLib/UefiRuntimeServicesTableLib.inf\r
\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
[Components.common]\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2020, Arm, Limited. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
- /*\r
- * Provide the GCC intrinsics that are required when using GCC 9 or\r
- * later with the -moutline-atomics options (which became the default\r
- * in GCC 10)\r
- */\r
- .arch armv8-a\r
-\r
- .macro reg_alias, pfx, sz\r
- r0_\sz .req \pfx\()0\r
- r1_\sz .req \pfx\()1\r
- tmp0_\sz .req \pfx\()16\r
- tmp1_\sz .req \pfx\()17\r
- .endm\r
-\r
- /*\r
- * Define register aliases of the right type for each size\r
- * (xN for 8 bytes, wN for everything smaller)\r
- */\r
- reg_alias w, 1\r
- reg_alias w, 2\r
- reg_alias w, 4\r
- reg_alias x, 8\r
-\r
- .macro fn_start, name:req\r
- .section .text.\name\r
- .globl \name\r
- .type \name, %function\r
-\name\():\r
- .endm\r
-\r
- .macro fn_end, name:req\r
- .size \name, . - \name\r
- .endm\r
-\r
- /*\r
- * Emit an atomic helper for \model with operands of size \sz, using\r
- * the operation specified by \insn (which is the LSE name), and which\r
- * can be implemented using the generic load-locked/store-conditional\r
- * (LL/SC) sequence below, using the arithmetic operation given by\r
- * \opc.\r
- */\r
- .macro emit_ld_sz, sz:req, insn:req, opc:req, model:req, s, a, l\r
- fn_start __aarch64_\insn\()\sz\()\model\r
- mov tmp0_\sz, r0_\sz\r
-0: ld\a\()xr\s r0_\sz, [x1]\r
- .ifnc \insn, swp\r
- \opc tmp1_\sz, r0_\sz, tmp0_\sz\r
- st\l\()xr\s w15, tmp1_\sz, [x1]\r
- .else\r
- st\l\()xr\s w15, tmp0_\sz, [x1]\r
- .endif\r
- cbnz w15, 0b\r
- ret\r
- fn_end __aarch64_\insn\()\sz\()\model\r
- .endm\r
-\r
- /*\r
- * Emit atomic helpers for \model for operand sizes in the\r
- * set {1, 2, 4, 8}, for the instruction pattern given by\r
- * \insn. (This is the LSE name, but this implementation uses\r
- * the generic LL/SC sequence using \opc as the arithmetic\r
- * operation on the target.)\r
- */\r
- .macro emit_ld, insn:req, opc:req, model:req, a, l\r
- emit_ld_sz 1, \insn, \opc, \model, b, \a, \l\r
- emit_ld_sz 2, \insn, \opc, \model, h, \a, \l\r
- emit_ld_sz 4, \insn, \opc, \model, , \a, \l\r
- emit_ld_sz 8, \insn, \opc, \model, , \a, \l\r
- .endm\r
-\r
- /*\r
- * Emit the compare and swap helper for \model and size \sz\r
- * using LL/SC instructions.\r
- */\r
- .macro emit_cas_sz, sz:req, model:req, uxt:req, s, a, l\r
- fn_start __aarch64_cas\sz\()\model\r
- \uxt tmp0_\sz, r0_\sz\r
-0: ld\a\()xr\s r0_\sz, [x2]\r
- cmp r0_\sz, tmp0_\sz\r
- bne 1f\r
- st\l\()xr\s w15, r1_\sz, [x2]\r
- cbnz w15, 0b\r
-1: ret\r
- fn_end __aarch64_cas\sz\()\model\r
- .endm\r
-\r
- /*\r
- * Emit compare-and-swap helpers for \model for operand sizes in the\r
- * set {1, 2, 4, 8, 16}.\r
- */\r
- .macro emit_cas, model:req, a, l\r
- emit_cas_sz 1, \model, uxtb, b, \a, \l\r
- emit_cas_sz 2, \model, uxth, h, \a, \l\r
- emit_cas_sz 4, \model, mov , , \a, \l\r
- emit_cas_sz 8, \model, mov , , \a, \l\r
-\r
- /*\r
- * We cannot use the parameterized sequence for 16 byte CAS, so we\r
- * need to define it explicitly.\r
- */\r
- fn_start __aarch64_cas16\model\r
- mov x16, x0\r
- mov x17, x1\r
-0: ld\a\()xp x0, x1, [x4]\r
- cmp x0, x16\r
- ccmp x1, x17, #0, eq\r
- bne 1f\r
- st\l\()xp w15, x16, x17, [x4]\r
- cbnz w15, 0b\r
-1: ret\r
- fn_end __aarch64_cas16\model\r
- .endm\r
-\r
- /*\r
- * Emit the set of GCC outline atomic helper functions for\r
- * the memory ordering model given by \model:\r
- * - relax unordered loads and stores\r
- * - acq load-acquire, unordered store\r
- * - rel unordered load, store-release\r
- * - acq_rel load-acquire, store-release\r
- */\r
- .macro emit_model, model:req, a, l\r
- emit_ld ldadd, add, \model, \a, \l\r
- emit_ld ldclr, bic, \model, \a, \l\r
- emit_ld ldeor, eor, \model, \a, \l\r
- emit_ld ldset, orr, \model, \a, \l\r
- emit_ld swp, mov, \model, \a, \l\r
- emit_cas \model, \a, \l\r
- .endm\r
-\r
- emit_model _relax\r
- emit_model _acq, a\r
- emit_model _rel,, l\r
- emit_model _acq_rel, a, l\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__ashlti3)\r
- # return if shift is 0\r
- cbz x2, 1f\r
-\r
- mov x3, #64\r
- sub x3, x3, x2\r
- cmp x3, #0\r
- b.le 2f\r
-\r
- # shift is <= 64 bits\r
- lsr x3, x0, x3\r
- lsl x1, x1, x2\r
- orr x1, x1, x3\r
- lsl x0, x0, x2\r
-1:\r
- ret\r
-\r
-2:\r
- # shift is > 64\r
- neg w3, w3\r
- lsl x1, x0, x3\r
- mov x0, #0\r
- ret\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__ashldi3)\r
- cmp r2, #31\r
- bls L2\r
- cmp r2, #63\r
- subls r2, r2, #32\r
- movls r2, r0, asl r2\r
- movhi r2, #0\r
- mov r1, r2\r
- mov r0, #0\r
- bx lr\r
-L2:\r
- cmp r2, #0\r
- rsbne r3, r2, #32\r
- movne r3, r0, lsr r3\r
- movne r0, r0, asl r2\r
- orrne r1, r3, r1, asl r2\r
- bx lr\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__ashrdi3)\r
- cmp r2, #31\r
- bls L2\r
- cmp r2, #63\r
- subls r2, r2, #32\r
- mov ip, r1, asr #31\r
- movls r2, r1, asr r2\r
- movhi r2, ip\r
- mov r0, r2\r
- mov r1, ip\r
- bx lr\r
-L2:\r
- cmp r2, #0\r
- rsbne r3, r2, #32\r
- movne r3, r1, asl r3\r
- movne r1, r1, asr r2\r
- orrne r0, r3, r0, lsr r2\r
- bx lr\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__clzsi2)\r
- @ frame_needed = 1, uses_anonymous_args = 0\r
- stmfd sp!, {r7, lr}\r
- add r7, sp, #0\r
- movs r3, r0, lsr #16\r
- movne r3, #16\r
- moveq r3, #0\r
- movne r9, #0\r
- moveq r9, #16\r
- mov r3, r0, lsr r3\r
- tst r3, #65280\r
- movne r0, #8\r
- moveq r0, #0\r
- movne lr, #0\r
- moveq lr, #8\r
- mov r3, r3, lsr r0\r
- tst r3, #240\r
- movne r0, #4\r
- moveq r0, #0\r
- movne ip, #0\r
- moveq ip, #4\r
- mov r3, r3, lsr r0\r
- tst r3, #12\r
- movne r0, #2\r
- moveq r0, #0\r
- movne r1, #0\r
- moveq r1, #2\r
- mov r2, r3, lsr r0\r
- add r3, lr, r9\r
- add r0, r3, ip\r
- add r1, r0, r1\r
- mov r0, r2, lsr #1\r
- eor r0, r0, #1\r
- ands r0, r0, #1\r
- mvnne r0, #0\r
- rsb r3, r2, #2\r
- and r0, r0, r3\r
- add r0, r1, r0\r
- ldmfd sp!, {r7, pc}\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__ctzsi2)\r
- uxth r3, r0\r
- cmp r3, #0\r
- moveq ip, #16\r
- movne ip, #0\r
- @ lr needed for prologue\r
- mov r0, r0, lsr ip\r
- tst r0, #255\r
- movne r3, #0\r
- moveq r3, #8\r
- mov r0, r0, lsr r3\r
- tst r0, #15\r
- movne r1, #0\r
- moveq r1, #4\r
- add r3, r3, ip\r
- mov r0, r0, lsr r1\r
- tst r0, #3\r
- movne r2, #0\r
- moveq r2, #2\r
- add r3, r3, r1\r
- mov r0, r0, lsr r2\r
- and r0, r0, #3\r
- add r2, r3, r2\r
- eor r3, r0, #1\r
- mov r0, r0, lsr #1\r
- ands r3, r3, #1\r
- mvnne r3, #0\r
- rsb r0, r0, #2\r
- and r0, r3, r0\r
- add r0, r2, r0\r
- bx lr\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2011, ARM. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-.text\r
-.align 2\r
-GCC_ASM_EXPORT(__aeabi_uidiv)\r
-GCC_ASM_EXPORT(__aeabi_uidivmod)\r
-GCC_ASM_EXPORT(__aeabi_idiv)\r
-GCC_ASM_EXPORT(__aeabi_idivmod)\r
-\r
-# AREA Math, CODE, READONLY\r
-\r
-#\r
-#UINT32\r
-#EFIAPI\r
-#__aeabi_uidivmode (\r
-# IN UINT32 Dividen\r
-# IN UINT32 Divisor\r
-# );\r
-#\r
-\r
-ASM_PFX(__aeabi_uidiv):\r
-ASM_PFX(__aeabi_uidivmod):\r
- rsbs r12, r1, r0, LSR #4\r
- mov r2, #0\r
- bcc ASM_PFX(__arm_div4)\r
- rsbs r12, r1, r0, LSR #8\r
- bcc ASM_PFX(__arm_div8)\r
- mov r3, #0\r
- b ASM_PFX(__arm_div_large)\r
-\r
-#\r
-#INT32\r
-#EFIAPI\r
-#__aeabi_idivmode (\r
-# IN INT32 Dividen\r
-# IN INT32 Divisor\r
-# );\r
-#\r
-ASM_PFX(__aeabi_idiv):\r
-ASM_PFX(__aeabi_idivmod):\r
- orrs r12, r0, r1\r
- bmi ASM_PFX(__arm_div_negative)\r
- rsbs r12, r1, r0, LSR #1\r
- mov r2, #0\r
- bcc ASM_PFX(__arm_div1)\r
- rsbs r12, r1, r0, LSR #4\r
- bcc ASM_PFX(__arm_div4)\r
- rsbs r12, r1, r0, LSR #8\r
- bcc ASM_PFX(__arm_div8)\r
- mov r3, #0\r
- b ASM_PFX(__arm_div_large)\r
-ASM_PFX(__arm_div8):\r
- rsbs r12, r1, r0, LSR #7\r
- subcs r0, r0, r1, LSL #7\r
- adc r2, r2, r2\r
- rsbs r12, r1, r0,LSR #6\r
- subcs r0, r0, r1, LSL #6\r
- adc r2, r2, r2\r
- rsbs r12, r1, r0, LSR #5\r
- subcs r0, r0, r1, LSL #5\r
- adc r2, r2, r2\r
- rsbs r12, r1, r0, LSR #4\r
- subcs r0, r0, r1, LSL #4\r
- adc r2, r2, r2\r
-ASM_PFX(__arm_div4):\r
- rsbs r12, r1, r0, LSR #3\r
- subcs r0, r0, r1, LSL #3\r
- adc r2, r2, r2\r
- rsbs r12, r1, r0, LSR #2\r
- subcs r0, r0, r1, LSL #2\r
- adcs r2, r2, r2\r
- rsbs r12, r1, r0, LSR #1\r
- subcs r0, r0, r1, LSL #1\r
- adc r2, r2, r2\r
-ASM_PFX(__arm_div1):\r
- subs r1, r0, r1\r
- movcc r1, r0\r
- adc r0, r2, r2\r
- bx r14\r
-ASM_PFX(__arm_div_negative):\r
- ands r2, r1, #0x80000000\r
- rsbmi r1, r1, #0\r
- eors r3, r2, r0, ASR #32\r
- rsbcs r0, r0, #0\r
- rsbs r12, r1, r0, LSR #4\r
- bcc label1\r
- rsbs r12, r1, r0, LSR #8\r
- bcc label2\r
-ASM_PFX(__arm_div_large):\r
- lsl r1, r1, #6\r
- rsbs r12, r1, r0, LSR #8\r
- orr r2, r2, #0xfc000000\r
- bcc label2\r
- lsl r1, r1, #6\r
- rsbs r12, r1, r0, LSR #8\r
- orr r2, r2, #0x3f00000\r
- bcc label2\r
- lsl r1, r1, #6\r
- rsbs r12, r1, r0, LSR #8\r
- orr r2, r2, #0xfc000\r
- orrcs r2, r2, #0x3f00\r
- lslcs r1, r1, #6\r
- rsbs r12, r1, #0\r
- bcs ASM_PFX(__aeabi_idiv0)\r
-label3:\r
- lsrcs r1, r1, #6\r
-label2:\r
- rsbs r12, r1, r0, LSR #7\r
- subcs r0, r0, r1, LSL #7\r
- adc r2, r2, r2\r
- rsbs r12, r1, r0, LSR #6\r
- subcs r0, r0, r1, LSL #6\r
- adc r2, r2, r2\r
- rsbs r12, r1, r0, LSR #5\r
- subcs r0, r0, r1, LSL #5\r
- adc r2, r2, r2\r
- rsbs r12, r1, r0, LSR #4\r
- subcs r0, r0, r1, LSL #4\r
- adc r2, r2, r2\r
-label1:\r
- rsbs r12, r1, r0, LSR #3\r
- subcs r0, r0, r1, LSL #3\r
- adc r2, r2, r2\r
- rsbs r12, r1, r0, LSR #2\r
- subcs r0, r0, r1, LSL #2\r
- adcs r2, r2, r2\r
- bcs label3\r
- rsbs r12, r1, r0, LSR #1\r
- subcs r0, r0, r1, LSL #1\r
- adc r2, r2, r2\r
- subs r1, r0, r1\r
- movcc r1, r0\r
- adc r0, r2, r2\r
- asrs r3, r3, #31\r
- rsbmi r0, r0, #0\r
- rsbcs r1, r1, #0\r
- bx r14\r
-\r
- @ What to do about division by zero? For now, just return.\r
-ASM_PFX(__aeabi_idiv0):\r
- bx r14\r
+++ /dev/null
-//------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-// Copyright (c) 2018, Pete Batard. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-//------------------------------------------------------------------------------\r
-\r
-\r
- EXPORT __aeabi_uidiv\r
- EXPORT __aeabi_uidivmod\r
- EXPORT __aeabi_idiv\r
- EXPORT __aeabi_idivmod\r
- EXPORT __rt_udiv\r
- EXPORT __rt_sdiv\r
-\r
- AREA Math, CODE, READONLY\r
-\r
-;\r
-;UINT32\r
-;EFIAPI\r
-;__aeabi_uidivmod (\r
-; IN UINT32 Dividend\r
-; IN UINT32 Divisor\r
-; );\r
-;\r
-__aeabi_uidiv\r
-__aeabi_uidivmod\r
- RSBS r12, r1, r0, LSR #4\r
- MOV r2, #0\r
- BCC __arm_div4\r
- RSBS r12, r1, r0, LSR #8\r
- BCC __arm_div8\r
- MOV r3, #0\r
- B __arm_div_large\r
-\r
-;\r
-;UINT64\r
-;EFIAPI\r
-;__rt_udiv (\r
-; IN UINT32 Divisor,\r
-; IN UINT32 Dividend\r
-; );\r
-;\r
-__rt_udiv\r
- ; Swap R0 and R1\r
- MOV r12, r0\r
- MOV r0, r1\r
- MOV r1, r12\r
- B __aeabi_uidivmod\r
-\r
-;\r
-;UINT64\r
-;EFIAPI\r
-;__rt_sdiv (\r
-; IN INT32 Divisor,\r
-; IN INT32 Dividend\r
-; );\r
-;\r
-__rt_sdiv\r
- ; Swap R0 and R1\r
- MOV r12, r0\r
- MOV r0, r1\r
- MOV r1, r12\r
- B __aeabi_idivmod\r
-\r
-;\r
-;INT32\r
-;EFIAPI\r
-;__aeabi_idivmod (\r
-; IN INT32 Dividend\r
-; IN INT32 Divisor\r
-; );\r
-;\r
-__aeabi_idiv\r
-__aeabi_idivmod\r
- ORRS r12, r0, r1\r
- BMI __arm_div_negative\r
- RSBS r12, r1, r0, LSR #1\r
- MOV r2, #0\r
- BCC __arm_div1\r
- RSBS r12, r1, r0, LSR #4\r
- BCC __arm_div4\r
- RSBS r12, r1, r0, LSR #8\r
- BCC __arm_div8\r
- MOV r3, #0\r
- B __arm_div_large\r
-__arm_div8\r
- RSBS r12, r1, r0, LSR #7\r
- SUBCS r0, r0, r1, LSL #7\r
- ADC r2, r2, r2\r
- RSBS r12, r1, r0,LSR #6\r
- SUBCS r0, r0, r1, LSL #6\r
- ADC r2, r2, r2\r
- RSBS r12, r1, r0, LSR #5\r
- SUBCS r0, r0, r1, LSL #5\r
- ADC r2, r2, r2\r
- RSBS r12, r1, r0, LSR #4\r
- SUBCS r0, r0, r1, LSL #4\r
- ADC r2, r2, r2\r
-__arm_div4\r
- RSBS r12, r1, r0, LSR #3\r
- SUBCS r0, r0, r1, LSL #3\r
- ADC r2, r2, r2\r
- RSBS r12, r1, r0, LSR #2\r
- SUBCS r0, r0, r1, LSL #2\r
- ADCS r2, r2, r2\r
- RSBS r12, r1, r0, LSR #1\r
- SUBCS r0, r0, r1, LSL #1\r
- ADC r2, r2, r2\r
-__arm_div1\r
- SUBS r1, r0, r1\r
- MOVCC r1, r0\r
- ADC r0, r2, r2\r
- BX r14\r
-__arm_div_negative\r
- ANDS r2, r1, #0x80000000\r
- RSBMI r1, r1, #0\r
- EORS r3, r2, r0, ASR #32\r
- RSBCS r0, r0, #0\r
- RSBS r12, r1, r0, LSR #4\r
- BCC label1\r
- RSBS r12, r1, r0, LSR #8\r
- BCC label2\r
-__arm_div_large\r
- LSL r1, r1, #6\r
- RSBS r12, r1, r0, LSR #8\r
- ORR r2, r2, #0xfc000000\r
- BCC label2\r
- LSL r1, r1, #6\r
- RSBS r12, r1, r0, LSR #8\r
- ORR r2, r2, #0x3f00000\r
- BCC label2\r
- LSL r1, r1, #6\r
- RSBS r12, r1, r0, LSR #8\r
- ORR r2, r2, #0xfc000\r
- ORRCS r2, r2, #0x3f00\r
- LSLCS r1, r1, #6\r
- RSBS r12, r1, #0\r
- BCS __aeabi_idiv0\r
-label3\r
- LSRCS r1, r1, #6\r
-label2\r
- RSBS r12, r1, r0, LSR #7\r
- SUBCS r0, r0, r1, LSL #7\r
- ADC r2, r2, r2\r
- RSBS r12, r1, r0, LSR #6\r
- SUBCS r0, r0, r1, LSL #6\r
- ADC r2, r2, r2\r
- RSBS r12, r1, r0, LSR #5\r
- SUBCS r0, r0, r1, LSL #5\r
- ADC r2, r2, r2\r
- RSBS r12, r1, r0, LSR #4\r
- SUBCS r0, r0, r1, LSL #4\r
- ADC r2, r2, r2\r
-label1\r
- RSBS r12, r1, r0, LSR #3\r
- SUBCS r0, r0, r1, LSL #3\r
- ADC r2, r2, r2\r
- RSBS r12, r1, r0, LSR #2\r
- SUBCS r0, r0, r1, LSL #2\r
- ADCS r2, r2, r2\r
- BCS label3\r
- RSBS r12, r1, r0, LSR #1\r
- SUBCS r0, r0, r1, LSL #1\r
- ADC r2, r2, r2\r
- SUBS r1, r0, r1\r
- MOVCC r1, r0\r
- ADC r0, r2, r2\r
- ASRS r3, r3, #31\r
- RSBMI r0, r0, #0\r
- RSBCS r1, r1, #0\r
- BX r14\r
-\r
- ; What to do about division by zero? For now, just return.\r
-__aeabi_idiv0\r
- BX r14\r
-\r
- END\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__divdi3)\r
- @ args = 0, pretend = 0, frame = 0\r
- @ frame_needed = 1, uses_anonymous_args = 0\r
- stmfd sp!, {r4, r5, r7, lr}\r
- mov r4, r3, asr #31\r
- add r7, sp, #8\r
- stmfd sp!, {r10, r11}\r
- mov r10, r1, asr #31\r
- sub sp, sp, #8\r
- mov r11, r10\r
- mov r5, r4\r
- eor r0, r0, r10\r
- eor r1, r1, r10\r
- eor r2, r2, r4\r
- eor r3, r3, r4\r
- subs r2, r2, r4\r
- sbc r3, r3, r5\r
- mov ip, #0\r
- subs r0, r0, r10\r
- sbc r1, r1, r11\r
- str ip, [sp, #0]\r
- bl ASM_PFX(__udivmoddi4)\r
- eor r2, r10, r4\r
- eor r3, r10, r4\r
- eor r0, r0, r2\r
- eor r1, r1, r3\r
- subs r0, r0, r2\r
- sbc r1, r1, r3\r
- sub sp, r7, #16\r
- ldmfd sp!, {r10, r11}\r
- ldmfd sp!, {r4, r5, r7, pc}\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__divsi3)\r
- eor r3, r0, r0, asr #31\r
- eor r2, r1, r1, asr #31\r
- stmfd sp!, {r4, r5, r7, lr}\r
- mov r5, r0, asr #31\r
- add r7, sp, #8\r
- mov r4, r1, asr #31\r
- sub r0, r3, r0, asr #31\r
- sub r1, r2, r1, asr #31\r
- bl ASM_PFX(__udivsi3)\r
- eor r1, r5, r4\r
- eor r0, r0, r1\r
- rsb r0, r1, r0\r
- ldmfd sp!, {r4, r5, r7, pc}\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-#\r
-#UINT64\r
-#EFIAPI\r
-#__aeabi_lasr (\r
-# IN UINT64 Value\r
-# IN UINT32 Shift\r
-# );\r
-#\r
-ASM_FUNC(__aeabi_lasr)\r
- subs r3,r2,#0x20\r
- bpl L_Test\r
- rsb r3,r2,#0x20\r
- lsr r0,r0,r2\r
- orr r0,r0,r1,LSL r3\r
- asr r1,r1,r2\r
- bx lr\r
-L_Test:\r
- asr r0,r1,r3\r
- asr r1,r1,#31\r
- bx lr\r
+++ /dev/null
-//------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-//------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-//\r
-// A pair of (unsigned) long longs is returned in {{r0, r1}, {r2, r3}},\r
-// the quotient in {r0, r1}, and the remainder in {r2, r3}.\r
-//\r
-//__value_in_regs lldiv_t\r
-//EFIAPI\r
-//__aeabi_ldivmod (\r
-// IN UINT64 Dividen\r
-// IN UINT64 Divisor\r
-// )//\r
-//\r
-\r
-ASM_FUNC(__aeabi_ldivmod)\r
- push {r4,lr}\r
- asrs r4,r1,#1\r
- eor r4,r4,r3,LSR #1\r
- bpl L_Test1\r
- rsbs r0,r0,#0\r
- rsc r1,r1,#0\r
-L_Test1:\r
- tst r3,r3\r
- bpl L_Test2\r
- rsbs r2,r2,#0\r
- rsc r3,r3,#0\r
-L_Test2:\r
- bl ASM_PFX(__aeabi_uldivmod)\r
- tst r4,#0x40000000\r
- beq L_Test3\r
- rsbs r0,r0,#0\r
- rsc r1,r1,#0\r
-L_Test3:\r
- tst r4,#0x80000000\r
- beq L_Exit\r
- rsbs r2,r2,#0\r
- rsc r3,r3,#0\r
-L_Exit:\r
- pop {r4,pc}\r
-\r
-\r
-\r
+++ /dev/null
-//------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-// Copyright (c) 2018, Pete Batard. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-//------------------------------------------------------------------------------\r
-\r
-\r
- IMPORT __aeabi_uldivmod\r
- EXPORT __aeabi_ldivmod\r
- EXPORT __rt_sdiv64\r
-\r
- AREA s___aeabi_ldivmod, CODE, READONLY, ARM\r
-\r
- ARM\r
-\r
-;\r
-;INT64\r
-;EFIAPI\r
-;__rt_sdiv64 (\r
-; IN INT64 Divisor\r
-; IN INT64 Dividend\r
-; );\r
-;\r
-__rt_sdiv64\r
- ; Swap r0-r1 and r2-r3\r
- MOV r12, r0\r
- MOV r0, r2\r
- MOV r2, r12\r
- MOV r12, r1\r
- MOV r1, r3\r
- MOV r3, r12\r
- B __aeabi_ldivmod\r
-\r
-;\r
-;INT64\r
-;EFIAPI\r
-;__aeabi_ldivmod (\r
-; IN INT64 Dividend\r
-; IN INT64 Divisor\r
-; );\r
-;\r
-__aeabi_ldivmod\r
- PUSH {r4,lr}\r
- ASRS r4,r1,#1\r
- EOR r4,r4,r3,LSR #1\r
- BPL L_Test1\r
- RSBS r0,r0,#0\r
- RSC r1,r1,#0\r
-L_Test1\r
- TST r3,r3\r
- BPL L_Test2\r
- RSBS r2,r2,#0\r
- RSC r3,r3,#0\r
-L_Test2\r
- BL __aeabi_uldivmod\r
- TST r4,#0x40000000\r
- BEQ L_Test3\r
- RSBS r0,r0,#0\r
- RSC r1,r1,#0\r
-L_Test3\r
- TST r4,#0x80000000\r
- BEQ L_Exit\r
- RSBS r2,r2,#0\r
- RSC r3,r3,#0\r
-L_Exit\r
- POP {r4,pc}\r
-\r
- END\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2013, ARM. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-#\r
-#VOID\r
-#EFIAPI\r
-#__aeabi_llsl (\r
-# IN VOID *Destination,\r
-# IN VOID *Source,\r
-# IN UINT32 Size\r
-# );\r
-#\r
-ASM_FUNC(__aeabi_llsl)\r
- subs r3,r2,#0x20\r
- bpl 1f\r
- rsb r3,r2,#0x20\r
- lsl r1,r1,r2\r
- orr r1,r1,r0,lsr r3\r
- lsl r0,r0,r2\r
- bx lr\r
-1:\r
- lsl r1,r0,r3\r
- mov r0,#0\r
- bx lr\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2013, ARM. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-#VOID\r
-#EFIAPI\r
-#__aeabi_llsr (\r
-# IN VOID *Destination,\r
-# IN VOID *Source,\r
-# IN UINT32 Size\r
-# );\r
-#\r
-ASM_FUNC(__aeabi_llsr)\r
- subs r3,r2,#0x20\r
- bpl 1f\r
- rsb r3,r2,#0x20\r
- lsr r0,r0,r2\r
- orr r0,r0,r1,lsl r3\r
- lsr r1,r1,r2\r
- bx lr\r
-1:\r
- lsr r0,r1,r3\r
- mov r1,#0\r
- bx lr\r
+++ /dev/null
-//------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-// Copyright (c) 2018, Pete Batard. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-//------------------------------------------------------------------------------\r
-\r
- EXPORT __aeabi_llsr\r
- EXPORT __rt_srsh\r
-\r
- AREA s___aeabi_llsr, CODE, READONLY, ARM\r
-\r
- ARM\r
-\r
-;\r
-;VOID\r
-;EFIAPI\r
-;__aeabi_llsr (\r
-; IN UINT64 Value,\r
-; IN UINT32 Shift\r
-;)\r
-;\r
-__aeabi_llsr\r
-__rt_srsh\r
- SUBS r3,r2,#0x20\r
- BPL __aeabi_llsr_label1\r
- RSB r3,r2,#0x20\r
- LSR r0,r0,r2\r
- ORR r0,r0,r1,LSL r3\r
- LSR r1,r1,r2\r
- BX lr\r
-__aeabi_llsr_label1\r
- LSR r0,r1,r3\r
- MOV r1,#0\r
- BX lr\r
-\r
- END\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__lshrdi3)\r
- cmp r2, #31\r
- bls L2\r
- cmp r2, #63\r
- subls r2, r2, #32\r
- movls r2, r1, lsr r2\r
- movhi r2, #0\r
- mov r0, r2\r
- mov r1, #0\r
- bx lr\r
-L2:\r
- cmp r2, #0\r
- rsbne r3, r2, #32\r
- movne r3, r1, asl r3\r
- movne r1, r1, lsr r2\r
- orrne r0, r3, r0, lsr r2\r
- bx lr\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2011-2014, ARM Limited. All rights reserved.\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-# VOID\r
-# EFIAPI\r
-# memmove (\r
-# IN VOID *Destination,\r
-# IN CONST VOID *Source,\r
-# IN UINT32 Size\r
-# );\r
-ASM_FUNC(memmove)\r
- CMP r2, #0\r
- BXEQ lr\r
- CMP r0, r1\r
- BXEQ lr\r
- BHI memmove_backward\r
-\r
-memmove_forward:\r
- LDRB r3, [r1], #1\r
- STRB r3, [r0], #1\r
- SUBS r2, r2, #1\r
- BXEQ lr\r
- B memmove_forward\r
-\r
-memmove_backward:\r
- add r0, r2\r
- add r1, r2\r
-memmove_backward_loop:\r
- LDRB r3, [r1, #-1]!\r
- STRB r3, [r0, #-1]!\r
- SUBS r2, r2, #1\r
- BXEQ lr\r
- B memmove_backward_loop\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__moddi3)\r
- stmfd sp!, {r4, r5, r7, lr}\r
- mov r4, r1, asr #31\r
- add r7, sp, #8\r
- stmfd sp!, {r10, r11}\r
- mov r10, r3, asr #31\r
- sub sp, sp, #16\r
- mov r5, r4\r
- mov r11, r10\r
- eor r0, r0, r4\r
- eor r1, r1, r4\r
- eor r2, r2, r10\r
- eor r3, r3, r10\r
- add ip, sp, #8\r
- subs r0, r0, r4\r
- sbc r1, r1, r5\r
- subs r2, r2, r10\r
- sbc r3, r3, r11\r
- str ip, [sp, #0]\r
- bl ASM_PFX(__udivmoddi4)\r
- ldrd r0, [sp, #8]\r
- eor r0, r0, r4\r
- eor r1, r1, r4\r
- subs r0, r0, r4\r
- sbc r1, r1, r5\r
- sub sp, r7, #16\r
- ldmfd sp!, {r10, r11}\r
- ldmfd sp!, {r4, r5, r7, pc}\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__modsi3)\r
- stmfd sp!, {r4, r5, r7, lr}\r
- add r7, sp, #8\r
- mov r5, r0\r
- mov r4, r1\r
- bl ASM_PFX(__divsi3)\r
- mul r0, r4, r0\r
- rsb r0, r0, r5\r
- ldmfd sp!, {r4, r5, r7, pc}\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__muldi3)\r
- stmfd sp!, {r4, r5, r6, r7, lr}\r
- add r7, sp, #12\r
- stmfd sp!, {r8, r10, r11}\r
- ldr r11, L4\r
- mov r4, r0, lsr #16\r
- and r8, r0, r11\r
- and ip, r2, r11\r
- mul lr, ip, r8\r
- mul ip, r4, ip\r
- sub sp, sp, #8\r
- add r10, ip, lr, lsr #16\r
- and ip, r10, r11\r
- and lr, lr, r11\r
- mov r6, r2, lsr #16\r
- str r4, [sp, #4]\r
- add r4, lr, ip, asl #16\r
- mul ip, r8, r6\r
- mov r5, r10, lsr #16\r
- add r10, ip, r4, lsr #16\r
- and ip, r10, r11\r
- and lr, r4, r11\r
- add r4, lr, ip, asl #16\r
- mul r0, r3, r0\r
- add ip, r5, r10, lsr #16\r
- ldr r5, [sp, #4]\r
- mla r0, r2, r1, r0\r
- mla r5, r6, r5, ip\r
- mov r10, r4\r
- add r11, r0, r5\r
- mov r1, r11\r
- mov r0, r4\r
- sub sp, r7, #24\r
- ldmfd sp!, {r8, r10, r11}\r
- ldmfd sp!, {r4, r5, r6, r7, pc}\r
- .p2align 2\r
-L5:\r
- .align 2\r
-L4:\r
- .long 65535\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-.text\r
-\r
-GCC_ASM_EXPORT(__ARM_ll_mullu)\r
-GCC_ASM_EXPORT(__aeabi_lmul)\r
-#\r
-#INT64\r
-#EFIAPI\r
-#__aeabi_lmul (\r
-# IN INT64 Multiplicand\r
-# IN INT32 Multiplier\r
-# );\r
-#\r
-ASM_PFX(__ARM_ll_mullu):\r
- mov r3, #0\r
-# Make upper part of INT64 Multiplier 0 and use __aeabi_lmul\r
-\r
-#\r
-#INT64\r
-#EFIAPI\r
-#__aeabi_lmul (\r
-# IN INT64 Multiplicand\r
-# IN INT64 Multiplier\r
-# );\r
-#\r
-ASM_PFX(__aeabi_lmul):\r
- stmdb sp!, {lr}\r
- mov lr, r0\r
- umull r0, ip, r2, lr\r
- mla r1, r2, r1, ip\r
- mla r1, r3, lr, r1\r
- ldmia sp!, {pc}\r
+++ /dev/null
-#------s------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-\r
- .text\r
- .align 2\r
- GCC_ASM_EXPORT(__aeabi_ulcmp)\r
-\r
-ASM_PFX(__aeabi_ulcmp):\r
- stmfd sp!, {r4, r5, r8}\r
- cmp r3, r1\r
- mov r8, r0\r
- mov r9, r1\r
- mov r4, r2\r
- mov r5, r3\r
- bls L16\r
-L2:\r
- mvn r0, #0\r
-L1:\r
- ldmfd sp!, {r4, r5, r8}\r
- bx lr\r
-L16:\r
- beq L17\r
-L4:\r
- cmp r9, r5\r
- bhi L7\r
- beq L18\r
- cmp r8, r4\r
-L14:\r
- cmpeq r9, r5\r
- moveq r0, #0\r
- beq L1\r
- b L1\r
-L18:\r
- cmp r8, r4\r
- bls L14\r
-L7:\r
- mov r0, #1\r
- b L1\r
-L17:\r
- cmp r2, r0\r
- bhi L2\r
- b L4\r
-\r
+++ /dev/null
-#/** @file\r
-# Compiler intrinsic for ARM compiler\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#**/\r
-#\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-.syntax unified\r
-\r
-ASM_FUNC(__switch16)\r
- ldrh ip, [lr, #-1]\r
- cmp r0, ip\r
- add r0, lr, r0, lsl #1\r
- ldrshcc r0, [r0, #1]\r
- add ip, lr, ip, lsl #1\r
- ldrshcs r0, [ip, #1]\r
- add ip, lr, r0, lsl #1\r
- bx ip\r
-\r
-\r
+++ /dev/null
-#/** @file\r
-# Compiler intrinsic for ARM compiler\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#**/\r
-#\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-.syntax unified\r
-\r
-ASM_FUNC(__switch32)\r
- ldr ip, [lr, #-1]\r
- cmp r0, ip\r
- add r0, lr, r0, lsl #2\r
- ldrcc r0, [r0, #3]\r
- add ip, lr, ip, lsl #2\r
- ldrcs r0, [ip, #3]\r
- add ip, lr, r0\r
- bx ip\r
-\r
+++ /dev/null
-#/** @file\r
-# Compiler intrinsic for ARM compiler\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#**/\r
-#\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-.syntax unified\r
-\r
-ASM_FUNC(__switch8)\r
- ldrb ip, [lr, #-1]\r
- cmp r0, ip\r
- ldrsbcc r0, [lr, r0]\r
- ldrsbcs r0, [lr, ip]\r
- add ip, lr, r0, lsl #1\r
- bx ip\r
-\r
+++ /dev/null
-#/** @file\r
-# Compiler intrinsic for ARM compiler\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#**/\r
-#\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-.syntax unified\r
-\r
-ASM_FUNC(__switchu8)\r
- ldrb ip,[lr,#-1]\r
- cmp r0,ip\r
- ldrbcc r0,[lr,r0]\r
- ldrbcs r0,[lr,ip]\r
- add ip,lr,r0,LSL #1\r
- bx ip\r
-\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__ucmpdi2)\r
- stmfd sp!, {r4, r5, r8, lr}\r
- cmp r1, r3\r
- mov r8, r0\r
- mov r4, r2\r
- mov r5, r3\r
- bcc L2\r
- bhi L4\r
- cmp r0, r2\r
- bcc L2\r
- movls r0, #1\r
- bls L8\r
- b L4\r
-L2:\r
- mov r0, #0\r
- b L8\r
-L4:\r
- mov r0, #2\r
-L8:\r
- ldmfd sp!, {r4, r5, r8, pc}\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__udivdi3)\r
- stmfd sp!, {r7, lr}\r
- add r7, sp, #0\r
- sub sp, sp, #8\r
- mov ip, #0\r
- str ip, [sp, #0]\r
- bl ASM_PFX(__udivmoddi4)\r
- sub sp, r7, #0\r
- ldmfd sp!, {r7, pc}\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
- .syntax unified\r
-\r
-ASM_FUNC(__udivmoddi4)\r
- stmfd sp!, {r4, r5, r6, r7, lr}\r
- add r7, sp, #12\r
- stmfd sp!, {r10, r11}\r
- sub sp, sp, #20\r
- stmia sp, {r2-r3}\r
- ldr r6, [sp, #48]\r
- orrs r2, r2, r3\r
- mov r10, r0\r
- mov r11, r1\r
- beq L2\r
- subs ip, r1, #0\r
- bne L4\r
- cmp r3, #0\r
- bne L6\r
- cmp r6, #0\r
- beq L8\r
- mov r1, r2\r
- bl ASM_PFX(__umodsi3)\r
- mov r1, #0\r
- stmia r6, {r0-r1}\r
-L8:\r
- ldr r1, [sp, #0]\r
- mov r0, r10\r
- b L45\r
-L6:\r
- cmp r6, #0\r
- movne r1, #0\r
- stmiane r6, {r0-r1}\r
- b L2\r
-L4:\r
- ldr r1, [sp, #0]\r
- cmp r1, #0\r
- bne L12\r
- ldr r2, [sp, #4]\r
- cmp r2, #0\r
- bne L14\r
- cmp r6, #0\r
- beq L16\r
- mov r1, r2\r
- mov r0, r11\r
- bl ASM_PFX(__umodsi3)\r
- mov r1, #0\r
- stmia r6, {r0-r1}\r
-L16:\r
- ldr r1, [sp, #4]\r
- mov r0, r11\r
-L45:\r
- bl ASM_PFX(__udivsi3)\r
-L46:\r
- mov r10, r0\r
- mov r11, #0\r
- b L10\r
-L14:\r
- subs r1, r0, #0\r
- bne L18\r
- cmp r6, #0\r
- beq L16\r
- ldr r1, [sp, #4]\r
- mov r0, r11\r
- bl ASM_PFX(__umodsi3)\r
- mov r4, r10\r
- mov r5, r0\r
- stmia r6, {r4-r5}\r
- b L16\r
-L18:\r
- sub r3, r2, #1\r
- tst r2, r3\r
- bne L22\r
- cmp r6, #0\r
- movne r4, r0\r
- andne r5, ip, r3\r
- stmiane r6, {r4-r5}\r
-L24:\r
- rsb r3, r2, #0\r
- and r3, r2, r3\r
- clz r3, r3\r
- rsb r3, r3, #31\r
- mov r0, ip, lsr r3\r
- b L46\r
-L22:\r
- clz r2, r2\r
- clz r3, ip\r
- rsb r3, r3, r2\r
- cmp r3, #30\r
- bhi L48\r
- rsb r2, r3, #31\r
- add lr, r3, #1\r
- mov r3, r1, asl r2\r
- str r3, [sp, #12]\r
- mov r3, r1, lsr lr\r
- ldr r0, [sp, #0]\r
- mov r5, ip, lsr lr\r
- orr r4, r3, ip, asl r2\r
- str r0, [sp, #8]\r
- b L29\r
-L12:\r
- ldr r3, [sp, #4]\r
- cmp r3, #0\r
- bne L30\r
- sub r3, r1, #1\r
- tst r1, r3\r
- bne L32\r
- cmp r6, #0\r
- andne r3, r3, r0\r
- movne r2, r3\r
- movne r3, #0\r
- stmiane r6, {r2-r3}\r
-L34:\r
- cmp r1, #1\r
- beq L10\r
- rsb r3, r1, #0\r
- and r3, r1, r3\r
- clz r3, r3\r
- rsb r0, r3, #31\r
- mov r1, ip, lsr r0\r
- rsb r3, r0, #32\r
- mov r0, r10, lsr r0\r
- orr ip, r0, ip, asl r3\r
- str r1, [sp, #12]\r
- str ip, [sp, #8]\r
- ldrd r10, [sp, #8]\r
- b L10\r
-L32:\r
- clz r2, r1\r
- clz r3, ip\r
- rsb r3, r3, r2\r
- rsb r4, r3, #31\r
- mov r2, r0, asl r4\r
- mvn r1, r3\r
- and r2, r2, r1, asr #31\r
- add lr, r3, #33\r
- str r2, [sp, #8]\r
- add r2, r3, #1\r
- mov r3, r3, asr #31\r
- and r0, r3, r0, asl r1\r
- mov r3, r10, lsr r2\r
- orr r3, r3, ip, asl r4\r
- and r3, r3, r1, asr #31\r
- orr r0, r0, r3\r
- mov r3, ip, lsr lr\r
- str r0, [sp, #12]\r
- mov r0, r10, lsr lr\r
- and r5, r3, r2, asr #31\r
- rsb r3, lr, #31\r
- mov r3, r3, asr #31\r
- orr r0, r0, ip, asl r1\r
- and r3, r3, ip, lsr r2\r
- and r0, r0, r2, asr #31\r
- orr r4, r3, r0\r
- b L29\r
-L30:\r
- clz r2, r3\r
- clz r3, ip\r
- rsb r3, r3, r2\r
- cmp r3, #31\r
- bls L37\r
-L48:\r
- cmp r6, #0\r
- stmiane r6, {r10-r11}\r
- b L2\r
-L37:\r
- rsb r1, r3, #31\r
- mov r0, r0, asl r1\r
- add lr, r3, #1\r
- mov r2, #0\r
- str r0, [sp, #12]\r
- mov r0, r10, lsr lr\r
- str r2, [sp, #8]\r
- sub r2, r3, #31\r
- and r0, r0, r2, asr #31\r
- mov r3, ip, lsr lr\r
- orr r4, r0, ip, asl r1\r
- and r5, r3, r2, asr #31\r
-L29:\r
- mov ip, #0\r
- mov r10, ip\r
- b L40\r
-L41:\r
- ldr r1, [sp, #12]\r
- ldr r2, [sp, #8]\r
- mov r3, r4, lsr #31\r
- orr r5, r3, r5, asl #1\r
- mov r3, r1, lsr #31\r
- orr r4, r3, r4, asl #1\r
- mov r3, r2, lsr #31\r
- orr r0, r3, r1, asl #1\r
- orr r1, ip, r2, asl #1\r
- ldmia sp, {r2-r3}\r
- str r0, [sp, #12]\r
- subs r2, r2, r4\r
- sbc r3, r3, r5\r
- str r1, [sp, #8]\r
- subs r0, r2, #1\r
- sbc r1, r3, #0\r
- mov r2, r1, asr #31\r
- ldmia sp, {r0-r1}\r
- mov r3, r2\r
- and ip, r2, #1\r
- and r3, r3, r1\r
- and r2, r2, r0\r
- subs r4, r4, r2\r
- sbc r5, r5, r3\r
- add r10, r10, #1\r
-L40:\r
- cmp r10, lr\r
- bne L41\r
- ldrd r0, [sp, #8]\r
- adds r0, r0, r0\r
- adc r1, r1, r1\r
- cmp r6, #0\r
- orr r10, r0, ip\r
- mov r11, r1\r
- stmiane r6, {r4-r5}\r
- b L10\r
-L2:\r
- mov r10, #0\r
- mov r11, #0\r
-L10:\r
- mov r0, r10\r
- mov r1, r11\r
- sub sp, r7, #20\r
- ldmfd sp!, {r10, r11}\r
- ldmfd sp!, {r4, r5, r6, r7, pc}\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
- .syntax unified\r
-\r
-ASM_FUNC(__udivsi3)\r
- cmp r1, #0\r
- cmpne r0, #0\r
- stmfd sp!, {r4, r5, r7, lr}\r
- add r7, sp, #8\r
- beq L2\r
- clz r2, r1\r
- clz r3, r0\r
- rsb r3, r3, r2\r
- cmp r3, #31\r
- bhi L2\r
- ldmfdeq sp!, {r4, r5, r7, pc}\r
- add r5, r3, #1\r
- rsb r3, r3, #31\r
- mov lr, #0\r
- mov r2, r0, asl r3\r
- mov ip, r0, lsr r5\r
- mov r4, lr\r
- b L8\r
-L9:\r
- mov r0, r2, lsr #31\r
- orr ip, r0, ip, asl #1\r
- orr r2, r3, lr\r
- rsb r3, ip, r1\r
- sub r3, r3, #1\r
- and r0, r1, r3, asr #31\r
- mov lr, r3, lsr #31\r
- rsb ip, r0, ip\r
- add r4, r4, #1\r
-L8:\r
- cmp r4, r5\r
- mov r3, r2, asl #1\r
- bne L9\r
- orr r0, r3, lr\r
- ldmfd sp!, {r4, r5, r7, pc}\r
-L2:\r
- mov r0, #0\r
- ldmfd sp!, {r4, r5, r7, pc}\r
-\r
+++ /dev/null
-//------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-//------------------------------------------------------------------------------\r
-\r
-\r
-\r
- .text\r
- .align 2\r
- GCC_ASM_EXPORT(__aeabi_uldivmod)\r
-\r
-//\r
-//UINT64\r
-//EFIAPI\r
-//__aeabi_uldivmod (\r
-// IN UINT64 Dividend\r
-// IN UINT64 Divisor\r
-// )\r
-//\r
-ASM_PFX(__aeabi_uldivmod):\r
- stmdb sp!, {r4, r5, r6, lr}\r
- mov r4, r1\r
- mov r5, r0\r
- mov r6, #0 // 0x0\r
- orrs ip, r3, r2, lsr #31\r
- bne ASM_PFX(__aeabi_uldivmod_label1)\r
- tst r2, r2\r
- beq ASM_PFX(_ll_div0)\r
- movs ip, r2, lsr #15\r
- addeq r6, r6, #16 // 0x10\r
- mov ip, r2, lsl r6\r
- movs lr, ip, lsr #23\r
- moveq ip, ip, lsl #8\r
- addeq r6, r6, #8 // 0x8\r
- movs lr, ip, lsr #27\r
- moveq ip, ip, lsl #4\r
- addeq r6, r6, #4 // 0x4\r
- movs lr, ip, lsr #29\r
- moveq ip, ip, lsl #2\r
- addeq r6, r6, #2 // 0x2\r
- movs lr, ip, lsr #30\r
- moveq ip, ip, lsl #1\r
- addeq r6, r6, #1 // 0x1\r
- b ASM_PFX(_ll_udiv_small)\r
-ASM_PFX(__aeabi_uldivmod_label1):\r
- tst r3, #-2147483648 // 0x80000000\r
- bne ASM_PFX(__aeabi_uldivmod_label2)\r
- movs ip, r3, lsr #15\r
- addeq r6, r6, #16 // 0x10\r
- mov ip, r3, lsl r6\r
- movs lr, ip, lsr #23\r
- moveq ip, ip, lsl #8\r
- addeq r6, r6, #8 // 0x8\r
- movs lr, ip, lsr #27\r
- moveq ip, ip, lsl #4\r
- addeq r6, r6, #4 // 0x4\r
- movs lr, ip, lsr #29\r
- moveq ip, ip, lsl #2\r
- addeq r6, r6, #2 // 0x2\r
- movs lr, ip, lsr #30\r
- addeq r6, r6, #1 // 0x1\r
- rsb r3, r6, #32 // 0x20\r
- moveq ip, ip, lsl #1\r
- orr ip, ip, r2, lsr r3\r
- mov lr, r2, lsl r6\r
- b ASM_PFX(_ll_udiv_big)\r
-ASM_PFX(__aeabi_uldivmod_label2):\r
- mov ip, r3\r
- mov lr, r2\r
- b ASM_PFX(_ll_udiv_ginormous)\r
-\r
-ASM_PFX(_ll_udiv_small):\r
- cmp r4, ip, lsl #1\r
- mov r3, #0 // 0x0\r
- subcs r4, r4, ip, lsl #1\r
- addcs r3, r3, #2 // 0x2\r
- cmp r4, ip\r
- subcs r4, r4, ip\r
- adcs r3, r3, #0 // 0x0\r
- add r2, r6, #32 // 0x20\r
- cmp r2, #32 // 0x20\r
- rsb ip, ip, #0 // 0x0\r
- bcc ASM_PFX(_ll_udiv_small_label1)\r
- orrs r0, r4, r5, lsr #30\r
- moveq r4, r5\r
- moveq r5, #0 // 0x0\r
- subeq r2, r2, #32 // 0x20\r
-ASM_PFX(_ll_udiv_small_label1):\r
- mov r1, #0 // 0x0\r
- cmp r2, #16 // 0x10\r
- bcc ASM_PFX(_ll_udiv_small_label2)\r
- movs r0, r4, lsr #14\r
- moveq r4, r4, lsl #16\r
- addeq r1, r1, #16 // 0x10\r
-ASM_PFX(_ll_udiv_small_label2):\r
- sub lr, r2, r1\r
- cmp lr, #8 // 0x8\r
- bcc ASM_PFX(_ll_udiv_small_label3)\r
- movs r0, r4, lsr #22\r
- moveq r4, r4, lsl #8\r
- addeq r1, r1, #8 // 0x8\r
-ASM_PFX(_ll_udiv_small_label3):\r
- rsb r0, r1, #32 // 0x20\r
- sub r2, r2, r1\r
- orr r4, r4, r5, lsr r0\r
- mov r5, r5, lsl r1\r
- cmp r2, #1 // 0x1\r
- bcc ASM_PFX(_ll_udiv_small_label5)\r
- sub r2, r2, #1 // 0x1\r
- and r0, r2, #7 // 0x7\r
- eor r0, r0, #7 // 0x7\r
- adds r0, r0, r0, lsl #1\r
- add pc, pc, r0, lsl #2\r
- nop // (mov r0,r0)\r
-ASM_PFX(_ll_udiv_small_label4):\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- sub r2, r2, #8 // 0x8\r
- tst r2, r2\r
- rsbcc r4, ip, r4\r
- bpl ASM_PFX(_ll_udiv_small_label4)\r
-ASM_PFX(_ll_udiv_small_label5):\r
- mov r2, r4, lsr r6\r
- bic r4, r4, r2, lsl r6\r
- adcs r0, r5, r5\r
- adc r1, r4, r4\r
- add r1, r1, r3, lsl r6\r
- mov r3, #0 // 0x0\r
- ldmia sp!, {r4, r5, r6, pc}\r
-\r
-ASM_PFX(_ll_udiv_big):\r
- subs r0, r5, lr\r
- mov r3, #0 // 0x0\r
- sbcs r1, r4, ip\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r3, r3, #0 // 0x0\r
- subs r0, r5, lr\r
- sbcs r1, r4, ip\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r3, r3, #0 // 0x0\r
- subs r0, r5, lr\r
- sbcs r1, r4, ip\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r3, r3, #0 // 0x0\r
- mov r1, #0 // 0x0\r
- rsbs lr, lr, #0 // 0x0\r
- rsc ip, ip, #0 // 0x0\r
- cmp r6, #16 // 0x10\r
- bcc ASM_PFX(_ll_udiv_big_label1)\r
- movs r0, r4, lsr #14\r
- moveq r4, r4, lsl #16\r
- addeq r1, r1, #16 // 0x10\r
-ASM_PFX(_ll_udiv_big_label1):\r
- sub r2, r6, r1\r
- cmp r2, #8 // 0x8\r
- bcc ASM_PFX(_ll_udiv_big_label2)\r
- movs r0, r4, lsr #22\r
- moveq r4, r4, lsl #8\r
- addeq r1, r1, #8 // 0x8\r
-ASM_PFX(_ll_udiv_big_label2):\r
- rsb r0, r1, #32 // 0x20\r
- sub r2, r6, r1\r
- orr r4, r4, r5, lsr r0\r
- mov r5, r5, lsl r1\r
- cmp r2, #1 // 0x1\r
- bcc ASM_PFX(_ll_udiv_big_label4)\r
- sub r2, r2, #1 // 0x1\r
- and r0, r2, #3 // 0x3\r
- rsb r0, r0, #3 // 0x3\r
- adds r0, r0, r0, lsl #1\r
- add pc, pc, r0, lsl #3\r
- nop // (mov r0,r0)\r
-ASM_PFX(_ll_udiv_big_label3):\r
- adcs r5, r5, r5\r
- adcs r4, r4, r4\r
- adcs r0, lr, r5\r
- adcs r1, ip, r4\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r5, r5, r5\r
- adcs r4, r4, r4\r
- adcs r0, lr, r5\r
- adcs r1, ip, r4\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r5, r5, r5\r
- adcs r4, r4, r4\r
- adcs r0, lr, r5\r
- adcs r1, ip, r4\r
- movcs r5, r0\r
- movcs r4, r1\r
- sub r2, r2, #4 // 0x4\r
- adcs r5, r5, r5\r
- adcs r4, r4, r4\r
- adcs r0, lr, r5\r
- adcs r1, ip, r4\r
- tst r2, r2\r
- movcs r5, r0\r
- movcs r4, r1\r
- bpl ASM_PFX(_ll_udiv_big_label3)\r
-ASM_PFX(_ll_udiv_big_label4):\r
- mov r1, #0 // 0x0\r
- mov r2, r5, lsr r6\r
- bic r5, r5, r2, lsl r6\r
- adcs r0, r5, r5\r
- adc r1, r1, #0 // 0x0\r
- movs lr, r3, lsl r6\r
- mov r3, r4, lsr r6\r
- bic r4, r4, r3, lsl r6\r
- adc r1, r1, #0 // 0x0\r
- adds r0, r0, lr\r
- orr r2, r2, r4, ror r6\r
- adc r1, r1, #0 // 0x0\r
- ldmia sp!, {r4, r5, r6, pc}\r
-\r
-ASM_PFX(_ll_udiv_ginormous):\r
- subs r2, r5, lr\r
- mov r1, #0 // 0x0\r
- sbcs r3, r4, ip\r
- adc r0, r1, r1\r
- movcc r2, r5\r
- movcc r3, r4\r
- ldmia sp!, {r4, r5, r6, pc}\r
-\r
-ASM_PFX(_ll_div0):\r
- ldmia sp!, {r4, r5, r6, lr}\r
- mov r0, #0 // 0x0\r
- mov r1, #0 // 0x0\r
- b ASM_PFX(__aeabi_ldiv0)\r
-\r
-ASM_PFX(__aeabi_ldiv0):\r
- bx r14\r
-\r
-\r
+++ /dev/null
-//------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-// Copyright (c) 2018, Pete Batard. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-//------------------------------------------------------------------------------\r
-\r
-\r
- EXPORT __aeabi_uldivmod\r
- EXPORT __rt_udiv64\r
-\r
- AREA s___aeabi_uldivmod, CODE, READONLY, ARM\r
-\r
- ARM\r
-\r
-;\r
-;UINT64\r
-;EFIAPI\r
-;__rt_udiv64 (\r
-; IN UINT64 Divisor\r
-; IN UINT64 Dividend\r
-; )\r
-;\r
-__rt_udiv64\r
- ; Swap r0-r1 and r2-r3\r
- mov r12, r0\r
- mov r0, r2\r
- mov r2, r12\r
- mov r12, r1\r
- mov r1, r3\r
- mov r3, r12\r
- b __aeabi_uldivmod\r
-\r
-;\r
-;UINT64\r
-;EFIAPI\r
-;__aeabi_uldivmod (\r
-; IN UINT64 Dividend\r
-; IN UINT64 Divisor\r
-; )\r
-;\r
-__aeabi_uldivmod\r
- stmdb sp!, {r4, r5, r6, lr}\r
- mov r4, r1\r
- mov r5, r0\r
- mov r6, #0 ; 0x0\r
- orrs ip, r3, r2, lsr #31\r
- bne __aeabi_uldivmod_label1\r
- tst r2, r2\r
- beq _ll_div0\r
- movs ip, r2, lsr #15\r
- addeq r6, r6, #16 ; 0x10\r
- mov ip, r2, lsl r6\r
- movs lr, ip, lsr #23\r
- moveq ip, ip, lsl #8\r
- addeq r6, r6, #8 ; 0x8\r
- movs lr, ip, lsr #27\r
- moveq ip, ip, lsl #4\r
- addeq r6, r6, #4 ; 0x4\r
- movs lr, ip, lsr #29\r
- moveq ip, ip, lsl #2\r
- addeq r6, r6, #2 ; 0x2\r
- movs lr, ip, lsr #30\r
- moveq ip, ip, lsl #1\r
- addeq r6, r6, #1 ; 0x1\r
- b _ll_udiv_small\r
-__aeabi_uldivmod_label1\r
- tst r3, #-2147483648 ; 0x80000000\r
- bne __aeabi_uldivmod_label2\r
- movs ip, r3, lsr #15\r
- addeq r6, r6, #16 ; 0x10\r
- mov ip, r3, lsl r6\r
- movs lr, ip, lsr #23\r
- moveq ip, ip, lsl #8\r
- addeq r6, r6, #8 ; 0x8\r
- movs lr, ip, lsr #27\r
- moveq ip, ip, lsl #4\r
- addeq r6, r6, #4 ; 0x4\r
- movs lr, ip, lsr #29\r
- moveq ip, ip, lsl #2\r
- addeq r6, r6, #2 ; 0x2\r
- movs lr, ip, lsr #30\r
- addeq r6, r6, #1 ; 0x1\r
- rsb r3, r6, #32 ; 0x20\r
- moveq ip, ip, lsl #1\r
- orr ip, ip, r2, lsr r3\r
- mov lr, r2, lsl r6\r
- b _ll_udiv_big\r
-__aeabi_uldivmod_label2\r
- mov ip, r3\r
- mov lr, r2\r
- b _ll_udiv_ginormous\r
-\r
-_ll_udiv_small\r
- cmp r4, ip, lsl #1\r
- mov r3, #0 ; 0x0\r
- subcs r4, r4, ip, lsl #1\r
- addcs r3, r3, #2 ; 0x2\r
- cmp r4, ip\r
- subcs r4, r4, ip\r
- adcs r3, r3, #0 ; 0x0\r
- add r2, r6, #32 ; 0x20\r
- cmp r2, #32 ; 0x20\r
- rsb ip, ip, #0 ; 0x0\r
- bcc _ll_udiv_small_label1\r
- orrs r0, r4, r5, lsr #30\r
- moveq r4, r5\r
- moveq r5, #0 ; 0x0\r
- subeq r2, r2, #32 ; 0x20\r
-_ll_udiv_small_label1\r
- mov r1, #0 ; 0x0\r
- cmp r2, #16 ; 0x10\r
- bcc _ll_udiv_small_label2\r
- movs r0, r4, lsr #14\r
- moveq r4, r4, lsl #16\r
- addeq r1, r1, #16 ; 0x10\r
-_ll_udiv_small_label2\r
- sub lr, r2, r1\r
- cmp lr, #8 ; 0x8\r
- bcc _ll_udiv_small_label3\r
- movs r0, r4, lsr #22\r
- moveq r4, r4, lsl #8\r
- addeq r1, r1, #8 ; 0x8\r
-_ll_udiv_small_label3\r
- rsb r0, r1, #32 ; 0x20\r
- sub r2, r2, r1\r
- orr r4, r4, r5, lsr r0\r
- mov r5, r5, lsl r1\r
- cmp r2, #1 ; 0x1\r
- bcc _ll_udiv_small_label5\r
- sub r2, r2, #1 ; 0x1\r
- and r0, r2, #7 ; 0x7\r
- eor r0, r0, #7 ; 0x7\r
- adds r0, r0, r0, lsl #1\r
- add pc, pc, r0, lsl #2\r
- nop ; (mov r0,r0)\r
-_ll_udiv_small_label4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- rsbcc r4, ip, r4\r
- adcs r5, r5, r5\r
- adcs r4, ip, r4, lsl #1\r
- sub r2, r2, #8 ; 0x8\r
- tst r2, r2\r
- rsbcc r4, ip, r4\r
- bpl _ll_udiv_small_label4\r
-_ll_udiv_small_label5\r
- mov r2, r4, lsr r6\r
- bic r4, r4, r2, lsl r6\r
- adcs r0, r5, r5\r
- adc r1, r4, r4\r
- add r1, r1, r3, lsl r6\r
- mov r3, #0 ; 0x0\r
- ldmia sp!, {r4, r5, r6, pc}\r
-\r
-_ll_udiv_big\r
- subs r0, r5, lr\r
- mov r3, #0 ; 0x0\r
- sbcs r1, r4, ip\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r3, r3, #0 ; 0x0\r
- subs r0, r5, lr\r
- sbcs r1, r4, ip\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r3, r3, #0 ; 0x0\r
- subs r0, r5, lr\r
- sbcs r1, r4, ip\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r3, r3, #0 ; 0x0\r
- mov r1, #0 ; 0x0\r
- rsbs lr, lr, #0 ; 0x0\r
- rsc ip, ip, #0 ; 0x0\r
- cmp r6, #16 ; 0x10\r
- bcc _ll_udiv_big_label1\r
- movs r0, r4, lsr #14\r
- moveq r4, r4, lsl #16\r
- addeq r1, r1, #16 ; 0x10\r
-_ll_udiv_big_label1\r
- sub r2, r6, r1\r
- cmp r2, #8 ; 0x8\r
- bcc _ll_udiv_big_label2\r
- movs r0, r4, lsr #22\r
- moveq r4, r4, lsl #8\r
- addeq r1, r1, #8 ; 0x8\r
-_ll_udiv_big_label2\r
- rsb r0, r1, #32 ; 0x20\r
- sub r2, r6, r1\r
- orr r4, r4, r5, lsr r0\r
- mov r5, r5, lsl r1\r
- cmp r2, #1 ; 0x1\r
- bcc _ll_udiv_big_label4\r
- sub r2, r2, #1 ; 0x1\r
- and r0, r2, #3 ; 0x3\r
- rsb r0, r0, #3 ; 0x3\r
- adds r0, r0, r0, lsl #1\r
- add pc, pc, r0, lsl #3\r
- nop ; (mov r0,r0)\r
-_ll_udiv_big_label3\r
- adcs r5, r5, r5\r
- adcs r4, r4, r4\r
- adcs r0, lr, r5\r
- adcs r1, ip, r4\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r5, r5, r5\r
- adcs r4, r4, r4\r
- adcs r0, lr, r5\r
- adcs r1, ip, r4\r
- movcs r5, r0\r
- movcs r4, r1\r
- adcs r5, r5, r5\r
- adcs r4, r4, r4\r
- adcs r0, lr, r5\r
- adcs r1, ip, r4\r
- movcs r5, r0\r
- movcs r4, r1\r
- sub r2, r2, #4 ; 0x4\r
- adcs r5, r5, r5\r
- adcs r4, r4, r4\r
- adcs r0, lr, r5\r
- adcs r1, ip, r4\r
- tst r2, r2\r
- movcs r5, r0\r
- movcs r4, r1\r
- bpl _ll_udiv_big_label3\r
-_ll_udiv_big_label4\r
- mov r1, #0 ; 0x0\r
- mov r2, r5, lsr r6\r
- bic r5, r5, r2, lsl r6\r
- adcs r0, r5, r5\r
- adc r1, r1, #0 ; 0x0\r
- movs lr, r3, lsl r6\r
- mov r3, r4, lsr r6\r
- bic r4, r4, r3, lsl r6\r
- adc r1, r1, #0 ; 0x0\r
- adds r0, r0, lr\r
- orr r2, r2, r4, ror r6\r
- adc r1, r1, #0 ; 0x0\r
- ldmia sp!, {r4, r5, r6, pc}\r
-\r
-_ll_udiv_ginormous\r
- subs r2, r5, lr\r
- mov r1, #0 ; 0x0\r
- sbcs r3, r4, ip\r
- adc r0, r1, r1\r
- movcc r2, r5\r
- movcc r3, r4\r
- ldmia sp!, {r4, r5, r6, pc}\r
-\r
-_ll_div0\r
- ldmia sp!, {r4, r5, r6, lr}\r
- mov r0, #0 ; 0x0\r
- mov r1, #0 ; 0x0\r
- b __aeabi_ldiv0\r
-\r
-__aeabi_ldiv0\r
- bx r14\r
-\r
- END\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__umoddi3)\r
- stmfd sp!, {r7, lr}\r
- add r7, sp, #0\r
- sub sp, sp, #16\r
- add ip, sp, #8\r
- str ip, [sp, #0]\r
- bl ASM_PFX(__udivmoddi4)\r
- ldrd r0, [sp, #8]\r
- sub sp, r7, #0\r
- ldmfd sp!, {r7, pc}\r
-\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-ASM_FUNC(__umodsi3)\r
- stmfd sp!, {r4, r5, r7, lr}\r
- add r7, sp, #8\r
- mov r5, r0\r
- mov r4, r1\r
- bl ASM_PFX(__udivsi3)\r
- mul r0, r4, r0\r
- rsb r0, r0, r5\r
- ldmfd sp!, {r4, r5, r7, pc}\r
-\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-#\r
-#UINT32\r
-#EFIAPI\r
-#__aeabi_uread4 (\r
-# IN VOID *Pointer\r
-# );\r
-#\r
-ASM_FUNC(__aeabi_uread4)\r
- ldrb r1, [r0]\r
- ldrb r2, [r0, #1]\r
- ldrb r3, [r0, #2]\r
- ldrb r0, [r0, #3]\r
- orr r1, r1, r2, lsl #8\r
- orr r1, r1, r3, lsl #16\r
- orr r0, r1, r0, lsl #24\r
- bx lr\r
-\r
-#\r
-#UINT64\r
-#EFIAPI\r
-#__aeabi_uread8 (\r
-# IN VOID *Pointer\r
-# );\r
-#\r
-ASM_FUNC(__aeabi_uread8)\r
- mov r3, r0\r
-\r
- ldrb r1, [r3]\r
- ldrb r2, [r3, #1]\r
- orr r1, r1, r2, lsl #8\r
- ldrb r2, [r3, #2]\r
- orr r1, r1, r2, lsl #16\r
- ldrb r0, [r3, #3]\r
- orr r0, r1, r0, lsl #24\r
-\r
- ldrb r1, [r3, #4]\r
- ldrb r2, [r3, #5]\r
- orr r1, r1, r2, lsl #8\r
- ldrb r2, [r3, #6]\r
- orr r1, r1, r2, lsl #16\r
- ldrb r2, [r3, #7]\r
- orr r1, r1, r2, lsl #24\r
-\r
- bx lr\r
+++ /dev/null
-#------------------------------------------------------------------------------\r
-#\r
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#------------------------------------------------------------------------------\r
-\r
-#include <AsmMacroLib.h>\r
-\r
-#\r
-#UINT32\r
-#EFIAPI\r
-#__aeabi_uwrite4 (\r
-# IN UINT32 Data,\r
-# IN VOID *Pointer\r
-# );\r
-#\r
-ASM_FUNC(__aeabi_uwrite4)\r
- mov r2, r0, lsr #8\r
- strb r0, [r1]\r
- strb r2, [r1, #1]\r
- mov r2, r0, lsr #16\r
- strb r2, [r1, #2]\r
- mov r2, r0, lsr #24\r
- strb r2, [r1, #3]\r
- bx lr\r
-\r
-#\r
-#UINT64\r
-#EFIAPI\r
-#__aeabi_uwrite8 (\r
-# IN UINT64 Data,\r
-# IN VOID *Pointer\r
-# );\r
-#\r
-ASM_FUNC(__aeabi_uwrite8)\r
- mov r3, r0, lsr #8\r
- strb r0, [r2]\r
- strb r3, [r2, #1]\r
- mov r3, r0, lsr #16\r
- strb r3, [r2, #2]\r
- mov r3, r0, lsr #24\r
- strb r3, [r2, #3]\r
-\r
- mov r3, r1, lsr #8\r
- strb r1, [r2, #4]\r
- strb r3, [r2, #5]\r
- mov r3, r1, lsr #16\r
- strb r3, [r2, #6]\r
- mov r3, r1, lsr #24\r
- strb r3, [r2, #7]\r
- bx lr\r
+++ /dev/null
-#/** @file\r
-# Base Library implementation.\r
-#\r
-# Copyright (c) 2009, Apple Inc. All rights reserved.<BR>\r
-# Copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
-#\r
-# SPDX-License-Identifier: BSD-2-Clause-Patent\r
-#\r
-#\r
-#**/\r
-\r
-[Defines]\r
- INF_VERSION = 0x00010005\r
- BASE_NAME = CompilerIntrinsicsLib\r
- FILE_GUID = 855274FA-3575-4C20-9709-C031DC5589FA\r
- MODULE_TYPE = BASE\r
- VERSION_STRING = 1.0\r
- LIBRARY_CLASS = CompilerIntrinsicsLib\r
-\r
-[Sources]\r
- memcpy.c | GCC\r
- memset.c | GCC\r
-\r
- memcpy_ms.c | MSFT\r
- memset_ms.c | MSFT\r
- memcmp_ms.c | MSFT\r
- memmove_ms.c | MSFT\r
-\r
-[Sources.ARM]\r
- Arm/ashrdi3.S | GCC\r
- Arm/ashldi3.S | GCC\r
- Arm/div.S | GCC\r
- Arm/divdi3.S | GCC\r
- Arm/divsi3.S | GCC\r
- Arm/lshrdi3.S | GCC\r
- Arm/memmove.S | GCC\r
- Arm/modsi3.S | GCC\r
- Arm/moddi3.S | GCC\r
- Arm/muldi3.S | GCC\r
- Arm/mullu.S | GCC\r
- Arm/udivsi3.S | GCC\r
- Arm/umodsi3.S | GCC\r
- Arm/udivdi3.S | GCC\r
- Arm/umoddi3.S | GCC\r
- Arm/udivmoddi4.S | GCC\r
- Arm/clzsi2.S | GCC\r
- Arm/ctzsi2.S | GCC\r
- Arm/ucmpdi2.S | GCC\r
- Arm/switch8.S | GCC\r
- Arm/switchu8.S | GCC\r
- Arm/switch16.S | GCC\r
- Arm/switch32.S | GCC\r
- Arm/sourcery.S | GCC\r
- Arm/uldiv.S | GCC\r
- Arm/ldivmod.S | GCC\r
- Arm/lasr.S | GCC\r
- Arm/llsr.S | GCC\r
- Arm/llsl.S | GCC\r
- Arm/uread.S | GCC\r
- Arm/uwrite.S | GCC\r
-\r
- Arm/div.asm | MSFT\r
- Arm/uldiv.asm | MSFT\r
- Arm/ldivmod.asm | MSFT\r
- Arm/llsr.asm | MSFT\r
-\r
-[Sources.AARCH64]\r
- AArch64/Atomics.S | GCC\r
- AArch64/ashlti3.S | GCC\r
-\r
-[Packages]\r
- MdePkg/MdePkg.dec\r
- ArmPkg/ArmPkg.dec\r
-\r
-[BuildOptions]\r
- MSFT:*_*_*_CC_FLAGS = /GL-\r
- MSFT:*_*_ARM_ASM_FLAGS = /oldit\r
+++ /dev/null
-// ------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2019, Pete Batard. All rights reserved.\r
-// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-// ------------------------------------------------------------------------------\r
-\r
-#if defined (_M_ARM64)\r
-typedef unsigned __int64 size_t;\r
-#else\r
-typedef unsigned __int32 size_t;\r
-#endif\r
-\r
-int\r
-memcmp (\r
- void *,\r
- void *,\r
- size_t\r
- );\r
-\r
-#pragma intrinsic(memcmp)\r
-#pragma function(memcmp)\r
-int\r
-memcmp (\r
- const void *s1,\r
- const void *s2,\r
- size_t n\r
- )\r
-{\r
- unsigned char const *t1;\r
- unsigned char const *t2;\r
-\r
- t1 = s1;\r
- t2 = s2;\r
-\r
- while (n-- != 0) {\r
- if (*t1 != *t2) {\r
- return (int)*t1 - (int)*t2;\r
- }\r
-\r
- t1++;\r
- t2++;\r
- }\r
-\r
- return 0;\r
-}\r
+++ /dev/null
-// ------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>\r
-// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-// ------------------------------------------------------------------------------\r
-\r
-typedef __SIZE_TYPE__ size_t;\r
-\r
-static void\r
-__memcpy (\r
- void *dest,\r
- const void *src,\r
- size_t n\r
- )\r
-{\r
- unsigned char *d;\r
- unsigned char const *s;\r
-\r
- d = dest;\r
- s = src;\r
-\r
- while (n-- != 0) {\r
- *d++ = *s++;\r
- }\r
-}\r
-\r
-void *\r
-memcpy (\r
- void *dest,\r
- const void *src,\r
- size_t n\r
- )\r
-{\r
- __memcpy (dest, src, n);\r
- return dest;\r
-}\r
-\r
-#ifdef __arm__\r
-\r
-__attribute__ ((__alias__ ("__memcpy")))\r
-void\r
-__aeabi_memcpy (\r
- void *dest,\r
- const void *src,\r
- size_t n\r
- );\r
-\r
-__attribute__ ((__alias__ ("__memcpy")))\r
-void\r
-__aeabi_memcpy4 (\r
- void *dest,\r
- const void *src,\r
- size_t n\r
- );\r
-\r
-__attribute__ ((__alias__ ("__memcpy")))\r
-void\r
-__aeabi_memcpy8 (\r
- void *dest,\r
- const void *src,\r
- size_t n\r
- );\r
-\r
-#endif\r
+++ /dev/null
-// ------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2017, Pete Batard. All rights reserved.<BR>\r
-// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-// ------------------------------------------------------------------------------\r
-\r
-#if defined (_M_ARM64)\r
-typedef unsigned __int64 size_t;\r
-#else\r
-typedef unsigned __int32 size_t;\r
-#endif\r
-\r
-void *\r
-memcpy (\r
- void *,\r
- const void *,\r
- size_t\r
- );\r
-\r
-#pragma intrinsic(memcpy)\r
-#pragma function(memcpy)\r
-void *\r
-memcpy (\r
- void *dest,\r
- const void *src,\r
- size_t n\r
- )\r
-{\r
- unsigned char *d;\r
- unsigned char const *s;\r
-\r
- d = dest;\r
- s = src;\r
-\r
- while (n-- != 0) {\r
- *d++ = *s++;\r
- }\r
-\r
- return dest;\r
-}\r
+++ /dev/null
-// ------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2019, Pete Batard. All rights reserved.\r
-// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-// ------------------------------------------------------------------------------\r
-\r
-#if defined (_M_ARM64)\r
-typedef unsigned __int64 size_t;\r
-#else\r
-typedef unsigned __int32 size_t;\r
-#endif\r
-\r
-void *\r
-memmove (\r
- void *,\r
- const void *,\r
- size_t\r
- );\r
-\r
-#pragma intrinsic(memmove)\r
-#pragma function(memmove)\r
-void *\r
-memmove (\r
- void *dest,\r
- const void *src,\r
- size_t n\r
- )\r
-{\r
- unsigned char *d;\r
- unsigned char const *s;\r
-\r
- d = dest;\r
- s = src;\r
-\r
- if (d < s) {\r
- while (n-- != 0) {\r
- *d++ = *s++;\r
- }\r
- } else {\r
- d += n;\r
- s += n;\r
- while (n-- != 0) {\r
- *--d = *--s;\r
- }\r
- }\r
-\r
- return dest;\r
-}\r
+++ /dev/null
-// ------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>\r
-// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-// ------------------------------------------------------------------------------\r
-\r
-typedef __SIZE_TYPE__ size_t;\r
-\r
-static __attribute__ ((__used__))\r
-void *\r
-__memset (\r
- void *s,\r
- int c,\r
- size_t n\r
- )\r
-{\r
- unsigned char *d;\r
-\r
- d = s;\r
-\r
- while (n-- != 0) {\r
- *d++ = c;\r
- }\r
-\r
- return s;\r
-}\r
-\r
-//\r
-// Other modules (such as CryptoPkg/IntrinsicLib) may provide another\r
-// implementation of memset(), which may conflict with this one if this\r
-// object was pulled into the link due to the definitions below. So make\r
-// our memset() 'weak' to let the other implementation take precedence.\r
-//\r
-__attribute__ ((__weak__, __alias__ ("__memset")))\r
-void *\r
-memset (\r
- void *dest,\r
- int c,\r
- size_t n\r
- );\r
-\r
-#ifdef __arm__\r
-\r
-void\r
-__aeabi_memset (\r
- void *dest,\r
- size_t n,\r
- int c\r
- )\r
-{\r
- __memset (dest, c, n);\r
-}\r
-\r
-__attribute__ ((__alias__ ("__aeabi_memset")))\r
-void\r
-__aeabi_memset4 (\r
- void *dest,\r
- size_t n,\r
- int c\r
- );\r
-\r
-__attribute__ ((__alias__ ("__aeabi_memset")))\r
-void\r
-__aeabi_memset8 (\r
- void *dest,\r
- size_t n,\r
- int c\r
- );\r
-\r
-void\r
-__aeabi_memclr (\r
- void *dest,\r
- size_t n\r
- )\r
-{\r
- __memset (dest, 0, n);\r
-}\r
-\r
-__attribute__ ((__alias__ ("__aeabi_memclr")))\r
-void\r
-__aeabi_memclr4 (\r
- void *dest,\r
- size_t n\r
- );\r
-\r
-__attribute__ ((__alias__ ("__aeabi_memclr")))\r
-void\r
-__aeabi_memclr8 (\r
- void *dest,\r
- size_t n\r
- );\r
-\r
-#endif\r
+++ /dev/null
-// ------------------------------------------------------------------------------\r
-//\r
-// Copyright (c) 2017, Pete Batard. All rights reserved.<BR>\r
-// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
-//\r
-// SPDX-License-Identifier: BSD-2-Clause-Patent\r
-//\r
-// ------------------------------------------------------------------------------\r
-\r
-#if defined (_M_ARM64)\r
-typedef unsigned __int64 size_t;\r
-#else\r
-typedef unsigned __int32 size_t;\r
-#endif\r
-\r
-void *\r
-memset (\r
- void *,\r
- int,\r
- size_t\r
- );\r
-\r
-#pragma intrinsic(memset)\r
-#pragma function(memset)\r
-void *\r
-memset (\r
- void *s,\r
- int c,\r
- size_t n\r
- )\r
-{\r
- unsigned char *d;\r
-\r
- d = s;\r
-\r
- while (n-- != 0) {\r
- *d++ = (unsigned char)c;\r
- }\r
-\r
- return s;\r
-}\r
DevicePathLib|MdePkg/Library/UefiDevicePathLib/UefiDevicePathLib.inf\r
UefiRuntimeServicesTableLib|MdePkg/Library/UefiRuntimeServicesTableLib/UefiRuntimeServicesTableLib.inf\r
\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
[LibraryClasses.common.PEIM]\r
# Networking Requirements\r
!include NetworkPkg/NetworkLibs.dsc.inc\r
\r
- #\r
- # It is not possible to prevent the ARM compiler from inserting calls to intrinsic functions.\r
- # This library provides the instrinsic functions such a compiler may generate calls to.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
# Add support for GCC stack protector\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
ArmLib|ArmPkg/Library/ArmLib/ArmBaseLib.inf\r
- #\r
- # It is not possible to prevent the ARM compiler for generic intrinsic functions.\r
- # This library provides the instrinsic functions generate by a given compiler.\r
- # [LibraryClasses.ARM, LibraryClasses.AARCH64] and NULL mean link this library\r
- # into all ARM and AARCH64 images.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
\r
# Add support for stack protector\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
SynchronizationLib|MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
- #\r
- # It is not possible to prevent the ARM compiler for generic intrinsic functions.\r
- # This library provides the instrinsic functions generate by a given compiler.\r
- # [LibraryClasses.ARM, LibraryClasses.AARCH64] and NULL mean link this library\r
- # into all ARM and AARCH64 images.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
# Add support for stack protector\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
UefiDriverEntryPoint|MdePkg/Library/UefiDriverEntryPoint/UefiDriverEntryPoint.inf\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
PL011UartLib|ArmPlatformPkg/Library/PL011UartLib/PL011UartLib.inf\r
\r
ArmGicLib|ArmPkg/Drivers/ArmGic/ArmGicLib.inf\r
ArmSmcLib|ArmPkg/Library/ArmSmcLib/ArmSmcLib.inf\r
SemihostLib|ArmPkg/Library/SemihostLib/SemihostLib.inf\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
\r
# Add support for GCC stack protector\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
MemoryAllocationLib|MdePkg/Library/PeiMemoryAllocationLib/PeiMemoryAllocationLib.inf\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
###################################################################################################\r
TimerLib|MdePkg/Library/BaseTimerLibNullTemplate/BaseTimerLibNullTemplate.inf\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
- #\r
- # It is not possible to prevent the ARM compiler for generic intrinsic functions.\r
- # This library provides the intrinsic functions generate by a given compiler.\r
- # [LibraryClasses.ARM, LibraryClasses.AARCH64] and NULL mean link this library\r
- # into all ARM and AARCH64 images.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
# Add support for stack protector\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
LockBoxLib|MdeModulePkg/Library/LockBoxNullLib/LockBoxNullLib.inf\r
\r
- #\r
- # It is not possible to prevent ARM compiler calls to generic intrinsic functions.\r
- # This library provides the instrinsic functions generated by a given compiler.\r
- # [LibraryClasses.ARM] and NULL mean link this library into all ARM images.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
#\r
# Since software stack checking may be heuristically enabled by the compiler\r
# include BaseStackCheckLib unconditionally.\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2020, Arm, Limited. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+ /*\r
+ * Provide the GCC intrinsics that are required when using GCC 9 or\r
+ * later with the -moutline-atomics option (which became the default\r
+ * in GCC 10)\r
+ */\r
+ .arch armv8-a\r
+\r
+ .macro reg_alias, pfx, sz\r
+ r0_\sz .req \pfx\()0\r
+ r1_\sz .req \pfx\()1\r
+ tmp0_\sz .req \pfx\()16\r
+ tmp1_\sz .req \pfx\()17\r
+ .endm\r
+\r
+ /*\r
+ * Define register aliases of the right type for each size\r
+ * (xN for 8 bytes, wN for everything smaller)\r
+ */\r
+ reg_alias w, 1\r
+ reg_alias w, 2\r
+ reg_alias w, 4\r
+ reg_alias x, 8\r
+\r
+ .macro fn_start, name:req\r
+ .section .text.\name\r
+ .globl \name\r
+ .type \name, %function\r
+\name\():\r
+ .endm\r
+\r
+ .macro fn_end, name:req\r
+ .size \name, . - \name\r
+ .endm\r
+\r
+ /*\r
+ * Emit an atomic helper for \model with operands of size \sz, using\r
+ * the operation specified by \insn (which is the LSE name), and which\r
+ * can be implemented using the generic load-locked/store-conditional\r
+ * (LL/SC) sequence below, using the arithmetic operation given by\r
+ * \opc.\r
+ */\r
+ .macro emit_ld_sz, sz:req, insn:req, opc:req, model:req, s, a, l\r
+ fn_start __aarch64_\insn\()\sz\()\model\r
+ mov tmp0_\sz, r0_\sz\r
+0: ld\a\()xr\s r0_\sz, [x1]\r
+ .ifnc \insn, swp\r
+ \opc tmp1_\sz, r0_\sz, tmp0_\sz\r
+ st\l\()xr\s w15, tmp1_\sz, [x1]\r
+ .else\r
+ st\l\()xr\s w15, tmp0_\sz, [x1]\r
+ .endif\r
+ cbnz w15, 0b\r
+ ret\r
+ fn_end __aarch64_\insn\()\sz\()\model\r
+ .endm\r
+\r
+ /*\r
+ * Emit atomic helpers for \model for operand sizes in the\r
+ * set {1, 2, 4, 8}, for the instruction pattern given by\r
+ * \insn. (This is the LSE name, but this implementation uses\r
+ * the generic LL/SC sequence using \opc as the arithmetic\r
+ * operation on the target.)\r
+ */\r
+ .macro emit_ld, insn:req, opc:req, model:req, a, l\r
+ emit_ld_sz 1, \insn, \opc, \model, b, \a, \l\r
+ emit_ld_sz 2, \insn, \opc, \model, h, \a, \l\r
+ emit_ld_sz 4, \insn, \opc, \model, , \a, \l\r
+ emit_ld_sz 8, \insn, \opc, \model, , \a, \l\r
+ .endm\r
+\r
+ /*\r
+ * Emit the compare and swap helper for \model and size \sz\r
+ * using LL/SC instructions.\r
+ */\r
+ .macro emit_cas_sz, sz:req, model:req, uxt:req, s, a, l\r
+ fn_start __aarch64_cas\sz\()\model\r
+ \uxt tmp0_\sz, r0_\sz\r
+0: ld\a\()xr\s r0_\sz, [x2]\r
+ cmp r0_\sz, tmp0_\sz\r
+ bne 1f\r
+ st\l\()xr\s w15, r1_\sz, [x2]\r
+ cbnz w15, 0b\r
+1: ret\r
+ fn_end __aarch64_cas\sz\()\model\r
+ .endm\r
+\r
+ /*\r
+ * Emit compare-and-swap helpers for \model for operand sizes in the\r
+ * set {1, 2, 4, 8, 16}.\r
+ */\r
+ .macro emit_cas, model:req, a, l\r
+ emit_cas_sz 1, \model, uxtb, b, \a, \l\r
+ emit_cas_sz 2, \model, uxth, h, \a, \l\r
+ emit_cas_sz 4, \model, mov , , \a, \l\r
+ emit_cas_sz 8, \model, mov , , \a, \l\r
+\r
+ /*\r
+ * We cannot use the parameterized sequence for 16 byte CAS, so we\r
+ * need to define it explicitly.\r
+ */\r
+ fn_start __aarch64_cas16\model\r
+ mov x16, x0\r
+ mov x17, x1\r
+0: ld\a\()xp x0, x1, [x4]\r
+ cmp x0, x16\r
+ ccmp x1, x17, #0, eq\r
+ bne 1f\r
+ st\l\()xp w15, x16, x17, [x4]\r
+ cbnz w15, 0b\r
+1: ret\r
+ fn_end __aarch64_cas16\model\r
+ .endm\r
+\r
+ /*\r
+ * Emit the set of GCC outline atomic helper functions for\r
+ * the memory ordering model given by \model:\r
+ * - relax unordered loads and stores\r
+ * - acq load-acquire, unordered store\r
+ * - rel unordered load, store-release\r
+ * - acq_rel load-acquire, store-release\r
+ */\r
+ .macro emit_model, model:req, a, l\r
+ emit_ld ldadd, add, \model, \a, \l\r
+ emit_ld ldclr, bic, \model, \a, \l\r
+ emit_ld ldeor, eor, \model, \a, \l\r
+ emit_ld ldset, orr, \model, \a, \l\r
+ emit_ld swp, mov, \model, \a, \l\r
+ emit_cas \model, \a, \l\r
+ .endm\r
+\r
+ emit_model _relax\r
+ emit_model _acq, a\r
+ emit_model _rel,, l\r
+ emit_model _acq_rel, a, l\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__ashlti3)\r
+ # return if shift is 0\r
+ cbz x2, 1f\r
+\r
+ mov x3, #64\r
+ sub x3, x3, x2\r
+ cmp x3, #0\r
+ b.le 2f\r
+\r
+ # shift is <= 64 bits\r
+ lsr x3, x0, x3\r
+ lsl x1, x1, x2\r
+ orr x1, x1, x3\r
+ lsl x0, x0, x2\r
+1:\r
+ ret\r
+\r
+2:\r
+ # shift is > 64\r
+ neg w3, w3\r
+ lsl x1, x0, x3\r
+ mov x0, #0\r
+ ret\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__ashldi3)\r
+ cmp r2, #31\r
+ bls L2\r
+ cmp r2, #63\r
+ subls r2, r2, #32\r
+ movls r2, r0, asl r2\r
+ movhi r2, #0\r
+ mov r1, r2\r
+ mov r0, #0\r
+ bx lr\r
+L2:\r
+ cmp r2, #0\r
+ rsbne r3, r2, #32\r
+ movne r3, r0, lsr r3\r
+ movne r0, r0, asl r2\r
+ orrne r1, r3, r1, asl r2\r
+ bx lr\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__ashrdi3)\r
+ cmp r2, #31\r
+ bls L2\r
+ cmp r2, #63\r
+ subls r2, r2, #32\r
+ mov ip, r1, asr #31\r
+ movls r2, r1, asr r2\r
+ movhi r2, ip\r
+ mov r0, r2\r
+ mov r1, ip\r
+ bx lr\r
+L2:\r
+ cmp r2, #0\r
+ rsbne r3, r2, #32\r
+ movne r3, r1, asl r3\r
+ movne r1, r1, asr r2\r
+ orrne r0, r3, r0, lsr r2\r
+ bx lr\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__clzsi2)\r
+ @ frame_needed = 1, uses_anonymous_args = 0\r
+ stmfd sp!, {r7, lr}\r
+ add r7, sp, #0\r
+ movs r3, r0, lsr #16\r
+ movne r3, #16\r
+ moveq r3, #0\r
+ movne r9, #0\r
+ moveq r9, #16\r
+ mov r3, r0, lsr r3\r
+ tst r3, #65280\r
+ movne r0, #8\r
+ moveq r0, #0\r
+ movne lr, #0\r
+ moveq lr, #8\r
+ mov r3, r3, lsr r0\r
+ tst r3, #240\r
+ movne r0, #4\r
+ moveq r0, #0\r
+ movne ip, #0\r
+ moveq ip, #4\r
+ mov r3, r3, lsr r0\r
+ tst r3, #12\r
+ movne r0, #2\r
+ moveq r0, #0\r
+ movne r1, #0\r
+ moveq r1, #2\r
+ mov r2, r3, lsr r0\r
+ add r3, lr, r9\r
+ add r0, r3, ip\r
+ add r1, r0, r1\r
+ mov r0, r2, lsr #1\r
+ eor r0, r0, #1\r
+ ands r0, r0, #1\r
+ mvnne r0, #0\r
+ rsb r3, r2, #2\r
+ and r0, r0, r3\r
+ add r0, r1, r0\r
+ ldmfd sp!, {r7, pc}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__ctzsi2)\r
+ uxth r3, r0\r
+ cmp r3, #0\r
+ moveq ip, #16\r
+ movne ip, #0\r
+ @ lr needed for prologue\r
+ mov r0, r0, lsr ip\r
+ tst r0, #255\r
+ movne r3, #0\r
+ moveq r3, #8\r
+ mov r0, r0, lsr r3\r
+ tst r0, #15\r
+ movne r1, #0\r
+ moveq r1, #4\r
+ add r3, r3, ip\r
+ mov r0, r0, lsr r1\r
+ tst r0, #3\r
+ movne r2, #0\r
+ moveq r2, #2\r
+ add r3, r3, r1\r
+ mov r0, r0, lsr r2\r
+ and r0, r0, #3\r
+ add r2, r3, r2\r
+ eor r3, r0, #1\r
+ mov r0, r0, lsr #1\r
+ ands r3, r3, #1\r
+ mvnne r3, #0\r
+ rsb r0, r0, #2\r
+ and r0, r3, r0\r
+ add r0, r2, r0\r
+ bx lr\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2011, ARM. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+.text\r
+.align 2\r
+GCC_ASM_EXPORT(__aeabi_uidiv)\r
+GCC_ASM_EXPORT(__aeabi_uidivmod)\r
+GCC_ASM_EXPORT(__aeabi_idiv)\r
+GCC_ASM_EXPORT(__aeabi_idivmod)\r
+\r
+# AREA Math, CODE, READONLY\r
+\r
+#\r
+#UINT32\r
+#EFIAPI\r
+#__aeabi_uidivmod (\r
+# IN UINT32 Dividend,\r
+# IN UINT32 Divisor\r
+# );\r
+#\r
+\r
+ASM_PFX(__aeabi_uidiv):\r
+ASM_PFX(__aeabi_uidivmod):\r
+ rsbs r12, r1, r0, LSR #4\r
+ mov r2, #0\r
+ bcc ASM_PFX(__arm_div4)\r
+ rsbs r12, r1, r0, LSR #8\r
+ bcc ASM_PFX(__arm_div8)\r
+ mov r3, #0\r
+ b ASM_PFX(__arm_div_large)\r
+\r
+#\r
+#INT32\r
+#EFIAPI\r
+#__aeabi_idivmod (\r
+# IN INT32 Dividend,\r
+# IN INT32 Divisor\r
+# );\r
+#\r
+ASM_PFX(__aeabi_idiv):\r
+ASM_PFX(__aeabi_idivmod):\r
+ orrs r12, r0, r1\r
+ bmi ASM_PFX(__arm_div_negative)\r
+ rsbs r12, r1, r0, LSR #1\r
+ mov r2, #0\r
+ bcc ASM_PFX(__arm_div1)\r
+ rsbs r12, r1, r0, LSR #4\r
+ bcc ASM_PFX(__arm_div4)\r
+ rsbs r12, r1, r0, LSR #8\r
+ bcc ASM_PFX(__arm_div8)\r
+ mov r3, #0\r
+ b ASM_PFX(__arm_div_large)\r
+ASM_PFX(__arm_div8):\r
+ rsbs r12, r1, r0, LSR #7\r
+ subcs r0, r0, r1, LSL #7\r
+ adc r2, r2, r2\r
+ rsbs r12, r1, r0,LSR #6\r
+ subcs r0, r0, r1, LSL #6\r
+ adc r2, r2, r2\r
+ rsbs r12, r1, r0, LSR #5\r
+ subcs r0, r0, r1, LSL #5\r
+ adc r2, r2, r2\r
+ rsbs r12, r1, r0, LSR #4\r
+ subcs r0, r0, r1, LSL #4\r
+ adc r2, r2, r2\r
+ASM_PFX(__arm_div4):\r
+ rsbs r12, r1, r0, LSR #3\r
+ subcs r0, r0, r1, LSL #3\r
+ adc r2, r2, r2\r
+ rsbs r12, r1, r0, LSR #2\r
+ subcs r0, r0, r1, LSL #2\r
+ adcs r2, r2, r2\r
+ rsbs r12, r1, r0, LSR #1\r
+ subcs r0, r0, r1, LSL #1\r
+ adc r2, r2, r2\r
+ASM_PFX(__arm_div1):\r
+ subs r1, r0, r1\r
+ movcc r1, r0\r
+ adc r0, r2, r2\r
+ bx r14\r
+ASM_PFX(__arm_div_negative):\r
+ ands r2, r1, #0x80000000\r
+ rsbmi r1, r1, #0\r
+ eors r3, r2, r0, ASR #32\r
+ rsbcs r0, r0, #0\r
+ rsbs r12, r1, r0, LSR #4\r
+ bcc label1\r
+ rsbs r12, r1, r0, LSR #8\r
+ bcc label2\r
+ASM_PFX(__arm_div_large):\r
+ lsl r1, r1, #6\r
+ rsbs r12, r1, r0, LSR #8\r
+ orr r2, r2, #0xfc000000\r
+ bcc label2\r
+ lsl r1, r1, #6\r
+ rsbs r12, r1, r0, LSR #8\r
+ orr r2, r2, #0x3f00000\r
+ bcc label2\r
+ lsl r1, r1, #6\r
+ rsbs r12, r1, r0, LSR #8\r
+ orr r2, r2, #0xfc000\r
+ orrcs r2, r2, #0x3f00\r
+ lslcs r1, r1, #6\r
+ rsbs r12, r1, #0\r
+ bcs ASM_PFX(__aeabi_idiv0)\r
+label3:\r
+ lsrcs r1, r1, #6\r
+label2:\r
+ rsbs r12, r1, r0, LSR #7\r
+ subcs r0, r0, r1, LSL #7\r
+ adc r2, r2, r2\r
+ rsbs r12, r1, r0, LSR #6\r
+ subcs r0, r0, r1, LSL #6\r
+ adc r2, r2, r2\r
+ rsbs r12, r1, r0, LSR #5\r
+ subcs r0, r0, r1, LSL #5\r
+ adc r2, r2, r2\r
+ rsbs r12, r1, r0, LSR #4\r
+ subcs r0, r0, r1, LSL #4\r
+ adc r2, r2, r2\r
+label1:\r
+ rsbs r12, r1, r0, LSR #3\r
+ subcs r0, r0, r1, LSL #3\r
+ adc r2, r2, r2\r
+ rsbs r12, r1, r0, LSR #2\r
+ subcs r0, r0, r1, LSL #2\r
+ adcs r2, r2, r2\r
+ bcs label3\r
+ rsbs r12, r1, r0, LSR #1\r
+ subcs r0, r0, r1, LSL #1\r
+ adc r2, r2, r2\r
+ subs r1, r0, r1\r
+ movcc r1, r0\r
+ adc r0, r2, r2\r
+ asrs r3, r3, #31\r
+ rsbmi r0, r0, #0\r
+ rsbcs r1, r1, #0\r
+ bx r14\r
+\r
+ @ What to do about division by zero? For now, just return.\r
+ASM_PFX(__aeabi_idiv0):\r
+ bx r14\r
--- /dev/null
+//------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+// Copyright (c) 2018, Pete Batard. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+//------------------------------------------------------------------------------\r
+\r
+\r
+ EXPORT __aeabi_uidiv\r
+ EXPORT __aeabi_uidivmod\r
+ EXPORT __aeabi_idiv\r
+ EXPORT __aeabi_idivmod\r
+ EXPORT __rt_udiv\r
+ EXPORT __rt_sdiv\r
+\r
+ AREA Math, CODE, READONLY\r
+\r
+;\r
+;UINT32\r
+;EFIAPI\r
+;__aeabi_uidivmod (\r
+; IN UINT32 Dividend\r
+; IN UINT32 Divisor\r
+; );\r
+;\r
+__aeabi_uidiv\r
+__aeabi_uidivmod\r
+ RSBS r12, r1, r0, LSR #4\r
+ MOV r2, #0\r
+ BCC __arm_div4\r
+ RSBS r12, r1, r0, LSR #8\r
+ BCC __arm_div8\r
+ MOV r3, #0\r
+ B __arm_div_large\r
+\r
+;\r
+;UINT64\r
+;EFIAPI\r
+;__rt_udiv (\r
+; IN UINT32 Divisor,\r
+; IN UINT32 Dividend\r
+; );\r
+;\r
+__rt_udiv\r
+ ; Swap R0 and R1\r
+ MOV r12, r0\r
+ MOV r0, r1\r
+ MOV r1, r12\r
+ B __aeabi_uidivmod\r
+\r
+;\r
+;UINT64\r
+;EFIAPI\r
+;__rt_sdiv (\r
+; IN INT32 Divisor,\r
+; IN INT32 Dividend\r
+; );\r
+;\r
+__rt_sdiv\r
+ ; Swap R0 and R1\r
+ MOV r12, r0\r
+ MOV r0, r1\r
+ MOV r1, r12\r
+ B __aeabi_idivmod\r
+\r
+;\r
+;INT32\r
+;EFIAPI\r
+;__aeabi_idivmod (\r
+; IN INT32 Dividend\r
+; IN INT32 Divisor\r
+; );\r
+;\r
+__aeabi_idiv\r
+__aeabi_idivmod\r
+ ORRS r12, r0, r1\r
+ BMI __arm_div_negative\r
+ RSBS r12, r1, r0, LSR #1\r
+ MOV r2, #0\r
+ BCC __arm_div1\r
+ RSBS r12, r1, r0, LSR #4\r
+ BCC __arm_div4\r
+ RSBS r12, r1, r0, LSR #8\r
+ BCC __arm_div8\r
+ MOV r3, #0\r
+ B __arm_div_large\r
+__arm_div8\r
+ RSBS r12, r1, r0, LSR #7\r
+ SUBCS r0, r0, r1, LSL #7\r
+ ADC r2, r2, r2\r
+ RSBS r12, r1, r0,LSR #6\r
+ SUBCS r0, r0, r1, LSL #6\r
+ ADC r2, r2, r2\r
+ RSBS r12, r1, r0, LSR #5\r
+ SUBCS r0, r0, r1, LSL #5\r
+ ADC r2, r2, r2\r
+ RSBS r12, r1, r0, LSR #4\r
+ SUBCS r0, r0, r1, LSL #4\r
+ ADC r2, r2, r2\r
+__arm_div4\r
+ RSBS r12, r1, r0, LSR #3\r
+ SUBCS r0, r0, r1, LSL #3\r
+ ADC r2, r2, r2\r
+ RSBS r12, r1, r0, LSR #2\r
+ SUBCS r0, r0, r1, LSL #2\r
+ ADCS r2, r2, r2\r
+ RSBS r12, r1, r0, LSR #1\r
+ SUBCS r0, r0, r1, LSL #1\r
+ ADC r2, r2, r2\r
+__arm_div1\r
+ SUBS r1, r0, r1\r
+ MOVCC r1, r0\r
+ ADC r0, r2, r2\r
+ BX r14\r
+__arm_div_negative\r
+ ANDS r2, r1, #0x80000000\r
+ RSBMI r1, r1, #0\r
+ EORS r3, r2, r0, ASR #32\r
+ RSBCS r0, r0, #0\r
+ RSBS r12, r1, r0, LSR #4\r
+ BCC label1\r
+ RSBS r12, r1, r0, LSR #8\r
+ BCC label2\r
+__arm_div_large\r
+ LSL r1, r1, #6\r
+ RSBS r12, r1, r0, LSR #8\r
+ ORR r2, r2, #0xfc000000\r
+ BCC label2\r
+ LSL r1, r1, #6\r
+ RSBS r12, r1, r0, LSR #8\r
+ ORR r2, r2, #0x3f00000\r
+ BCC label2\r
+ LSL r1, r1, #6\r
+ RSBS r12, r1, r0, LSR #8\r
+ ORR r2, r2, #0xfc000\r
+ ORRCS r2, r2, #0x3f00\r
+ LSLCS r1, r1, #6\r
+ RSBS r12, r1, #0\r
+ BCS __aeabi_idiv0\r
+label3\r
+ LSRCS r1, r1, #6\r
+label2\r
+ RSBS r12, r1, r0, LSR #7\r
+ SUBCS r0, r0, r1, LSL #7\r
+ ADC r2, r2, r2\r
+ RSBS r12, r1, r0, LSR #6\r
+ SUBCS r0, r0, r1, LSL #6\r
+ ADC r2, r2, r2\r
+ RSBS r12, r1, r0, LSR #5\r
+ SUBCS r0, r0, r1, LSL #5\r
+ ADC r2, r2, r2\r
+ RSBS r12, r1, r0, LSR #4\r
+ SUBCS r0, r0, r1, LSL #4\r
+ ADC r2, r2, r2\r
+label1\r
+ RSBS r12, r1, r0, LSR #3\r
+ SUBCS r0, r0, r1, LSL #3\r
+ ADC r2, r2, r2\r
+ RSBS r12, r1, r0, LSR #2\r
+ SUBCS r0, r0, r1, LSL #2\r
+ ADCS r2, r2, r2\r
+ BCS label3\r
+ RSBS r12, r1, r0, LSR #1\r
+ SUBCS r0, r0, r1, LSL #1\r
+ ADC r2, r2, r2\r
+ SUBS r1, r0, r1\r
+ MOVCC r1, r0\r
+ ADC r0, r2, r2\r
+ ASRS r3, r3, #31\r
+ RSBMI r0, r0, #0\r
+ RSBCS r1, r1, #0\r
+ BX r14\r
+\r
+ ; What to do about division by zero? For now, just return.\r
+__aeabi_idiv0\r
+ BX r14\r
+\r
+ END\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__divdi3)\r
+ @ args = 0, pretend = 0, frame = 0\r
+ @ frame_needed = 1, uses_anonymous_args = 0\r
+ stmfd sp!, {r4, r5, r7, lr}\r
+ mov r4, r3, asr #31\r
+ add r7, sp, #8\r
+ stmfd sp!, {r10, r11}\r
+ mov r10, r1, asr #31\r
+ sub sp, sp, #8\r
+ mov r11, r10\r
+ mov r5, r4\r
+ eor r0, r0, r10\r
+ eor r1, r1, r10\r
+ eor r2, r2, r4\r
+ eor r3, r3, r4\r
+ subs r2, r2, r4\r
+ sbc r3, r3, r5\r
+ mov ip, #0\r
+ subs r0, r0, r10\r
+ sbc r1, r1, r11\r
+ str ip, [sp, #0]\r
+ bl ASM_PFX(__udivmoddi4)\r
+ eor r2, r10, r4\r
+ eor r3, r10, r4\r
+ eor r0, r0, r2\r
+ eor r1, r1, r3\r
+ subs r0, r0, r2\r
+ sbc r1, r1, r3\r
+ sub sp, r7, #16\r
+ ldmfd sp!, {r10, r11}\r
+ ldmfd sp!, {r4, r5, r7, pc}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__divsi3)\r
+ eor r3, r0, r0, asr #31\r
+ eor r2, r1, r1, asr #31\r
+ stmfd sp!, {r4, r5, r7, lr}\r
+ mov r5, r0, asr #31\r
+ add r7, sp, #8\r
+ mov r4, r1, asr #31\r
+ sub r0, r3, r0, asr #31\r
+ sub r1, r2, r1, asr #31\r
+ bl ASM_PFX(__udivsi3)\r
+ eor r1, r5, r4\r
+ eor r0, r0, r1\r
+ rsb r0, r1, r0\r
+ ldmfd sp!, {r4, r5, r7, pc}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+#\r
+#UINT64\r
+#EFIAPI\r
+#__aeabi_lasr (\r
+# IN UINT64 Value\r
+# IN UINT32 Shift\r
+# );\r
+#\r
+ASM_FUNC(__aeabi_lasr)\r
+ subs r3,r2,#0x20\r
+ bpl L_Test\r
+ rsb r3,r2,#0x20\r
+ lsr r0,r0,r2\r
+ orr r0,r0,r1,LSL r3\r
+ asr r1,r1,r2\r
+ bx lr\r
+L_Test:\r
+ asr r0,r1,r3\r
+ asr r1,r1,#31\r
+ bx lr\r
--- /dev/null
+//------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+//------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+//\r
+// A pair of (unsigned) long longs is returned in {{r0, r1}, {r2, r3}},\r
+// the quotient in {r0, r1}, and the remainder in {r2, r3}.\r
+//\r
+//__value_in_regs lldiv_t\r
+//EFIAPI\r
+//__aeabi_ldivmod (\r
+// IN UINT64 Dividend,\r
+// IN UINT64 Divisor\r
+// );\r
+//\r
+\r
+ASM_FUNC(__aeabi_ldivmod)\r
+ push {r4,lr}\r
+ asrs r4,r1,#1\r
+ eor r4,r4,r3,LSR #1\r
+ bpl L_Test1\r
+ rsbs r0,r0,#0\r
+ rsc r1,r1,#0\r
+L_Test1:\r
+ tst r3,r3\r
+ bpl L_Test2\r
+ rsbs r2,r2,#0\r
+ rsc r3,r3,#0\r
+L_Test2:\r
+ bl ASM_PFX(__aeabi_uldivmod)\r
+ tst r4,#0x40000000\r
+ beq L_Test3\r
+ rsbs r0,r0,#0\r
+ rsc r1,r1,#0\r
+L_Test3:\r
+ tst r4,#0x80000000\r
+ beq L_Exit\r
+ rsbs r2,r2,#0\r
+ rsc r3,r3,#0\r
+L_Exit:\r
+ pop {r4,pc}\r
+\r
+\r
+\r
--- /dev/null
+//------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+// Copyright (c) 2018, Pete Batard. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+//------------------------------------------------------------------------------\r
+\r
+\r
+ IMPORT __aeabi_uldivmod\r
+ EXPORT __aeabi_ldivmod\r
+ EXPORT __rt_sdiv64\r
+\r
+ AREA s___aeabi_ldivmod, CODE, READONLY, ARM\r
+\r
+ ARM\r
+\r
+;\r
+;INT64\r
+;EFIAPI\r
+;__rt_sdiv64 (\r
+; IN INT64 Divisor\r
+; IN INT64 Dividend\r
+; );\r
+;\r
+__rt_sdiv64\r
+ ; Swap r0-r1 and r2-r3\r
+ MOV r12, r0\r
+ MOV r0, r2\r
+ MOV r2, r12\r
+ MOV r12, r1\r
+ MOV r1, r3\r
+ MOV r3, r12\r
+ B __aeabi_ldivmod\r
+\r
+;\r
+;INT64\r
+;EFIAPI\r
+;__aeabi_ldivmod (\r
+; IN INT64 Dividend\r
+; IN INT64 Divisor\r
+; );\r
+;\r
+__aeabi_ldivmod\r
+ PUSH {r4,lr}\r
+ ASRS r4,r1,#1\r
+ EOR r4,r4,r3,LSR #1\r
+ BPL L_Test1\r
+ RSBS r0,r0,#0\r
+ RSC r1,r1,#0\r
+L_Test1\r
+ TST r3,r3\r
+ BPL L_Test2\r
+ RSBS r2,r2,#0\r
+ RSC r3,r3,#0\r
+L_Test2\r
+ BL __aeabi_uldivmod\r
+ TST r4,#0x40000000\r
+ BEQ L_Test3\r
+ RSBS r0,r0,#0\r
+ RSC r1,r1,#0\r
+L_Test3\r
+ TST r4,#0x80000000\r
+ BEQ L_Exit\r
+ RSBS r2,r2,#0\r
+ RSC r3,r3,#0\r
+L_Exit\r
+ POP {r4,pc}\r
+\r
+ END\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2013, ARM. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+#\r
+#VOID\r
+#EFIAPI\r
+#__aeabi_llsl (\r
+# IN VOID *Destination,\r
+# IN VOID *Source,\r
+# IN UINT32 Size\r
+# );\r
+#\r
+ASM_FUNC(__aeabi_llsl)\r
+ subs r3,r2,#0x20\r
+ bpl 1f\r
+ rsb r3,r2,#0x20\r
+ lsl r1,r1,r2\r
+ orr r1,r1,r0,lsr r3\r
+ lsl r0,r0,r2\r
+ bx lr\r
+1:\r
+ lsl r1,r0,r3\r
+ mov r0,#0\r
+ bx lr\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2013, ARM. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+#VOID\r
+#EFIAPI\r
+#__aeabi_llsr (\r
+# IN VOID *Destination,\r
+# IN VOID *Source,\r
+# IN UINT32 Size\r
+# );\r
+#\r
+ASM_FUNC(__aeabi_llsr)\r
+ subs r3,r2,#0x20\r
+ bpl 1f\r
+ rsb r3,r2,#0x20\r
+ lsr r0,r0,r2\r
+ orr r0,r0,r1,lsl r3\r
+ lsr r1,r1,r2\r
+ bx lr\r
+1:\r
+ lsr r0,r1,r3\r
+ mov r1,#0\r
+ bx lr\r
--- /dev/null
+//------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+// Copyright (c) 2018, Pete Batard. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+//------------------------------------------------------------------------------\r
+\r
+ EXPORT __aeabi_llsr\r
+ EXPORT __rt_srsh\r
+\r
+ AREA s___aeabi_llsr, CODE, READONLY, ARM\r
+\r
+ ARM\r
+\r
+;\r
+;VOID\r
+;EFIAPI\r
+;__aeabi_llsr (\r
+; IN UINT64 Value,\r
+; IN UINT32 Shift\r
+;)\r
+;\r
+__aeabi_llsr\r
+__rt_srsh\r
+ SUBS r3,r2,#0x20\r
+ BPL __aeabi_llsr_label1\r
+ RSB r3,r2,#0x20\r
+ LSR r0,r0,r2\r
+ ORR r0,r0,r1,LSL r3\r
+ LSR r1,r1,r2\r
+ BX lr\r
+__aeabi_llsr_label1\r
+ LSR r0,r1,r3\r
+ MOV r1,#0\r
+ BX lr\r
+\r
+ END\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__lshrdi3)\r
+ cmp r2, #31\r
+ bls L2\r
+ cmp r2, #63\r
+ subls r2, r2, #32\r
+ movls r2, r1, lsr r2\r
+ movhi r2, #0\r
+ mov r0, r2\r
+ mov r1, #0\r
+ bx lr\r
+L2:\r
+ cmp r2, #0\r
+ rsbne r3, r2, #32\r
+ movne r3, r1, asl r3\r
+ movne r1, r1, lsr r2\r
+ orrne r0, r3, r0, lsr r2\r
+ bx lr\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2011-2014, ARM Limited. All rights reserved.\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+# VOID\r
+# EFIAPI\r
+# memmove (\r
+# IN VOID *Destination,\r
+# IN CONST VOID *Source,\r
+# IN UINT32 Size\r
+# );\r
+ASM_FUNC(memmove)\r
+ CMP r2, #0\r
+ BXEQ lr\r
+ CMP r0, r1\r
+ BXEQ lr\r
+ BHI memmove_backward\r
+\r
+memmove_forward:\r
+ LDRB r3, [r1], #1\r
+ STRB r3, [r0], #1\r
+ SUBS r2, r2, #1\r
+ BXEQ lr\r
+ B memmove_forward\r
+\r
+memmove_backward:\r
+ add r0, r2\r
+ add r1, r2\r
+memmove_backward_loop:\r
+ LDRB r3, [r1, #-1]!\r
+ STRB r3, [r0, #-1]!\r
+ SUBS r2, r2, #1\r
+ BXEQ lr\r
+ B memmove_backward_loop\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__moddi3)\r
+ stmfd sp!, {r4, r5, r7, lr}\r
+ mov r4, r1, asr #31\r
+ add r7, sp, #8\r
+ stmfd sp!, {r10, r11}\r
+ mov r10, r3, asr #31\r
+ sub sp, sp, #16\r
+ mov r5, r4\r
+ mov r11, r10\r
+ eor r0, r0, r4\r
+ eor r1, r1, r4\r
+ eor r2, r2, r10\r
+ eor r3, r3, r10\r
+ add ip, sp, #8\r
+ subs r0, r0, r4\r
+ sbc r1, r1, r5\r
+ subs r2, r2, r10\r
+ sbc r3, r3, r11\r
+ str ip, [sp, #0]\r
+ bl ASM_PFX(__udivmoddi4)\r
+ ldrd r0, [sp, #8]\r
+ eor r0, r0, r4\r
+ eor r1, r1, r4\r
+ subs r0, r0, r4\r
+ sbc r1, r1, r5\r
+ sub sp, r7, #16\r
+ ldmfd sp!, {r10, r11}\r
+ ldmfd sp!, {r4, r5, r7, pc}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__modsi3)\r
+ stmfd sp!, {r4, r5, r7, lr}\r
+ add r7, sp, #8\r
+ mov r5, r0\r
+ mov r4, r1\r
+ bl ASM_PFX(__divsi3)\r
+ mul r0, r4, r0\r
+ rsb r0, r0, r5\r
+ ldmfd sp!, {r4, r5, r7, pc}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__muldi3)\r
+ stmfd sp!, {r4, r5, r6, r7, lr}\r
+ add r7, sp, #12\r
+ stmfd sp!, {r8, r10, r11}\r
+ ldr r11, L4\r
+ mov r4, r0, lsr #16\r
+ and r8, r0, r11\r
+ and ip, r2, r11\r
+ mul lr, ip, r8\r
+ mul ip, r4, ip\r
+ sub sp, sp, #8\r
+ add r10, ip, lr, lsr #16\r
+ and ip, r10, r11\r
+ and lr, lr, r11\r
+ mov r6, r2, lsr #16\r
+ str r4, [sp, #4]\r
+ add r4, lr, ip, asl #16\r
+ mul ip, r8, r6\r
+ mov r5, r10, lsr #16\r
+ add r10, ip, r4, lsr #16\r
+ and ip, r10, r11\r
+ and lr, r4, r11\r
+ add r4, lr, ip, asl #16\r
+ mul r0, r3, r0\r
+ add ip, r5, r10, lsr #16\r
+ ldr r5, [sp, #4]\r
+ mla r0, r2, r1, r0\r
+ mla r5, r6, r5, ip\r
+ mov r10, r4\r
+ add r11, r0, r5\r
+ mov r1, r11\r
+ mov r0, r4\r
+ sub sp, r7, #24\r
+ ldmfd sp!, {r8, r10, r11}\r
+ ldmfd sp!, {r4, r5, r6, r7, pc}\r
+ .p2align 2\r
+L5:\r
+ .align 2\r
+L4:\r
+ .long 65535\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+.text\r
+\r
+GCC_ASM_EXPORT(__ARM_ll_mullu)\r
+GCC_ASM_EXPORT(__aeabi_lmul)\r
+#\r
+#INT64\r
+#EFIAPI\r
+#__aeabi_lmul (\r
+# IN INT64 Multiplicand\r
+# IN INT32 Multiplier\r
+# );\r
+#\r
+ASM_PFX(__ARM_ll_mullu):\r
+ mov r3, #0\r
+# Make upper part of INT64 Multiplier 0 and use __aeabi_lmul\r
+\r
+#\r
+#INT64\r
+#EFIAPI\r
+#__aeabi_lmul (\r
+# IN INT64 Multiplicand\r
+# IN INT64 Multiplier\r
+# );\r
+#\r
+ASM_PFX(__aeabi_lmul):\r
+ stmdb sp!, {lr}\r
+ mov lr, r0\r
+ umull r0, ip, r2, lr\r
+ mla r1, r2, r1, ip\r
+ mla r1, r3, lr, r1\r
+ ldmia sp!, {pc}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+\r
+ .text\r
+ .align 2\r
+ GCC_ASM_EXPORT(__aeabi_ulcmp)\r
+\r
+ASM_PFX(__aeabi_ulcmp):\r
+ stmfd sp!, {r4, r5, r8}\r
+ cmp r3, r1\r
+ mov r8, r0\r
+ mov r9, r1\r
+ mov r4, r2\r
+ mov r5, r3\r
+ bls L16\r
+L2:\r
+ mvn r0, #0\r
+L1:\r
+ ldmfd sp!, {r4, r5, r8}\r
+ bx lr\r
+L16:\r
+ beq L17\r
+L4:\r
+ cmp r9, r5\r
+ bhi L7\r
+ beq L18\r
+ cmp r8, r4\r
+L14:\r
+ cmpeq r9, r5\r
+ moveq r0, #0\r
+ beq L1\r
+ b L1\r
+L18:\r
+ cmp r8, r4\r
+ bls L14\r
+L7:\r
+ mov r0, #1\r
+ b L1\r
+L17:\r
+ cmp r2, r0\r
+ bhi L2\r
+ b L4\r
+\r
--- /dev/null
+#/** @file\r
+# Compiler intrinsic for ARM compiler\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#**/\r
+#\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+.syntax unified\r
+\r
+ASM_FUNC(__switch16)\r
+ ldrh ip, [lr, #-1]\r
+ cmp r0, ip\r
+ add r0, lr, r0, lsl #1\r
+ ldrshcc r0, [r0, #1]\r
+ add ip, lr, ip, lsl #1\r
+ ldrshcs r0, [ip, #1]\r
+ add ip, lr, r0, lsl #1\r
+ bx ip\r
+\r
+\r
--- /dev/null
+#/** @file\r
+# Compiler intrinsic for ARM compiler\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#**/\r
+#\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+.syntax unified\r
+\r
+ASM_FUNC(__switch32)\r
+ ldr ip, [lr, #-1]\r
+ cmp r0, ip\r
+ add r0, lr, r0, lsl #2\r
+ ldrcc r0, [r0, #3]\r
+ add ip, lr, ip, lsl #2\r
+ ldrcs r0, [ip, #3]\r
+ add ip, lr, r0\r
+ bx ip\r
+\r
--- /dev/null
+#/** @file\r
+# Compiler intrinsic for ARM compiler\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#**/\r
+#\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+.syntax unified\r
+\r
+ASM_FUNC(__switch8)\r
+ ldrb ip, [lr, #-1]\r
+ cmp r0, ip\r
+ ldrsbcc r0, [lr, r0]\r
+ ldrsbcs r0, [lr, ip]\r
+ add ip, lr, r0, lsl #1\r
+ bx ip\r
+\r
--- /dev/null
+#/** @file\r
+# Compiler intrinsic for ARM compiler\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#**/\r
+#\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+.syntax unified\r
+\r
+ASM_FUNC(__switchu8)\r
+ ldrb ip,[lr,#-1]\r
+ cmp r0,ip\r
+ ldrbcc r0,[lr,r0]\r
+ ldrbcs r0,[lr,ip]\r
+ add ip,lr,r0,LSL #1\r
+ bx ip\r
+\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__ucmpdi2)\r
+ stmfd sp!, {r4, r5, r8, lr}\r
+ cmp r1, r3\r
+ mov r8, r0\r
+ mov r4, r2\r
+ mov r5, r3\r
+ bcc L2\r
+ bhi L4\r
+ cmp r0, r2\r
+ bcc L2\r
+ movls r0, #1\r
+ bls L8\r
+ b L4\r
+L2:\r
+ mov r0, #0\r
+ b L8\r
+L4:\r
+ mov r0, #2\r
+L8:\r
+ ldmfd sp!, {r4, r5, r8, pc}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__udivdi3)\r
+ stmfd sp!, {r7, lr}\r
+ add r7, sp, #0\r
+ sub sp, sp, #8\r
+ mov ip, #0\r
+ str ip, [sp, #0]\r
+ bl ASM_PFX(__udivmoddi4)\r
+ sub sp, r7, #0\r
+ ldmfd sp!, {r7, pc}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ .syntax unified\r
+\r
+ASM_FUNC(__udivmoddi4)\r
+ stmfd sp!, {r4, r5, r6, r7, lr}\r
+ add r7, sp, #12\r
+ stmfd sp!, {r10, r11}\r
+ sub sp, sp, #20\r
+ stmia sp, {r2-r3}\r
+ ldr r6, [sp, #48]\r
+ orrs r2, r2, r3\r
+ mov r10, r0\r
+ mov r11, r1\r
+ beq L2\r
+ subs ip, r1, #0\r
+ bne L4\r
+ cmp r3, #0\r
+ bne L6\r
+ cmp r6, #0\r
+ beq L8\r
+ mov r1, r2\r
+ bl ASM_PFX(__umodsi3)\r
+ mov r1, #0\r
+ stmia r6, {r0-r1}\r
+L8:\r
+ ldr r1, [sp, #0]\r
+ mov r0, r10\r
+ b L45\r
+L6:\r
+ cmp r6, #0\r
+ movne r1, #0\r
+ stmiane r6, {r0-r1}\r
+ b L2\r
+L4:\r
+ ldr r1, [sp, #0]\r
+ cmp r1, #0\r
+ bne L12\r
+ ldr r2, [sp, #4]\r
+ cmp r2, #0\r
+ bne L14\r
+ cmp r6, #0\r
+ beq L16\r
+ mov r1, r2\r
+ mov r0, r11\r
+ bl ASM_PFX(__umodsi3)\r
+ mov r1, #0\r
+ stmia r6, {r0-r1}\r
+L16:\r
+ ldr r1, [sp, #4]\r
+ mov r0, r11\r
+L45:\r
+ bl ASM_PFX(__udivsi3)\r
+L46:\r
+ mov r10, r0\r
+ mov r11, #0\r
+ b L10\r
+L14:\r
+ subs r1, r0, #0\r
+ bne L18\r
+ cmp r6, #0\r
+ beq L16\r
+ ldr r1, [sp, #4]\r
+ mov r0, r11\r
+ bl ASM_PFX(__umodsi3)\r
+ mov r4, r10\r
+ mov r5, r0\r
+ stmia r6, {r4-r5}\r
+ b L16\r
+L18:\r
+ sub r3, r2, #1\r
+ tst r2, r3\r
+ bne L22\r
+ cmp r6, #0\r
+ movne r4, r0\r
+ andne r5, ip, r3\r
+ stmiane r6, {r4-r5}\r
+L24:\r
+ rsb r3, r2, #0\r
+ and r3, r2, r3\r
+ clz r3, r3\r
+ rsb r3, r3, #31\r
+ mov r0, ip, lsr r3\r
+ b L46\r
+L22:\r
+ clz r2, r2\r
+ clz r3, ip\r
+ rsb r3, r3, r2\r
+ cmp r3, #30\r
+ bhi L48\r
+ rsb r2, r3, #31\r
+ add lr, r3, #1\r
+ mov r3, r1, asl r2\r
+ str r3, [sp, #12]\r
+ mov r3, r1, lsr lr\r
+ ldr r0, [sp, #0]\r
+ mov r5, ip, lsr lr\r
+ orr r4, r3, ip, asl r2\r
+ str r0, [sp, #8]\r
+ b L29\r
+L12:\r
+ ldr r3, [sp, #4]\r
+ cmp r3, #0\r
+ bne L30\r
+ sub r3, r1, #1\r
+ tst r1, r3\r
+ bne L32\r
+ cmp r6, #0\r
+ andne r3, r3, r0\r
+ movne r2, r3\r
+ movne r3, #0\r
+ stmiane r6, {r2-r3}\r
+L34:\r
+ cmp r1, #1\r
+ beq L10\r
+ rsb r3, r1, #0\r
+ and r3, r1, r3\r
+ clz r3, r3\r
+ rsb r0, r3, #31\r
+ mov r1, ip, lsr r0\r
+ rsb r3, r0, #32\r
+ mov r0, r10, lsr r0\r
+ orr ip, r0, ip, asl r3\r
+ str r1, [sp, #12]\r
+ str ip, [sp, #8]\r
+ ldrd r10, [sp, #8]\r
+ b L10\r
+L32:\r
+ clz r2, r1\r
+ clz r3, ip\r
+ rsb r3, r3, r2\r
+ rsb r4, r3, #31\r
+ mov r2, r0, asl r4\r
+ mvn r1, r3\r
+ and r2, r2, r1, asr #31\r
+ add lr, r3, #33\r
+ str r2, [sp, #8]\r
+ add r2, r3, #1\r
+ mov r3, r3, asr #31\r
+ and r0, r3, r0, asl r1\r
+ mov r3, r10, lsr r2\r
+ orr r3, r3, ip, asl r4\r
+ and r3, r3, r1, asr #31\r
+ orr r0, r0, r3\r
+ mov r3, ip, lsr lr\r
+ str r0, [sp, #12]\r
+ mov r0, r10, lsr lr\r
+ and r5, r3, r2, asr #31\r
+ rsb r3, lr, #31\r
+ mov r3, r3, asr #31\r
+ orr r0, r0, ip, asl r1\r
+ and r3, r3, ip, lsr r2\r
+ and r0, r0, r2, asr #31\r
+ orr r4, r3, r0\r
+ b L29\r
+L30:\r
+ clz r2, r3\r
+ clz r3, ip\r
+ rsb r3, r3, r2\r
+ cmp r3, #31\r
+ bls L37\r
+L48:\r
+ cmp r6, #0\r
+ stmiane r6, {r10-r11}\r
+ b L2\r
+L37:\r
+ rsb r1, r3, #31\r
+ mov r0, r0, asl r1\r
+ add lr, r3, #1\r
+ mov r2, #0\r
+ str r0, [sp, #12]\r
+ mov r0, r10, lsr lr\r
+ str r2, [sp, #8]\r
+ sub r2, r3, #31\r
+ and r0, r0, r2, asr #31\r
+ mov r3, ip, lsr lr\r
+ orr r4, r0, ip, asl r1\r
+ and r5, r3, r2, asr #31\r
+L29:\r
+ mov ip, #0\r
+ mov r10, ip\r
+ b L40\r
+L41:\r
+ ldr r1, [sp, #12]\r
+ ldr r2, [sp, #8]\r
+ mov r3, r4, lsr #31\r
+ orr r5, r3, r5, asl #1\r
+ mov r3, r1, lsr #31\r
+ orr r4, r3, r4, asl #1\r
+ mov r3, r2, lsr #31\r
+ orr r0, r3, r1, asl #1\r
+ orr r1, ip, r2, asl #1\r
+ ldmia sp, {r2-r3}\r
+ str r0, [sp, #12]\r
+ subs r2, r2, r4\r
+ sbc r3, r3, r5\r
+ str r1, [sp, #8]\r
+ subs r0, r2, #1\r
+ sbc r1, r3, #0\r
+ mov r2, r1, asr #31\r
+ ldmia sp, {r0-r1}\r
+ mov r3, r2\r
+ and ip, r2, #1\r
+ and r3, r3, r1\r
+ and r2, r2, r0\r
+ subs r4, r4, r2\r
+ sbc r5, r5, r3\r
+ add r10, r10, #1\r
+L40:\r
+ cmp r10, lr\r
+ bne L41\r
+ ldrd r0, [sp, #8]\r
+ adds r0, r0, r0\r
+ adc r1, r1, r1\r
+ cmp r6, #0\r
+ orr r10, r0, ip\r
+ mov r11, r1\r
+ stmiane r6, {r4-r5}\r
+ b L10\r
+L2:\r
+ mov r10, #0\r
+ mov r11, #0\r
+L10:\r
+ mov r0, r10\r
+ mov r1, r11\r
+ sub sp, r7, #20\r
+ ldmfd sp!, {r10, r11}\r
+ ldmfd sp!, {r4, r5, r6, r7, pc}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ .syntax unified\r
+\r
+ASM_FUNC(__udivsi3)\r
+ cmp r1, #0\r
+ cmpne r0, #0\r
+ stmfd sp!, {r4, r5, r7, lr}\r
+ add r7, sp, #8\r
+ beq L2\r
+ clz r2, r1\r
+ clz r3, r0\r
+ rsb r3, r3, r2\r
+ cmp r3, #31\r
+ bhi L2\r
+ ldmfdeq sp!, {r4, r5, r7, pc}\r
+ add r5, r3, #1\r
+ rsb r3, r3, #31\r
+ mov lr, #0\r
+ mov r2, r0, asl r3\r
+ mov ip, r0, lsr r5\r
+ mov r4, lr\r
+ b L8\r
+L9:\r
+ mov r0, r2, lsr #31\r
+ orr ip, r0, ip, asl #1\r
+ orr r2, r3, lr\r
+ rsb r3, ip, r1\r
+ sub r3, r3, #1\r
+ and r0, r1, r3, asr #31\r
+ mov lr, r3, lsr #31\r
+ rsb ip, r0, ip\r
+ add r4, r4, #1\r
+L8:\r
+ cmp r4, r5\r
+ mov r3, r2, asl #1\r
+ bne L9\r
+ orr r0, r3, lr\r
+ ldmfd sp!, {r4, r5, r7, pc}\r
+L2:\r
+ mov r0, #0\r
+ ldmfd sp!, {r4, r5, r7, pc}\r
+\r
--- /dev/null
+//------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+//------------------------------------------------------------------------------\r
+\r
+\r
+\r
+ .text\r
+ .align 2\r
+ GCC_ASM_EXPORT(__aeabi_uldivmod)\r
+\r
+//\r
+//UINT64\r
+//EFIAPI\r
+//__aeabi_uldivmod (\r
+// IN UINT64 Dividend\r
+// IN UINT64 Divisor\r
+// )\r
+//\r
+ASM_PFX(__aeabi_uldivmod):\r
+ stmdb sp!, {r4, r5, r6, lr}\r
+ mov r4, r1\r
+ mov r5, r0\r
+ mov r6, #0 // 0x0\r
+ orrs ip, r3, r2, lsr #31\r
+ bne ASM_PFX(__aeabi_uldivmod_label1)\r
+ tst r2, r2\r
+ beq ASM_PFX(_ll_div0)\r
+ movs ip, r2, lsr #15\r
+ addeq r6, r6, #16 // 0x10\r
+ mov ip, r2, lsl r6\r
+ movs lr, ip, lsr #23\r
+ moveq ip, ip, lsl #8\r
+ addeq r6, r6, #8 // 0x8\r
+ movs lr, ip, lsr #27\r
+ moveq ip, ip, lsl #4\r
+ addeq r6, r6, #4 // 0x4\r
+ movs lr, ip, lsr #29\r
+ moveq ip, ip, lsl #2\r
+ addeq r6, r6, #2 // 0x2\r
+ movs lr, ip, lsr #30\r
+ moveq ip, ip, lsl #1\r
+ addeq r6, r6, #1 // 0x1\r
+ b ASM_PFX(_ll_udiv_small)\r
+ASM_PFX(__aeabi_uldivmod_label1):\r
+ tst r3, #-2147483648 // 0x80000000\r
+ bne ASM_PFX(__aeabi_uldivmod_label2)\r
+ movs ip, r3, lsr #15\r
+ addeq r6, r6, #16 // 0x10\r
+ mov ip, r3, lsl r6\r
+ movs lr, ip, lsr #23\r
+ moveq ip, ip, lsl #8\r
+ addeq r6, r6, #8 // 0x8\r
+ movs lr, ip, lsr #27\r
+ moveq ip, ip, lsl #4\r
+ addeq r6, r6, #4 // 0x4\r
+ movs lr, ip, lsr #29\r
+ moveq ip, ip, lsl #2\r
+ addeq r6, r6, #2 // 0x2\r
+ movs lr, ip, lsr #30\r
+ addeq r6, r6, #1 // 0x1\r
+ rsb r3, r6, #32 // 0x20\r
+ moveq ip, ip, lsl #1\r
+ orr ip, ip, r2, lsr r3\r
+ mov lr, r2, lsl r6\r
+ b ASM_PFX(_ll_udiv_big)\r
+ASM_PFX(__aeabi_uldivmod_label2):\r
+ mov ip, r3\r
+ mov lr, r2\r
+ b ASM_PFX(_ll_udiv_ginormous)\r
+\r
+ASM_PFX(_ll_udiv_small):\r
+ cmp r4, ip, lsl #1\r
+ mov r3, #0 // 0x0\r
+ subcs r4, r4, ip, lsl #1\r
+ addcs r3, r3, #2 // 0x2\r
+ cmp r4, ip\r
+ subcs r4, r4, ip\r
+ adcs r3, r3, #0 // 0x0\r
+ add r2, r6, #32 // 0x20\r
+ cmp r2, #32 // 0x20\r
+ rsb ip, ip, #0 // 0x0\r
+ bcc ASM_PFX(_ll_udiv_small_label1)\r
+ orrs r0, r4, r5, lsr #30\r
+ moveq r4, r5\r
+ moveq r5, #0 // 0x0\r
+ subeq r2, r2, #32 // 0x20\r
+ASM_PFX(_ll_udiv_small_label1):\r
+ mov r1, #0 // 0x0\r
+ cmp r2, #16 // 0x10\r
+ bcc ASM_PFX(_ll_udiv_small_label2)\r
+ movs r0, r4, lsr #14\r
+ moveq r4, r4, lsl #16\r
+ addeq r1, r1, #16 // 0x10\r
+ASM_PFX(_ll_udiv_small_label2):\r
+ sub lr, r2, r1\r
+ cmp lr, #8 // 0x8\r
+ bcc ASM_PFX(_ll_udiv_small_label3)\r
+ movs r0, r4, lsr #22\r
+ moveq r4, r4, lsl #8\r
+ addeq r1, r1, #8 // 0x8\r
+ASM_PFX(_ll_udiv_small_label3):\r
+ rsb r0, r1, #32 // 0x20\r
+ sub r2, r2, r1\r
+ orr r4, r4, r5, lsr r0\r
+ mov r5, r5, lsl r1\r
+ cmp r2, #1 // 0x1\r
+ bcc ASM_PFX(_ll_udiv_small_label5)\r
+ sub r2, r2, #1 // 0x1\r
+ and r0, r2, #7 // 0x7\r
+ eor r0, r0, #7 // 0x7\r
+ adds r0, r0, r0, lsl #1\r
+ add pc, pc, r0, lsl #2\r
+ nop // (mov r0,r0)\r
+ASM_PFX(_ll_udiv_small_label4):\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ sub r2, r2, #8 // 0x8\r
+ tst r2, r2\r
+ rsbcc r4, ip, r4\r
+ bpl ASM_PFX(_ll_udiv_small_label4)\r
+ASM_PFX(_ll_udiv_small_label5):\r
+ mov r2, r4, lsr r6\r
+ bic r4, r4, r2, lsl r6\r
+ adcs r0, r5, r5\r
+ adc r1, r4, r4\r
+ add r1, r1, r3, lsl r6\r
+ mov r3, #0 // 0x0\r
+ ldmia sp!, {r4, r5, r6, pc}\r
+\r
+ASM_PFX(_ll_udiv_big):\r
+ subs r0, r5, lr\r
+ mov r3, #0 // 0x0\r
+ sbcs r1, r4, ip\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r3, r3, #0 // 0x0\r
+ subs r0, r5, lr\r
+ sbcs r1, r4, ip\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r3, r3, #0 // 0x0\r
+ subs r0, r5, lr\r
+ sbcs r1, r4, ip\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r3, r3, #0 // 0x0\r
+ mov r1, #0 // 0x0\r
+ rsbs lr, lr, #0 // 0x0\r
+ rsc ip, ip, #0 // 0x0\r
+ cmp r6, #16 // 0x10\r
+ bcc ASM_PFX(_ll_udiv_big_label1)\r
+ movs r0, r4, lsr #14\r
+ moveq r4, r4, lsl #16\r
+ addeq r1, r1, #16 // 0x10\r
+ASM_PFX(_ll_udiv_big_label1):\r
+ sub r2, r6, r1\r
+ cmp r2, #8 // 0x8\r
+ bcc ASM_PFX(_ll_udiv_big_label2)\r
+ movs r0, r4, lsr #22\r
+ moveq r4, r4, lsl #8\r
+ addeq r1, r1, #8 // 0x8\r
+ASM_PFX(_ll_udiv_big_label2):\r
+ rsb r0, r1, #32 // 0x20\r
+ sub r2, r6, r1\r
+ orr r4, r4, r5, lsr r0\r
+ mov r5, r5, lsl r1\r
+ cmp r2, #1 // 0x1\r
+ bcc ASM_PFX(_ll_udiv_big_label4)\r
+ sub r2, r2, #1 // 0x1\r
+ and r0, r2, #3 // 0x3\r
+ rsb r0, r0, #3 // 0x3\r
+ adds r0, r0, r0, lsl #1\r
+ add pc, pc, r0, lsl #3\r
+ nop // (mov r0,r0)\r
+ASM_PFX(_ll_udiv_big_label3):\r
+ adcs r5, r5, r5\r
+ adcs r4, r4, r4\r
+ adcs r0, lr, r5\r
+ adcs r1, ip, r4\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r5, r5, r5\r
+ adcs r4, r4, r4\r
+ adcs r0, lr, r5\r
+ adcs r1, ip, r4\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r5, r5, r5\r
+ adcs r4, r4, r4\r
+ adcs r0, lr, r5\r
+ adcs r1, ip, r4\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ sub r2, r2, #4 // 0x4\r
+ adcs r5, r5, r5\r
+ adcs r4, r4, r4\r
+ adcs r0, lr, r5\r
+ adcs r1, ip, r4\r
+ tst r2, r2\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ bpl ASM_PFX(_ll_udiv_big_label3)\r
+ASM_PFX(_ll_udiv_big_label4):\r
+ mov r1, #0 // 0x0\r
+ mov r2, r5, lsr r6\r
+ bic r5, r5, r2, lsl r6\r
+ adcs r0, r5, r5\r
+ adc r1, r1, #0 // 0x0\r
+ movs lr, r3, lsl r6\r
+ mov r3, r4, lsr r6\r
+ bic r4, r4, r3, lsl r6\r
+ adc r1, r1, #0 // 0x0\r
+ adds r0, r0, lr\r
+ orr r2, r2, r4, ror r6\r
+ adc r1, r1, #0 // 0x0\r
+ ldmia sp!, {r4, r5, r6, pc}\r
+\r
+ASM_PFX(_ll_udiv_ginormous):\r
+ subs r2, r5, lr\r
+ mov r1, #0 // 0x0\r
+ sbcs r3, r4, ip\r
+ adc r0, r1, r1\r
+ movcc r2, r5\r
+ movcc r3, r4\r
+ ldmia sp!, {r4, r5, r6, pc}\r
+\r
+ASM_PFX(_ll_div0):\r
+ ldmia sp!, {r4, r5, r6, lr}\r
+ mov r0, #0 // 0x0\r
+ mov r1, #0 // 0x0\r
+ b ASM_PFX(__aeabi_ldiv0)\r
+\r
+ASM_PFX(__aeabi_ldiv0):\r
+ bx r14\r
+\r
+\r
--- /dev/null
+//------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+// Copyright (c) 2018, Pete Batard. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+//------------------------------------------------------------------------------\r
+\r
+\r
+ EXPORT __aeabi_uldivmod\r
+ EXPORT __rt_udiv64\r
+\r
+ AREA s___aeabi_uldivmod, CODE, READONLY, ARM\r
+\r
+ ARM\r
+\r
+;\r
+;UINT64\r
+;EFIAPI\r
+;__rt_udiv64 (\r
+; IN UINT64 Divisor\r
+; IN UINT64 Dividend\r
+; )\r
+;\r
+__rt_udiv64\r
+ ; Swap r0-r1 and r2-r3\r
+ mov r12, r0\r
+ mov r0, r2\r
+ mov r2, r12\r
+ mov r12, r1\r
+ mov r1, r3\r
+ mov r3, r12\r
+ b __aeabi_uldivmod\r
+\r
+;\r
+;UINT64\r
+;EFIAPI\r
+;__aeabi_uldivmod (\r
+; IN UINT64 Dividend\r
+; IN UINT64 Divisor\r
+; )\r
+;\r
+__aeabi_uldivmod\r
+ stmdb sp!, {r4, r5, r6, lr}\r
+ mov r4, r1\r
+ mov r5, r0\r
+ mov r6, #0 ; 0x0\r
+ orrs ip, r3, r2, lsr #31\r
+ bne __aeabi_uldivmod_label1\r
+ tst r2, r2\r
+ beq _ll_div0\r
+ movs ip, r2, lsr #15\r
+ addeq r6, r6, #16 ; 0x10\r
+ mov ip, r2, lsl r6\r
+ movs lr, ip, lsr #23\r
+ moveq ip, ip, lsl #8\r
+ addeq r6, r6, #8 ; 0x8\r
+ movs lr, ip, lsr #27\r
+ moveq ip, ip, lsl #4\r
+ addeq r6, r6, #4 ; 0x4\r
+ movs lr, ip, lsr #29\r
+ moveq ip, ip, lsl #2\r
+ addeq r6, r6, #2 ; 0x2\r
+ movs lr, ip, lsr #30\r
+ moveq ip, ip, lsl #1\r
+ addeq r6, r6, #1 ; 0x1\r
+ b _ll_udiv_small\r
+__aeabi_uldivmod_label1\r
+ tst r3, #-2147483648 ; 0x80000000\r
+ bne __aeabi_uldivmod_label2\r
+ movs ip, r3, lsr #15\r
+ addeq r6, r6, #16 ; 0x10\r
+ mov ip, r3, lsl r6\r
+ movs lr, ip, lsr #23\r
+ moveq ip, ip, lsl #8\r
+ addeq r6, r6, #8 ; 0x8\r
+ movs lr, ip, lsr #27\r
+ moveq ip, ip, lsl #4\r
+ addeq r6, r6, #4 ; 0x4\r
+ movs lr, ip, lsr #29\r
+ moveq ip, ip, lsl #2\r
+ addeq r6, r6, #2 ; 0x2\r
+ movs lr, ip, lsr #30\r
+ addeq r6, r6, #1 ; 0x1\r
+ rsb r3, r6, #32 ; 0x20\r
+ moveq ip, ip, lsl #1\r
+ orr ip, ip, r2, lsr r3\r
+ mov lr, r2, lsl r6\r
+ b _ll_udiv_big\r
+__aeabi_uldivmod_label2\r
+ mov ip, r3\r
+ mov lr, r2\r
+ b _ll_udiv_ginormous\r
+\r
+_ll_udiv_small\r
+ cmp r4, ip, lsl #1\r
+ mov r3, #0 ; 0x0\r
+ subcs r4, r4, ip, lsl #1\r
+ addcs r3, r3, #2 ; 0x2\r
+ cmp r4, ip\r
+ subcs r4, r4, ip\r
+ adcs r3, r3, #0 ; 0x0\r
+ add r2, r6, #32 ; 0x20\r
+ cmp r2, #32 ; 0x20\r
+ rsb ip, ip, #0 ; 0x0\r
+ bcc _ll_udiv_small_label1\r
+ orrs r0, r4, r5, lsr #30\r
+ moveq r4, r5\r
+ moveq r5, #0 ; 0x0\r
+ subeq r2, r2, #32 ; 0x20\r
+_ll_udiv_small_label1\r
+ mov r1, #0 ; 0x0\r
+ cmp r2, #16 ; 0x10\r
+ bcc _ll_udiv_small_label2\r
+ movs r0, r4, lsr #14\r
+ moveq r4, r4, lsl #16\r
+ addeq r1, r1, #16 ; 0x10\r
+_ll_udiv_small_label2\r
+ sub lr, r2, r1\r
+ cmp lr, #8 ; 0x8\r
+ bcc _ll_udiv_small_label3\r
+ movs r0, r4, lsr #22\r
+ moveq r4, r4, lsl #8\r
+ addeq r1, r1, #8 ; 0x8\r
+_ll_udiv_small_label3\r
+ rsb r0, r1, #32 ; 0x20\r
+ sub r2, r2, r1\r
+ orr r4, r4, r5, lsr r0\r
+ mov r5, r5, lsl r1\r
+ cmp r2, #1 ; 0x1\r
+ bcc _ll_udiv_small_label5\r
+ sub r2, r2, #1 ; 0x1\r
+ and r0, r2, #7 ; 0x7\r
+ eor r0, r0, #7 ; 0x7\r
+ adds r0, r0, r0, lsl #1\r
+ add pc, pc, r0, lsl #2\r
+ nop ; (mov r0,r0)\r
+_ll_udiv_small_label4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ rsbcc r4, ip, r4\r
+ adcs r5, r5, r5\r
+ adcs r4, ip, r4, lsl #1\r
+ sub r2, r2, #8 ; 0x8\r
+ tst r2, r2\r
+ rsbcc r4, ip, r4\r
+ bpl _ll_udiv_small_label4\r
+_ll_udiv_small_label5\r
+ mov r2, r4, lsr r6\r
+ bic r4, r4, r2, lsl r6\r
+ adcs r0, r5, r5\r
+ adc r1, r4, r4\r
+ add r1, r1, r3, lsl r6\r
+ mov r3, #0 ; 0x0\r
+ ldmia sp!, {r4, r5, r6, pc}\r
+\r
+_ll_udiv_big\r
+ subs r0, r5, lr\r
+ mov r3, #0 ; 0x0\r
+ sbcs r1, r4, ip\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r3, r3, #0 ; 0x0\r
+ subs r0, r5, lr\r
+ sbcs r1, r4, ip\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r3, r3, #0 ; 0x0\r
+ subs r0, r5, lr\r
+ sbcs r1, r4, ip\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r3, r3, #0 ; 0x0\r
+ mov r1, #0 ; 0x0\r
+ rsbs lr, lr, #0 ; 0x0\r
+ rsc ip, ip, #0 ; 0x0\r
+ cmp r6, #16 ; 0x10\r
+ bcc _ll_udiv_big_label1\r
+ movs r0, r4, lsr #14\r
+ moveq r4, r4, lsl #16\r
+ addeq r1, r1, #16 ; 0x10\r
+_ll_udiv_big_label1\r
+ sub r2, r6, r1\r
+ cmp r2, #8 ; 0x8\r
+ bcc _ll_udiv_big_label2\r
+ movs r0, r4, lsr #22\r
+ moveq r4, r4, lsl #8\r
+ addeq r1, r1, #8 ; 0x8\r
+_ll_udiv_big_label2\r
+ rsb r0, r1, #32 ; 0x20\r
+ sub r2, r6, r1\r
+ orr r4, r4, r5, lsr r0\r
+ mov r5, r5, lsl r1\r
+ cmp r2, #1 ; 0x1\r
+ bcc _ll_udiv_big_label4\r
+ sub r2, r2, #1 ; 0x1\r
+ and r0, r2, #3 ; 0x3\r
+ rsb r0, r0, #3 ; 0x3\r
+ adds r0, r0, r0, lsl #1\r
+ add pc, pc, r0, lsl #3\r
+ nop ; (mov r0,r0)\r
+_ll_udiv_big_label3\r
+ adcs r5, r5, r5\r
+ adcs r4, r4, r4\r
+ adcs r0, lr, r5\r
+ adcs r1, ip, r4\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r5, r5, r5\r
+ adcs r4, r4, r4\r
+ adcs r0, lr, r5\r
+ adcs r1, ip, r4\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ adcs r5, r5, r5\r
+ adcs r4, r4, r4\r
+ adcs r0, lr, r5\r
+ adcs r1, ip, r4\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ sub r2, r2, #4 ; 0x4\r
+ adcs r5, r5, r5\r
+ adcs r4, r4, r4\r
+ adcs r0, lr, r5\r
+ adcs r1, ip, r4\r
+ tst r2, r2\r
+ movcs r5, r0\r
+ movcs r4, r1\r
+ bpl _ll_udiv_big_label3\r
+_ll_udiv_big_label4\r
+ mov r1, #0 ; 0x0\r
+ mov r2, r5, lsr r6\r
+ bic r5, r5, r2, lsl r6\r
+ adcs r0, r5, r5\r
+ adc r1, r1, #0 ; 0x0\r
+ movs lr, r3, lsl r6\r
+ mov r3, r4, lsr r6\r
+ bic r4, r4, r3, lsl r6\r
+ adc r1, r1, #0 ; 0x0\r
+ adds r0, r0, lr\r
+ orr r2, r2, r4, ror r6\r
+ adc r1, r1, #0 ; 0x0\r
+ ldmia sp!, {r4, r5, r6, pc}\r
+\r
+_ll_udiv_ginormous\r
+ subs r2, r5, lr\r
+ mov r1, #0 ; 0x0\r
+ sbcs r3, r4, ip\r
+ adc r0, r1, r1\r
+ movcc r2, r5\r
+ movcc r3, r4\r
+ ldmia sp!, {r4, r5, r6, pc}\r
+\r
+_ll_div0\r
+ ldmia sp!, {r4, r5, r6, lr}\r
+ mov r0, #0 ; 0x0\r
+ mov r1, #0 ; 0x0\r
+ b __aeabi_ldiv0\r
+\r
+__aeabi_ldiv0\r
+ bx r14\r
+\r
+ END\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__umoddi3)\r
+ stmfd sp!, {r7, lr}\r
+ add r7, sp, #0\r
+ sub sp, sp, #16\r
+ add ip, sp, #8\r
+ str ip, [sp, #0]\r
+ bl ASM_PFX(__udivmoddi4)\r
+ ldrd r0, [sp, #8]\r
+ sub sp, r7, #0\r
+ ldmfd sp!, {r7, pc}\r
+\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+ASM_FUNC(__umodsi3)\r
+ stmfd sp!, {r4, r5, r7, lr}\r
+ add r7, sp, #8\r
+ mov r5, r0\r
+ mov r4, r1\r
+ bl ASM_PFX(__udivsi3)\r
+ mul r0, r4, r0\r
+ rsb r0, r0, r5\r
+ ldmfd sp!, {r4, r5, r7, pc}\r
+\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+#\r
+#UINT32\r
+#EFIAPI\r
+#__aeabi_uread4 (\r
+# IN VOID *Pointer\r
+# );\r
+#\r
+ASM_FUNC(__aeabi_uread4)\r
+ ldrb r1, [r0]\r
+ ldrb r2, [r0, #1]\r
+ ldrb r3, [r0, #2]\r
+ ldrb r0, [r0, #3]\r
+ orr r1, r1, r2, lsl #8\r
+ orr r1, r1, r3, lsl #16\r
+ orr r0, r1, r0, lsl #24\r
+ bx lr\r
+\r
+#\r
+#UINT64\r
+#EFIAPI\r
+#__aeabi_uread8 (\r
+# IN VOID *Pointer\r
+# );\r
+#\r
+ASM_FUNC(__aeabi_uread8)\r
+ mov r3, r0\r
+\r
+ ldrb r1, [r3]\r
+ ldrb r2, [r3, #1]\r
+ orr r1, r1, r2, lsl #8\r
+ ldrb r2, [r3, #2]\r
+ orr r1, r1, r2, lsl #16\r
+ ldrb r0, [r3, #3]\r
+ orr r0, r1, r0, lsl #24\r
+\r
+ ldrb r1, [r3, #4]\r
+ ldrb r2, [r3, #5]\r
+ orr r1, r1, r2, lsl #8\r
+ ldrb r2, [r3, #6]\r
+ orr r1, r1, r2, lsl #16\r
+ ldrb r2, [r3, #7]\r
+ orr r1, r1, r2, lsl #24\r
+\r
+ bx lr\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroLib.h>\r
+\r
+#\r
+#UINT32\r
+#EFIAPI\r
+#__aeabi_uwrite4 (\r
+# IN UINT32 Data,\r
+# IN VOID *Pointer\r
+# );\r
+#\r
+ASM_FUNC(__aeabi_uwrite4)\r
+ mov r2, r0, lsr #8\r
+ strb r0, [r1]\r
+ strb r2, [r1, #1]\r
+ mov r2, r0, lsr #16\r
+ strb r2, [r1, #2]\r
+ mov r2, r0, lsr #24\r
+ strb r2, [r1, #3]\r
+ bx lr\r
+\r
+#\r
+#UINT64\r
+#EFIAPI\r
+#__aeabi_uwrite8 (\r
+# IN UINT64 Data,\r
+# IN VOID *Pointer\r
+# );\r
+#\r
+ASM_FUNC(__aeabi_uwrite8)\r
+ mov r3, r0, lsr #8\r
+ strb r0, [r2]\r
+ strb r3, [r2, #1]\r
+ mov r3, r0, lsr #16\r
+ strb r3, [r2, #2]\r
+ mov r3, r0, lsr #24\r
+ strb r3, [r2, #3]\r
+\r
+ mov r3, r1, lsr #8\r
+ strb r1, [r2, #4]\r
+ strb r3, [r2, #5]\r
+ mov r3, r1, lsr #16\r
+ strb r3, [r2, #6]\r
+ mov r3, r1, lsr #24\r
+ strb r3, [r2, #7]\r
+ bx lr\r
--- /dev/null
+#/** @file\r
+# Base Library implementation.\r
+#\r
+# Copyright (c) 2009, Apple Inc. All rights reserved.<BR>\r
+# Copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
+#\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
+#\r
+#\r
+#**/\r
+\r
+[Defines]\r
+ INF_VERSION = 1.29\r
+ BASE_NAME = CompilerIntrinsicsLib\r
+ FILE_GUID = 2A6B451F-B99D-47B1-8F29-D805433C62E0\r
+ MODULE_TYPE = BASE\r
+ VERSION_STRING = 1.0\r
+ LIBRARY_CLASS = CompilerIntrinsicsLib\r
+\r
+[Sources]\r
+ memcpy.c | GCC\r
+ memset.c | GCC\r
+\r
+ memcpy_ms.c | MSFT\r
+ memset_ms.c | MSFT\r
+ memcmp_ms.c | MSFT\r
+ memmove_ms.c | MSFT\r
+\r
+[Sources.ARM]\r
+ Arm/ashrdi3.S | GCC\r
+ Arm/ashldi3.S | GCC\r
+ Arm/div.S | GCC\r
+ Arm/divdi3.S | GCC\r
+ Arm/divsi3.S | GCC\r
+ Arm/lshrdi3.S | GCC\r
+ Arm/memmove.S | GCC\r
+ Arm/modsi3.S | GCC\r
+ Arm/moddi3.S | GCC\r
+ Arm/muldi3.S | GCC\r
+ Arm/mullu.S | GCC\r
+ Arm/udivsi3.S | GCC\r
+ Arm/umodsi3.S | GCC\r
+ Arm/udivdi3.S | GCC\r
+ Arm/umoddi3.S | GCC\r
+ Arm/udivmoddi4.S | GCC\r
+ Arm/clzsi2.S | GCC\r
+ Arm/ctzsi2.S | GCC\r
+ Arm/ucmpdi2.S | GCC\r
+ Arm/switch8.S | GCC\r
+ Arm/switchu8.S | GCC\r
+ Arm/switch16.S | GCC\r
+ Arm/switch32.S | GCC\r
+ Arm/sourcery.S | GCC\r
+ Arm/uldiv.S | GCC\r
+ Arm/ldivmod.S | GCC\r
+ Arm/lasr.S | GCC\r
+ Arm/llsr.S | GCC\r
+ Arm/llsl.S | GCC\r
+ Arm/uread.S | GCC\r
+ Arm/uwrite.S | GCC\r
+\r
+ Arm/div.asm | MSFT\r
+ Arm/uldiv.asm | MSFT\r
+ Arm/ldivmod.asm | MSFT\r
+ Arm/llsr.asm | MSFT\r
+\r
+[Sources.AARCH64]\r
+ AArch64/Atomics.S | GCC\r
+ AArch64/ashlti3.S | GCC\r
+\r
+[Packages]\r
+ MdePkg/MdePkg.dec\r
+\r
+[BuildOptions]\r
+ MSFT:*_*_*_CC_FLAGS = /GL-\r
+ MSFT:*_*_ARM_ASM_FLAGS = /oldit\r
--- /dev/null
+// ------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2019, Pete Batard. All rights reserved.\r
+// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+// ------------------------------------------------------------------------------\r
+\r
+#if defined (_M_ARM64)\r
+typedef unsigned __int64 size_t;\r
+#else\r
+typedef unsigned __int32 size_t;\r
+#endif\r
+\r
+int\r
+memcmp (\r
+ void *,\r
+ void *,\r
+ size_t\r
+ );\r
+\r
+#pragma intrinsic(memcmp)\r
+#pragma function(memcmp)\r
+int\r
+memcmp (\r
+ const void *s1,\r
+ const void *s2,\r
+ size_t n\r
+ )\r
+{\r
+ unsigned char const *t1;\r
+ unsigned char const *t2;\r
+\r
+ t1 = s1;\r
+ t2 = s2;\r
+\r
+ while (n-- != 0) {\r
+ if (*t1 != *t2) {\r
+ return (int)*t1 - (int)*t2;\r
+ }\r
+\r
+ t1++;\r
+ t2++;\r
+ }\r
+\r
+ return 0;\r
+}\r
--- /dev/null
+// ------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>\r
+// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+// ------------------------------------------------------------------------------\r
+\r
+typedef __SIZE_TYPE__ size_t;\r
+\r
+static void\r
+__memcpy (\r
+ void *dest,\r
+ const void *src,\r
+ size_t n\r
+ )\r
+{\r
+ unsigned char *d;\r
+ unsigned char const *s;\r
+\r
+ d = dest;\r
+ s = src;\r
+\r
+ while (n-- != 0) {\r
+ *d++ = *s++;\r
+ }\r
+}\r
+\r
+void *\r
+memcpy (\r
+ void *dest,\r
+ const void *src,\r
+ size_t n\r
+ )\r
+{\r
+ __memcpy (dest, src, n);\r
+ return dest;\r
+}\r
+\r
+#ifdef __arm__\r
+\r
+__attribute__ ((__alias__ ("__memcpy")))\r
+void\r
+__aeabi_memcpy (\r
+ void *dest,\r
+ const void *src,\r
+ size_t n\r
+ );\r
+\r
+__attribute__ ((__alias__ ("__memcpy")))\r
+void\r
+__aeabi_memcpy4 (\r
+ void *dest,\r
+ const void *src,\r
+ size_t n\r
+ );\r
+\r
+__attribute__ ((__alias__ ("__memcpy")))\r
+void\r
+__aeabi_memcpy8 (\r
+ void *dest,\r
+ const void *src,\r
+ size_t n\r
+ );\r
+\r
+#endif\r
--- /dev/null
+// ------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2017, Pete Batard. All rights reserved.<BR>\r
+// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+// ------------------------------------------------------------------------------\r
+\r
+#if defined (_M_ARM64)\r
+typedef unsigned __int64 size_t;\r
+#else\r
+typedef unsigned __int32 size_t;\r
+#endif\r
+\r
+void *\r
+memcpy (\r
+ void *,\r
+ const void *,\r
+ size_t\r
+ );\r
+\r
+#pragma intrinsic(memcpy)\r
+#pragma function(memcpy)\r
+void *\r
+memcpy (\r
+ void *dest,\r
+ const void *src,\r
+ size_t n\r
+ )\r
+{\r
+ unsigned char *d;\r
+ unsigned char const *s;\r
+\r
+ d = dest;\r
+ s = src;\r
+\r
+ while (n-- != 0) {\r
+ *d++ = *s++;\r
+ }\r
+\r
+ return dest;\r
+}\r
--- /dev/null
+// ------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2019, Pete Batard. All rights reserved.\r
+// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+// ------------------------------------------------------------------------------\r
+\r
+#if defined (_M_ARM64)\r
+typedef unsigned __int64 size_t;\r
+#else\r
+typedef unsigned __int32 size_t;\r
+#endif\r
+\r
+void *\r
+memmove (\r
+ void *,\r
+ const void *,\r
+ size_t\r
+ );\r
+\r
+#pragma intrinsic(memmove)\r
+#pragma function(memmove)\r
+void *\r
+memmove (\r
+ void *dest,\r
+ const void *src,\r
+ size_t n\r
+ )\r
+{\r
+ unsigned char *d;\r
+ unsigned char const *s;\r
+\r
+ d = dest;\r
+ s = src;\r
+\r
+ if (d < s) {\r
+ while (n-- != 0) {\r
+ *d++ = *s++;\r
+ }\r
+ } else {\r
+ d += n;\r
+ s += n;\r
+ while (n-- != 0) {\r
+ *--d = *--s;\r
+ }\r
+ }\r
+\r
+ return dest;\r
+}\r
--- /dev/null
+// ------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>\r
+// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+// ------------------------------------------------------------------------------\r
+\r
+typedef __SIZE_TYPE__ size_t;\r
+\r
+static __attribute__ ((__used__))\r
+void *\r
+__memset (\r
+ void *s,\r
+ int c,\r
+ size_t n\r
+ )\r
+{\r
+ unsigned char *d;\r
+\r
+ d = s;\r
+\r
+ while (n-- != 0) {\r
+ *d++ = c;\r
+ }\r
+\r
+ return s;\r
+}\r
+\r
+//\r
+// Other modules (such as CryptoPkg/IntrinsicLib) may provide another\r
+// implementation of memset(), which may conflict with this one if this\r
+// object was pulled into the link due to the definitions below. So make\r
+// our memset() 'weak' to let the other implementation take precedence.\r
+//\r
+__attribute__ ((__weak__, __alias__ ("__memset")))\r
+void *\r
+memset (\r
+ void *dest,\r
+ int c,\r
+ size_t n\r
+ );\r
+\r
+#ifdef __arm__\r
+\r
+void\r
+__aeabi_memset (\r
+ void *dest,\r
+ size_t n,\r
+ int c\r
+ )\r
+{\r
+ __memset (dest, c, n);\r
+}\r
+\r
+__attribute__ ((__alias__ ("__aeabi_memset")))\r
+void\r
+__aeabi_memset4 (\r
+ void *dest,\r
+ size_t n,\r
+ int c\r
+ );\r
+\r
+__attribute__ ((__alias__ ("__aeabi_memset")))\r
+void\r
+__aeabi_memset8 (\r
+ void *dest,\r
+ size_t n,\r
+ int c\r
+ );\r
+\r
+void\r
+__aeabi_memclr (\r
+ void *dest,\r
+ size_t n\r
+ )\r
+{\r
+ __memset (dest, 0, n);\r
+}\r
+\r
+__attribute__ ((__alias__ ("__aeabi_memclr")))\r
+void\r
+__aeabi_memclr4 (\r
+ void *dest,\r
+ size_t n\r
+ );\r
+\r
+__attribute__ ((__alias__ ("__aeabi_memclr")))\r
+void\r
+__aeabi_memclr8 (\r
+ void *dest,\r
+ size_t n\r
+ );\r
+\r
+#endif\r
--- /dev/null
+// ------------------------------------------------------------------------------\r
+//\r
+// Copyright (c) 2017, Pete Batard. All rights reserved.<BR>\r
+// Copyright (c) 2021, Arm Limited. All rights reserved.<BR>\r
+//\r
+// SPDX-License-Identifier: BSD-2-Clause-Patent\r
+//\r
+// ------------------------------------------------------------------------------\r
+\r
+#if defined (_M_ARM64)\r
+typedef unsigned __int64 size_t;\r
+#else\r
+typedef unsigned __int32 size_t;\r
+#endif\r
+\r
+void *\r
+memset (\r
+ void *,\r
+ int,\r
+ size_t\r
+ );\r
+\r
+#pragma intrinsic(memset)\r
+#pragma function(memset)\r
+void *\r
+memset (\r
+ void *s,\r
+ int c,\r
+ size_t n\r
+ )\r
+{\r
+ unsigned char *d;\r
+\r
+ d = s;\r
+\r
+ while (n-- != 0) {\r
+ *d++ = (unsigned char)c;\r
+ }\r
+\r
+ return s;\r
+}\r
SafeIntLib|MdePkg/Library/BaseSafeIntLib/BaseSafeIntLib.inf\r
SynchronizationLib|MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf\r
MmUnblockMemoryLib|MdePkg/Library/MmUnblockMemoryLib/MmUnblockMemoryLibNull.inf\r
+\r
+[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
+ #\r
+ # It is not possible to prevent the ARM/AARCH64 compilers from inserting generic intrinsic functions.\r
+ # This library provides the intrinsic functions generated by these compilers.\r
+ #\r
+ # Linking this here as a null library will cause all ARM/AARCH64 files to link against it and have\r
+ # definitions for the intrinsic functions.\r
+ #\r
+ NULL|MdePkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
[Components.ARM, Components.AARCH64]\r
MdePkg/Library/BaseIoLibIntrinsic/BaseIoLibIntrinsicArmVirt.inf\r
MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
+ MdePkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
\r
[Components.RISCV64]\r
MdePkg/Library/BaseRiscVSbiLib/BaseRiscVSbiLib.inf\r
ShellLib|ShellPkg/Library/UefiShellLib/UefiShellLib.inf\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
- #\r
- # It is not possible to prevent ARM compiler calls to generic intrinsic functions.\r
- # This library provides the instrinsic functions generated by a given compiler.\r
- # [LibraryClasses.ARM] and NULL mean link this library into all ARM images.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
ArmSoftFloatLib|ArmPkg/Library/ArmSoftFloatLib/ArmSoftFloatLib.inf\r
\r
HobLib|MdePkg/Library/DxeHobLib/DxeHobLib.inf\r
ReportStatusCodeLib|MdeModulePkg/Library/DxeReportStatusCodeLib/DxeReportStatusCodeLib.inf\r
DebugLib|MdePkg/Library/UefiDebugLibConOut/UefiDebugLibConOut.inf\r
+\r
[LibraryClasses.common.UEFI_APPLICATION]\r
DebugLib|MdePkg/Library/UefiDebugLibStdErr/UefiDebugLibStdErr.inf\r
ShellLib|ShellPkg/Library/UefiShellLib/UefiShellLib.inf\r
+\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
- #\r
- # It is not possible to prevent ARM compiler calls to generic intrinsic functions.\r
- # This library provides the instrinsic functions generated by a given compiler.\r
- # [LibraryClasses.ARM] and NULL mean link this library into all ARM images.\r
- #\r
-!if $(TOOL_CHAIN_TAG) != VS2017 and $(TOOL_CHAIN_TAG) != VS2015 and $(TOOL_CHAIN_TAG) != VS2019\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-!endif\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
+\r
[LibraryClasses.ARM]\r
RngLib|MdePkg/Library/BaseRngLibTimerLib/BaseRngLibTimerLib.inf\r
+\r
[LibraryClasses.RISCV64]\r
RngLib|MdePkg/Library/BaseRngLibTimerLib/BaseRngLibTimerLib.inf\r
\r
## options defined .pytool/Plugin/DependencyCheck\r
"DependencyCheck": {\r
"AcceptableDependencies": [\r
- "ArmPkg/ArmPkg.dec",\r
"MdeModulePkg/MdeModulePkg.dec",\r
"MdePkg/MdePkg.dec",\r
"PrmPkg/PrmPkg.dec",\r
\r
DEFINE PLATFORM_PACKAGE = $(PLATFORM_NAME)Pkg\r
\r
+!include MdePkg/MdeLibs.dsc.inc\r
+\r
[LibraryClasses.common]\r
#\r
# EDK II Packages\r
MtrrLib|UefiCpuPkg/Library/MtrrLib/MtrrLib.inf\r
\r
[LibraryClasses.AARCH64]\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
[LibraryClasses.common.DXE_DRIVER, LibraryClasses.common.DXE_RUNTIME_DRIVER, LibraryClasses.common.UEFI_APPLICATION]\r
$(PLATFORM_PACKAGE)/Samples/PrmSampleHardwareAccessModule/PrmSampleHardwareAccessModule.inf\r
\r
[Components.AARCH64]\r
- ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
# Add support for GCC stack protector\r
MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
#\r
# This library provides the instrinsic functions generated by a given compiler.\r
#\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
ArmSoftFloatLib|ArmPkg/Library/ArmSoftFloatLib/ArmSoftFloatLib.inf\r
\r
MemLibWrapper|SecurityPkg/DeviceSecurity/OsStub/MemLibWrapper/MemLibWrapper.inf\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
- #\r
- # It is not possible to prevent the ARM compiler for generic intrinsic functions.\r
- # This library provides the intrinsic functions generate by a given compiler.\r
- # And NULL mean link this library into all ARM images.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
# Add support for GCC stack protector\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
ReportStatusCodeLib|MdePkg/Library/BaseReportStatusCodeLibNull/BaseReportStatusCodeLibNull.inf\r
\r
[LibraryClasses.ARM,LibraryClasses.AARCH64]\r
- #\r
- # It is not possible to prevent the ARM compiler for generic intrinsic functions.\r
- # This library provides the instrinsic functions generate by a given compiler.\r
- # [LibraryClasses.ARM] and NULL mean link this library into all ARM images.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
# Add support for GCC stack protector\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
ArmSoftFloatLib|ArmPkg/Library/ArmSoftFloatLib/ArmSoftFloatLib.inf\r
\r
[LibraryClasses.AARCH64, LibraryClasses.ARM]\r
- #\r
- # It is not possible to prevent the ARM compiler for generic intrinsic functions.\r
- # This library provides the instrinsic functions generate by a given compiler.\r
- # And NULL mean link this library into all ARM images.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
# Add support for GCC stack protector\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
CacheMaintenanceLib|ArmPkg/Library/ArmCacheMaintenanceLib/ArmCacheMaintenanceLib.inf\r
PeCoffExtraActionLib|StandaloneMmPkg/Library/StandaloneMmPeCoffExtraActionLib/StandaloneMmPeCoffExtraActionLib.inf\r
\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
NULL|MdePkg/Library/BaseStackCheckLib/BaseStackCheckLib.inf\r
\r
[LibraryClasses.common.MM_CORE_STANDALONE]\r
NULL|UnitTestFrameworkPkg/Library/UnitTestDebugAssertLib/UnitTestDebugAssertLib.inf\r
\r
[LibraryClasses.ARM, LibraryClasses.AARCH64]\r
- #\r
- # It is not possible to prevent ARM compiler calls to generic intrinsic functions.\r
- # This library provides the instrinsic functions generated by a given compiler.\r
- # [LibraryClasses.ARM] and NULL mean link this library into all ARM images.\r
- #\r
- NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
-\r
#\r
# Since software stack checking may be heuristically enabled by the compiler\r
# include BaseStackCheckLib unconditionally.\r