REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4432
There is a return value bug:
the sc.w/sc.d instruction clobbers register t0, so use
register t1 for the store-conditional scratch to avoid corrupting the t0 value held in the context.
Adjust the pointer alignment check to validate against sizeof (UINT16).
Optimize the SyncIncrement and SyncDecrement functions.
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Liming Gao <gaoliming@byosoft.com.cn>
Cc: Zhiguang Liu <zhiguang.liu@intel.com>
Cc: Chao Li <lichao@loongson.cn>
Signed-off-by: Dongyan Qian <qiandongyan@loongson.cn>
Reviewed-by: Chao Li <lichao@loongson.cn>
1:\r
ll.w $t0, $a0, 0x0\r
bne $t0, $a1, 2f\r
- move $t0, $a2\r
- sc.w $t0, $a0, 0x0\r
- beqz $t0, 1b\r
+ move $t1, $a2\r
+ sc.w $t1, $a0, 0x0\r
+ beqz $t1, 1b\r
b 3f\r
2:\r
dbar 0\r
1:\r
ll.d $t0, $a0, 0x0\r
bne $t0, $a1, 2f\r
- move $t0, $a2\r
- sc.d $t0, $a0, 0x0\r
- beqz $t0, 1b\r
+ move $t1, $a2\r
+ sc.d $t1, $a0, 0x0\r
+ beqz $t1, 1b\r
b 3f\r
2:\r
dbar 0\r
)\r
**/\r
ASM_PFX(AsmInternalSyncIncrement):\r
- move $t0, $a0\r
- dbar 0\r
- ld.w $t1, $t0, 0x0\r
- li.w $t2, 1\r
- amadd.w $t1, $t2, $t0\r
+ li.w $t0, 1\r
+ amadd.w $zero, $t0, $a0\r
\r
- ld.w $a0, $t0, 0x0\r
+ ld.w $a0, $a0, 0\r
jirl $zero, $ra, 0\r
\r
/**\r
)\r
**/\r
ASM_PFX(AsmInternalSyncDecrement):\r
- move $t0, $a0\r
- dbar 0\r
- ld.w $t1, $t0, 0x0\r
- li.w $t2, -1\r
- amadd.w $t1, $t2, $t0\r
+ li.w $t0, -1\r
+ amadd.w $zero, $t0, $a0\r
\r
- ld.w $a0, $t0, 0x0\r
+ ld.w $a0, $a0, 0\r
jirl $zero, $ra, 0\r
.end\r
volatile UINT32 *Ptr32;\r
\r
/* Check that ptr is naturally aligned */\r
- ASSERT (!((UINT64)Value & (sizeof (Value) - 1)));\r
+ ASSERT (!((UINT64)Value & (sizeof (UINT16) - 1)));\r
\r
/* Mask inputs to the correct size. */\r
Mask = (((~0UL) - (1UL << (0)) + 1) & (~0UL >> (64 - 1 - ((sizeof (UINT16) * 8) - 1))));\r