Description: Fix build failure (FTBFS) on the armhf architecture
 The ARM-only load/store helpers in BaseCompiler
 (platform/js/src/wasm/WasmBaselineCompile.cpp) test
 IsUnaligned(ins->access()), but no variable named "ins" is in scope in
 these methods; the MemoryAccessDesc is passed in directly as the
 "access" parameter, so compilation fails on ARM targets. Call
 IsUnaligned(access) instead.
Author: Steven Pusser <stevep@mxlinux.org>
Last-Update: 2020-06-09
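
Below is a minimal, self-contained C++ sketch of the failure and the fix.
The types are hypothetical stand-ins; the real MemoryAccessDesc and the
IsUnaligned() helper live in platform/js/src/wasm/ and are more involved.

    struct MemoryAccessDesc {
        unsigned size;
        bool aligned;
        unsigned byteSize() const { return size; }
        bool isAligned() const { return aligned; }
    };

    // Stand-in for the free-function predicate the patched code calls.
    static bool IsUnaligned(const MemoryAccessDesc& access) {
        return !access.isAligned();
    }

    void loadI32(MemoryAccessDesc access) {
        // Before the patch the condition read
        //     IsUnaligned(ins->access())
        // which fails to compile: no "ins" is declared in this scope.
        // The descriptor is already at hand as the "access" parameter.
        if (access.byteSize() > 1 && IsUnaligned(access)) {
            /* emit a byte-wise unaligned load */
        } else {
            /* emit a normal word load */
        }
    }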

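For context, the unaligned branch that this condition guards emits the
access one byte at a time (see emitUnalignedLoad/emitUnalignedStore in
the hunks below); the 32-bit integer helpers additionally skip it for
single-byte accesses, which can never be misaligned. A rough portable
analogue of the idea, not the code the compiler actually emits:

    #include <cstdint>

    // Assemble a 32-bit little-endian value byte by byte so the CPU
    // never issues a single multi-byte access to a misaligned address.
    uint32_t loadUnalignedU32(const uint8_t* p) {
        return static_cast<uint32_t>(p[0])
             | static_cast<uint32_t>(p[1]) << 8
             | static_cast<uint32_t>(p[2]) << 16
             | static_cast<uint32_t>(p[3]) << 24;
    }
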
--- palemoon-28.10.0.orig/platform/js/src/wasm/WasmBaselineCompile.cpp
+++ palemoon-28.10.0/platform/js/src/wasm/WasmBaselineCompile.cpp
@@ -3391,7 +3391,7 @@ class BaseCompiler
 #ifdef JS_CODEGEN_ARM
     void
     loadI32(MemoryAccessDesc access, bool isSigned, RegI32 ptr, Register rt) {
-        if (access.byteSize() > 1 && IsUnaligned(ins->access())) {
+        if (access.byteSize() > 1 && IsUnaligned(access)) {
             masm.add32(HeapReg, ptr.reg);
             SecondScratchRegisterScope scratch(*this);
             masm.emitUnalignedLoad(isSigned, access.byteSize(), ptr.reg, scratch, rt, 0);
@@ -3405,7 +3405,7 @@ class BaseCompiler
 
     void
     storeI32(MemoryAccessDesc access, RegI32 ptr, Register rt) {
-        if (access.byteSize() > 1 && IsUnaligned(ins->access())) {
+        if (access.byteSize() > 1 && IsUnaligned(access)) {
             masm.add32(HeapReg, ptr.reg);
             masm.emitUnalignedStore(access.byteSize(), ptr.reg, rt, 0);
         } else {
@@ -3419,7 +3419,7 @@ class BaseCompiler
 
     void
     loadI64(MemoryAccessDesc access, RegI32 ptr, RegI64 dest) {
-        if (IsUnaligned(ins->access())) {
+        if (IsUnaligned(access)) {
             masm.add32(HeapReg, ptr.reg);
             SecondScratchRegisterScope scratch(*this);
             masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, dest.reg.low,
@@ -3440,7 +3440,7 @@ class BaseCompiler
 
     void
     storeI64(MemoryAccessDesc access, RegI32 ptr, RegI64 src) {
-        if (IsUnaligned(ins->access())) {
+        if (IsUnaligned(access)) {
             masm.add32(HeapReg, ptr.reg);
             masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.low, 0);
             masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.high, 4);
@@ -3459,7 +3459,7 @@ class BaseCompiler
     void
     loadF32(MemoryAccessDesc access, RegI32 ptr, RegF32 dest, RegI32 tmp1) {
         masm.add32(HeapReg, ptr.reg);
-        if (IsUnaligned(ins->access())) {
+        if (IsUnaligned(access)) {
             SecondScratchRegisterScope scratch(*this);
             masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
             masm.ma_vxfer(tmp1.reg, dest.reg);
@@ -3473,7 +3473,7 @@ class BaseCompiler
     void
     storeF32(MemoryAccessDesc access, RegI32 ptr, RegF32 src, RegI32 tmp1) {
         masm.add32(HeapReg, ptr.reg);
-        if (IsUnaligned(ins->access())) {
+        if (IsUnaligned(access)) {
             masm.ma_vxfer(src.reg, tmp1.reg);
             masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
         } else {
@@ -3486,7 +3486,7 @@ class BaseCompiler
     void
     loadF64(MemoryAccessDesc access, RegI32 ptr, RegF64 dest, RegI32 tmp1, RegI32 tmp2) {
         masm.add32(HeapReg, ptr.reg);
-        if (IsUnaligned(ins->access())) {
+        if (IsUnaligned(access)) {
             SecondScratchRegisterScope scratch(*this);
             masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
             masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp2.reg, 4);
@@ -3501,7 +3501,7 @@ class BaseCompiler
     void
     storeF64(MemoryAccessDesc access, RegI32 ptr, RegF64 src, RegI32 tmp1, RegI32 tmp2) {
         masm.add32(HeapReg, ptr.reg);
-        if (IsUnaligned(ins->access())) {
+        if (IsUnaligned(access)) {
             masm.ma_vxfer(src.reg, tmp1.reg, tmp2.reg);
             masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
             masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp2.reg, 4);