author    B Stack <bgstack15@gmail.com>    2020-07-14 14:41:31 -0400
committer B Stack <bgstack15@gmail.com>    2020-07-14 14:45:04 -0400
commit    ee86074a964bcc13643cf1d693c0a384132db3f2 (patch)
tree      2e2f39a41e31be8126351f43a929e43107053b20 /palemoon/debian/patches/fix_arm_FTBFS.patch
parent    fix #23 add new settings to browser prefs (diff)
pm 28.11.0-1 rc1, dpkg and rpm
Bring in the 32-bit memory mod for rpm, to match dpkg, from https://forum.palemoon.org/viewtopic.php?f=37&t=24737&p=193746
Diffstat (limited to 'palemoon/debian/patches/fix_arm_FTBFS.patch')
-rw-r--r--  palemoon/debian/patches/fix_arm_FTBFS.patch  78
1 file changed, 78 insertions(+), 0 deletions(-)
diff --git a/palemoon/debian/patches/fix_arm_FTBFS.patch b/palemoon/debian/patches/fix_arm_FTBFS.patch
new file mode 100644
index 0000000..a8b1582
--- /dev/null
+++ b/palemoon/debian/patches/fix_arm_FTBFS.patch
@@ -0,0 +1,78 @@
+Description: Fix build failure on armhf arch
+Author: Steven Pusser <stevep@mxlinux.org>
+Last-Update: 2020-06-09
+
+--- palemoon-28.10.0.orig/platform/js/src/wasm/WasmBaselineCompile.cpp
++++ palemoon-28.10.0/platform/js/src/wasm/WasmBaselineCompile.cpp
+@@ -3391,7 +3391,7 @@ class BaseCompiler
+ #ifdef JS_CODEGEN_ARM
+ void
+ loadI32(MemoryAccessDesc access, bool isSigned, RegI32 ptr, Register rt) {
+- if (access.byteSize() > 1 && IsUnaligned(ins->access())) {
++ if (access.byteSize() > 1 && IsUnaligned(access)) {
+ masm.add32(HeapReg, ptr.reg);
+ SecondScratchRegisterScope scratch(*this);
+ masm.emitUnalignedLoad(isSigned, access.byteSize(), ptr.reg, scratch, rt, 0);
+@@ -3405,7 +3405,7 @@ class BaseCompiler
+
+ void
+ storeI32(MemoryAccessDesc access, RegI32 ptr, Register rt) {
+- if (access.byteSize() > 1 && IsUnaligned(ins->access())) {
++ if (access.byteSize() > 1 && IsUnaligned(access)) {
+ masm.add32(HeapReg, ptr.reg);
+ masm.emitUnalignedStore(access.byteSize(), ptr.reg, rt, 0);
+ } else {
+@@ -3419,7 +3419,7 @@ class BaseCompiler
+
+ void
+ loadI64(MemoryAccessDesc access, RegI32 ptr, RegI64 dest) {
+- if (IsUnaligned(ins->access())) {
++ if (IsUnaligned(access)) {
+ masm.add32(HeapReg, ptr.reg);
+ SecondScratchRegisterScope scratch(*this);
+ masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, dest.reg.low,
+@@ -3440,7 +3440,7 @@ class BaseCompiler
+
+ void
+ storeI64(MemoryAccessDesc access, RegI32 ptr, RegI64 src) {
+- if (IsUnaligned(ins->access())) {
++ if (IsUnaligned(access)) {
+ masm.add32(HeapReg, ptr.reg);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.low, 0);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.high, 4);
+@@ -3459,7 +3459,7 @@ class BaseCompiler
+ void
+ loadF32(MemoryAccessDesc access, RegI32 ptr, RegF32 dest, RegI32 tmp1) {
+ masm.add32(HeapReg, ptr.reg);
+- if (IsUnaligned(ins->access())) {
++ if (IsUnaligned(access)) {
+ SecondScratchRegisterScope scratch(*this);
+ masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
+ masm.ma_vxfer(tmp1.reg, dest.reg);
+@@ -3473,7 +3473,7 @@ class BaseCompiler
+ void
+ storeF32(MemoryAccessDesc access, RegI32 ptr, RegF32 src, RegI32 tmp1) {
+ masm.add32(HeapReg, ptr.reg);
+- if (IsUnaligned(ins->access())) {
++ if (IsUnaligned(access)) {
+ masm.ma_vxfer(src.reg, tmp1.reg);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
+ } else {
+@@ -3486,7 +3486,7 @@ class BaseCompiler
+ void
+ loadF64(MemoryAccessDesc access, RegI32 ptr, RegF64 dest, RegI32 tmp1, RegI32 tmp2) {
+ masm.add32(HeapReg, ptr.reg);
+- if (IsUnaligned(ins->access())) {
++ if (IsUnaligned(access)) {
+ SecondScratchRegisterScope scratch(*this);
+ masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
+ masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp2.reg, 4);
+@@ -3501,7 +3501,7 @@ class BaseCompiler
+ void
+ storeF64(MemoryAccessDesc access, RegI32 ptr, RegF64 src, RegI32 tmp1, RegI32 tmp2) {
+ masm.add32(HeapReg, ptr.reg);
+- if (IsUnaligned(ins->access())) {
++ if (IsUnaligned(access)) {
+ masm.ma_vxfer(src.reg, tmp1.reg, tmp2.reg);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp2.reg, 4);
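
For context: the change above is a pure scoping fix. Each of these ARM-only BaseCompiler helpers receives the memory access descriptor as its `access` parameter, but the original code called `ins->access()` even though no `ins` is declared in those functions. Because the block is guarded by #ifdef JS_CODEGEN_ARM, only ARM builds ever compile it, which is why only armhf failed to build from source. Below is a minimal sketch of the bug and the fix; MemoryAccessDesc and IsUnaligned here are simplified stand-ins for illustration, not the real SpiderMonkey definitions.

// Minimal sketch (hypothetical types) of the out-of-scope-variable bug.
#include <cstddef>
#include <iostream>

// Stand-in for js::wasm::MemoryAccessDesc; these fields are assumptions.
struct MemoryAccessDesc {
    std::size_t size;
    bool unaligned;
    std::size_t byteSize() const { return size; }
    bool isUnaligned() const { return unaligned; }
};

// Stand-in for the IsUnaligned() helper the patched code calls.
static bool IsUnaligned(const MemoryAccessDesc& access) {
    return access.isUnaligned();
}

void loadI32(MemoryAccessDesc access) {
    // Broken original: `ins` is not declared anywhere in this function,
    // so this line is a compile error on ARM, the only target that builds it:
    //   if (access.byteSize() > 1 && IsUnaligned(ins->access())) { ... }

    // Fixed: consult the parameter that is actually in scope.
    if (access.byteSize() > 1 && IsUnaligned(access)) {
        std::cout << "emit unaligned load\n";  // masm.emitUnalignedLoad(...)
    } else {
        std::cout << "emit aligned load\n";    // normal load path
    }
}

int main() {
    loadI32({4, true});   // 4-byte unaligned access takes the slow path
    loadI32({4, false});  // aligned access takes the normal path
    return 0;
}

The same one-token substitution, ins->access() to access, repeats in all eight load/store helpers touched by the hunks above.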