From 54d75c7653d66732864aa9d755cdb89d6e5862d7 Mon Sep 17 00:00:00 2001
From: B Stack
Date: Tue, 14 Jul 2020 14:41:31 -0400
Subject: pm 28.11.0-1 rc1, dpkg and rpm

bring in 32-bit memory mod for rpm, to match dpkg, from
https://forum.palemoon.org/viewtopic.php?f=37&t=24737&p=193746
---
 palemoon/debian/patches/fix_arm_FTBFS.patch | 78 +++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)
 create mode 100644 palemoon/debian/patches/fix_arm_FTBFS.patch

(limited to 'palemoon/debian/patches/fix_arm_FTBFS.patch')

diff --git a/palemoon/debian/patches/fix_arm_FTBFS.patch b/palemoon/debian/patches/fix_arm_FTBFS.patch
new file mode 100644
index 0000000..a8b1582
--- /dev/null
+++ b/palemoon/debian/patches/fix_arm_FTBFS.patch
@@ -0,0 +1,78 @@
+Description: Fix build failure on armhf arch
+Author: Steven Pusser
+Last-Update: 2020-06-09
+
+--- palemoon-28.10.0.orig/platform/js/src/wasm/WasmBaselineCompile.cpp
++++ palemoon-28.10.0/platform/js/src/wasm/WasmBaselineCompile.cpp
+@@ -3391,7 +3391,7 @@ class BaseCompiler
+ #ifdef JS_CODEGEN_ARM
+     void
+     loadI32(MemoryAccessDesc access, bool isSigned, RegI32 ptr, Register rt) {
+-        if (access.byteSize() > 1 && IsUnaligned(ins->access())) {
++        if (access.byteSize() > 1 && IsUnaligned(access)) {
+             masm.add32(HeapReg, ptr.reg);
+             SecondScratchRegisterScope scratch(*this);
+             masm.emitUnalignedLoad(isSigned, access.byteSize(), ptr.reg, scratch, rt, 0);
+@@ -3405,7 +3405,7 @@ class BaseCompiler
+
+     void
+     storeI32(MemoryAccessDesc access, RegI32 ptr, Register rt) {
+-        if (access.byteSize() > 1 && IsUnaligned(ins->access())) {
++        if (access.byteSize() > 1 && IsUnaligned(access)) {
+             masm.add32(HeapReg, ptr.reg);
+             masm.emitUnalignedStore(access.byteSize(), ptr.reg, rt, 0);
+         } else {
+@@ -3419,7 +3419,7 @@ class BaseCompiler
+
+     void
+     loadI64(MemoryAccessDesc access, RegI32 ptr, RegI64 dest) {
+-        if (IsUnaligned(ins->access())) {
++        if (IsUnaligned(access)) {
+             masm.add32(HeapReg, ptr.reg);
+             SecondScratchRegisterScope scratch(*this);
+             masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, dest.reg.low,
+@@ -3440,7 +3440,7 @@ class BaseCompiler
+
+     void
+     storeI64(MemoryAccessDesc access, RegI32 ptr, RegI64 src) {
+-        if (IsUnaligned(ins->access())) {
++        if (IsUnaligned(access)) {
+             masm.add32(HeapReg, ptr.reg);
+             masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.low, 0);
+             masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.high, 4);
+@@ -3459,7 +3459,7 @@ class BaseCompiler
+     void
+     loadF32(MemoryAccessDesc access, RegI32 ptr, RegF32 dest, RegI32 tmp1) {
+         masm.add32(HeapReg, ptr.reg);
+-        if (IsUnaligned(ins->access())) {
++        if (IsUnaligned(access)) {
+             SecondScratchRegisterScope scratch(*this);
+             masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
+             masm.ma_vxfer(tmp1.reg, dest.reg);
+@@ -3473,7 +3473,7 @@ class BaseCompiler
+     void
+     storeF32(MemoryAccessDesc access, RegI32 ptr, RegF32 src, RegI32 tmp1) {
+         masm.add32(HeapReg, ptr.reg);
+-        if (IsUnaligned(ins->access())) {
++        if (IsUnaligned(access)) {
+             masm.ma_vxfer(src.reg, tmp1.reg);
+             masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
+         } else {
+@@ -3486,7 +3486,7 @@ class BaseCompiler
+     void
+     loadF64(MemoryAccessDesc access, RegI32 ptr, RegF64 dest, RegI32 tmp1, RegI32 tmp2) {
+         masm.add32(HeapReg, ptr.reg);
+-        if (IsUnaligned(ins->access())) {
++        if (IsUnaligned(access)) {
+             SecondScratchRegisterScope scratch(*this);
+             masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
+             masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp2.reg, 4);
+@@ -3501,7 +3501,7 @@ class BaseCompiler
+     void
+     storeF64(MemoryAccessDesc access, RegI32 ptr, RegF64 src, RegI32 tmp1, RegI32 tmp2) {
+         masm.add32(HeapReg, ptr.reg);
+-        if (IsUnaligned(ins->access())) {
++        if (IsUnaligned(access)) {
+             masm.ma_vxfer(src.reg, tmp1.reg, tmp2.reg);
+             masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
+             masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp2.reg, 4);
--
cgit
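
Note on what the patch fixes: each of these helpers is compiled only under
#ifdef JS_CODEGEN_ARM and receives the memory descriptor as its `access`
parameter, but the unaligned-access checks referenced `ins->access()`, a
variable that is not in scope inside the helpers, so only ARM (armhf) builds
hit the compile error (the FTBFS). A minimal standalone C++ sketch of the
pattern follows; the type and function names echo the patched file, but the
struct members and main() are illustrative stand-ins, not the real
SpiderMonkey definitions:

    #include <cstddef>
    #include <iostream>

    // Illustrative stand-in for js::wasm::MemoryAccessDesc.
    struct MemoryAccessDesc {
        size_t byteSize_;
        bool aligned_;
        size_t byteSize() const { return byteSize_; }
        bool isAligned() const { return aligned_; }
    };

    // Stand-in for the free function the real code calls.
    static bool IsUnaligned(const MemoryAccessDesc& access) {
        return !access.isAligned();
    }

    struct BaseCompiler {
        void loadI32(MemoryAccessDesc access) {
            // Broken form: IsUnaligned(ins->access()) named a variable that
            // does not exist in this scope; only ARM builds compile the
            // block, so only they failed. Fixed form: test the parameter.
            if (access.byteSize() > 1 && IsUnaligned(access))
                std::cout << "emit unaligned load sequence\n";
            else
                std::cout << "emit ordinary load\n";
        }
    };

    int main() {
        BaseCompiler bc;
        bc.loadI32({4, false});  // unaligned 4-byte access -> slow path
        bc.loadI32({4, true});   // aligned 4-byte access -> fast path
    }

Reintroducing the `ins->access()` form in the sketch fails to compile with an
error along the lines of "'ins' was not declared in this scope", which is the
armhf build failure this patch removes.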