diff options
author | B Stack <bgstack15@gmail.com> | 2018-09-01 10:20:35 -0400 |
---|---|---|
committer | B Stack <bgstack15@gmail.com> | 2018-09-01 10:20:35 -0400 |
commit | 133a3df2a343a0197f6b4a525791656424824b7d (patch) | |
tree | b3b66abb937d89af3b43ffc8dbc842875821eacb | |
parent | palemoon: remove extraneous buildreqs (diff) | |
download | stackrpms-133a3df2a343a0197f6b4a525791656424824b7d.tar.gz stackrpms-133a3df2a343a0197f6b4a525791656424824b7d.tar.bz2 stackrpms-133a3df2a343a0197f6b4a525791656424824b7d.zip |
add gcc49 with symlink fix
Uses the big-sources GitLab raw downloads, and includes the libgcc_s.so symlink fix
for x86_64.
This is mainly for the epel-7 chroot, and it does not matter if the package fails
to compile in any other chroot.
-rw-r--r-- | gcc49/gcc49 | 23 | ||||
-rw-r--r-- | gcc49/gcc49.spec | 193 | ||||
-rw-r--r-- | gcc49/local_atomic.patch | 1062 | ||||
-rw-r--r-- | gcc49/target.path | 125 |
4 files changed, 1403 insertions, 0 deletions
diff --git a/gcc49/gcc49 b/gcc49/gcc49 new file mode 100644 index 0000000..619f7e8 --- /dev/null +++ b/gcc49/gcc49 @@ -0,0 +1,23 @@ +#! /bin/bash + +gver=4.9.3 +gcc_target_platform=$(uname -m)-fedoraunited-linux-gnu + +if [ `getconf LONG_BIT` = "64" ] +then +lib=lib64 +else +lib=lib +fi + + +export CC=/opt/gcc-$gver/bin/gcc +export CXX=/opt/gcc-$gver/bin/g++ +export CPP=/opt/gcc-$gver/bin/cpp +export LD=/opt/gcc-$gver/bin/gcc +# + +# gcc49 +export LDFLAGS="-L/opt/gcc-$gver/$lib/gcc/$gcc_target_platform/$lib/" +export CPPFLAGS="-I/opt/gcc-$gver/$lib/gcc/$gcc_target_platform/$gver/include/" +# diff --git a/gcc49/gcc49.spec b/gcc49/gcc49.spec new file mode 100644 index 0000000..d613404 --- /dev/null +++ b/gcc49/gcc49.spec @@ -0,0 +1,193 @@ +%global _optdir /opt +%global _islver 0.12.2 +%global _cloogver 0.18.4 +%global _mpfrver 3.1.4 +%global _gmpver 6.1.0 +%global _mpcver 1.0.3 +%global gcc_target_platform %{_arch}-fedoraunited-linux-gnu +%define debug_package %{nil} + +Summary: Various compilers (C, C++, Objective-C, Java, ada, go, obj-c++ ...) 
+Name: gcc49 +Version: 4.9.3 +Release: 3 + +License: GPLv3+ and GPLv3+ with exceptions and GPLv2+ with exceptions and LGPLv2+ and BSD +Group: Development/Languages + +#Source: http://gcc.gnu.org/pub/gcc/releases/gcc-4.9.3/gcc-4.9.3.tar.bz2 +#Source1: http://isl.gforge.inria.fr/isl-%{_islver}.tar.bz2 +#Source2: http://www.bastoul.net/cloog/pages/download/cloog-%{_cloogver}.tar.gz +#Source3: http://www.mpfr.org/mpfr-current/mpfr-%{_mpfrver}.tar.bz2 +#Source4: https://gmplib.org/download/gmp/gmp-%{_gmpver}.tar.bz2 +#Source5: ftp://ftp.gnu.org/gnu/mpc/mpc-%{_mpcver}.tar.gz +Source: https://gitlab.com/bgstack15/big-sources/raw/master/gcc49/gcc-%{version}.tar.bz2 +Source1: https://gitlab.com/bgstack15/big-sources/raw/master/gcc49/isl-%{_islver}.tar.bz2 +Source2: https://gitlab.com/bgstack15/big-sources/raw/master/gcc49/cloog-%{_cloogver}.tar.gz +Source3: https://gitlab.com/bgstack15/big-sources/raw/master/gcc49/mpfr-%{_mpfrver}.tar.bz2 +Source4: https://gitlab.com/bgstack15/big-sources/raw/master/gcc49/gmp-%{_gmpver}.tar.bz2 +Source5: https://gitlab.com/bgstack15/big-sources/raw/master/gcc49/mpc-%{_mpcver}.tar.gz +Source6: gcc49 +Patch: target.path + +# Patch1 for libitm: Don't redefine __always_inline in local_atomic. 
+# https://gcc.gnu.org/viewcvs/gcc?view=revision&revision=227040 +Patch1: local_atomic.patch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +BuildRequires: binutils >= 2.24 +BuildRequires: make autoconf m4 gettext dejagnu bison flex sharutils +BuildRequires: texinfo texinfo-tex +BuildRequires: python-sphinx +BuildRequires: zlib-devel +BuildRequires: texinfo +BuildRequires: glibc-devel +#ada +BuildRequires: dejagnu + +# go +BuildRequires: hostname, procps + +# java +# BuildRequires: which +# BuildRequires: dejagnu +# BuildRequires: libart_lgpl-devel +# BuildRequires: gtk2-devel + +Requires: binutils >= 2.24 +Conflicts: gdb < 5.1-2 +Requires(post): /sbin/install-info +Requires(preun): /sbin/install-info +AutoReq: true + + + +%description +The gcc package contains the GNU Compiler Collection version 4.9. +You'll need this package in order to compile C code. +You can change the environment variables as follows adding +"source /usr/bin/gcc49" + + + +%prep +%setup -n gcc-4.9.3 +%patch -p0 +%patch1 -p0 + +tar jxvf %{SOURCE1} -C %{_builddir}/gcc-%{version}/ +tar zxvf %{SOURCE2} -C %{_builddir}/gcc-%{version}/ +tar jxvf %{SOURCE3} -C %{_builddir}/gcc-%{version}/ +tar jxvf %{SOURCE4} -C %{_builddir}/gcc-%{version}/ +tar zxvf %{SOURCE5} -C %{_builddir}/gcc-%{version}/ + + # link isl/cloog for in-tree builds + ln -s isl-%{_islver} isl + ln -s cloog-%{_cloogver} cloog + ln -s mpfr-%{_mpfrver} mpfr + ln -s gmp-%{_gmpver} gmp + ln -s mpc-%{_mpcver} mpc + + # Do not run fixincludes + sed -i 's@\./fixinc\.sh@-c true@' gcc/Makefile.in + + # Fedora Linux installs x86_64 libraries /lib + +[[ $CARCH == "x86_64" ]] && sed -i '/m64=/s/lib64/lib/' gcc/config/i386/t-linux64 + + echo %{version} > gcc/BASE-VER + + # hack! 
- some configure tests for header files using "$CPP $CPPFLAGS" + sed -i "/ac_cpp=/s/\$CPPFLAGS/\$CPPFLAGS -O2/" {libiberty,gcc}/configure + + mkdir -p %{_builddir}/gcc-build + +%build +cd %{_builddir}/gcc-build + + # using -pipe causes spurious test-suite failures + # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=48565 + CFLAGS=${CFLAGS/-pipe/} + CXXFLAGS=${CXXFLAGS/-pipe/} + + + %{_builddir}/gcc-%{version}/configure --prefix=/opt/gcc-%{version} \ + --libdir=/opt/gcc-%{version}/%{_lib} --libexecdir=/opt/gcc-%{version}/%{_lib} \ + --mandir=/opt/gcc-%{version}/man --infodir=/opt/gcc-%{version}/info --with-gxx-include-dir=/opt/gcc-%{version}/include \ + --host=%{_arch}-fedoraunited-linux-gnu \ + --build=%{_arch}-fedoraunited-linux-gnu \ + --enable-languages=c,c++,objc,obj-c++,go,fortran,lto \ + --with-system-zlib \ + --enable-libstdcxx-time \ + --disable-multilib \ + --enable-version-specific-runtime-libs \ + --enable-plugin \ + --enable-threads=posix \ + --enable-checking=release \ + --enable-gnu-unique-object \ + --enable-linker-build-id \ + --enable-lto \ + --enable-initfini-array \ + --enable-gnu-indirect-function \ + --enable-tls \ + %ifarch %{ix86} x86_64 + --with-tune=generic \ +%endif + --enable-bootstrap + + +make bootstrap || return 1 + + + +%install + +cd %{_builddir}/gcc-build + + make -j1 DESTDIR=%{buildroot} install + + + # Install Runtime Library Exception + install -dm 755 %{buildroot}/usr/share/licenses/%{name}/ + install -m 0644 %{_builddir}/gcc-%{version}/COPYING.RUNTIME %{buildroot}/usr/share/licenses/%{name}/RUNTIME.LIBRARY.EXCEPTION + + # Help plugins find out nvra. 
+echo "gcc-%{version}-%{release}.%{_arch}" | tee %{buildroot}/opt/gcc-%{version}/%{_lib}/gcc/%{gcc_target_platform}/rpmver + + # i686 +%ifarch i686 + ln -sf %{_optdir}/gcc-%{version}/lib/gcc/i386-fedoraunited-linux-gnu %{buildroot}/opt/gcc-%{version}/%{_lib}/gcc/i686-fedoraunited-linux-gnu +%endif + + # fix id bgstack15-gcc49-0001 +%ifarch x86_64 + pushd %{buildroot}%{_optdir}/gcc-%{version}/lib64/gcc/x86_64-fedoraunited-linux-gnu/%{version} + %{__cp} -p ../lib64/libgcc_s.so.1 . + %{__ln_s} libgcc_s.so.1 libgcc_s.so + popd + #ln -sf ../lib64/libgcc_s.so.1 %{buildroot}%{_optdir}/gcc-%{version}/lib64/gcc/x86_64-fedoraunited-linux-gnu/%{version}/libgcc_s.so.1 || : + #ln -s libgcc_s.so.1 %{buildroot}%{_optdir}/gcc-%{version}/lib64/gcc/x86_64-fedoraunited-linux-gnu/%{version}/libgcc_s.so || : + find %{buildroot} -name 'libgcc_s.so*' -printf '%p\n' +%endif + + # We need a script to change environment variables as follows + install -dm 755 %{buildroot}/%{_bindir}/ + install -m 0644 %{SOURCE6} %{buildroot}/%{_bindir}/ + chmod a+x %{buildroot}/%{_bindir}/gcc49 + + +%files + +%{_optdir}/gcc-%{version}/ +%{_datadir}/licenses/%{name}/RUNTIME.LIBRARY.EXCEPTION +%{_bindir}/gcc49 + + +%changelog +* Sat Sep 1 2018 B Stack <bgstack15@gmail.com> 4.9.3-3 +- fix x86_64 gcc_s.so symlink + +* Mon Mar 28 2016 David Vasquez <davidjeremias82@gmail.com> 4.9.3-2 +- Added i686 symlink + +* Sat Oct 10 2015 David Vasquez <davidjeremias82@gmail.com> 4.9.3-1 +- New package diff --git a/gcc49/local_atomic.patch b/gcc49/local_atomic.patch new file mode 100644 index 0000000..a2fc93e --- /dev/null +++ b/gcc49/local_atomic.patch @@ -0,0 +1,1062 @@ +--- libitm/local_atomic 2015/08/20 17:43:55 227039 ++++ libitm/local_atomic 2015/08/20 17:55:24 227040 +@@ -41,8 +41,7 @@ + #ifndef _GLIBCXX_ATOMIC + #define _GLIBCXX_ATOMIC 1 + +-#undef __always_inline +-#define __always_inline __attribute__((always_inline)) ++#define __libitm_always_inline __attribute__((always_inline)) + + // #pragma GCC system_header + 
+@@ -74,7 +73,7 @@ + memory_order_seq_cst + } memory_order; + +- inline __always_inline memory_order ++ inline __libitm_always_inline memory_order + __calculate_memory_order(memory_order __m) noexcept + { + const bool __cond1 = __m == memory_order_release; +@@ -84,13 +83,13 @@ + return __mo2; + } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_thread_fence(memory_order __m) noexcept + { + __atomic_thread_fence (__m); + } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_signal_fence(memory_order __m) noexcept + { + __atomic_thread_fence (__m); +@@ -280,19 +279,19 @@ + // Conversion to ATOMIC_FLAG_INIT. + atomic_flag(bool __i) noexcept : __atomic_flag_base({ __i }) { } + +- __always_inline bool ++ __libitm_always_inline bool + test_and_set(memory_order __m = memory_order_seq_cst) noexcept + { + return __atomic_test_and_set (&_M_i, __m); + } + +- __always_inline bool ++ __libitm_always_inline bool + test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept + { + return __atomic_test_and_set (&_M_i, __m); + } + +- __always_inline void ++ __libitm_always_inline void + clear(memory_order __m = memory_order_seq_cst) noexcept + { + // __glibcxx_assert(__m != memory_order_consume); +@@ -302,7 +301,7 @@ + __atomic_clear (&_M_i, __m); + } + +- __always_inline void ++ __libitm_always_inline void + clear(memory_order __m = memory_order_seq_cst) volatile noexcept + { + // __glibcxx_assert(__m != memory_order_consume); +@@ -455,7 +454,7 @@ + is_lock_free() const volatile noexcept + { return __atomic_is_lock_free (sizeof (_M_i), &_M_i); } + +- __always_inline void ++ __libitm_always_inline void + store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept + { + // __glibcxx_assert(__m != memory_order_acquire); +@@ -465,7 +464,7 @@ + __atomic_store_n(&_M_i, __i, __m); + } + +- __always_inline void ++ __libitm_always_inline void + store(__int_type __i, + memory_order __m = memory_order_seq_cst) 
volatile noexcept + { +@@ -476,7 +475,7 @@ + __atomic_store_n(&_M_i, __i, __m); + } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + load(memory_order __m = memory_order_seq_cst) const noexcept + { + // __glibcxx_assert(__m != memory_order_release); +@@ -485,7 +484,7 @@ + return __atomic_load_n(&_M_i, __m); + } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + load(memory_order __m = memory_order_seq_cst) const volatile noexcept + { + // __glibcxx_assert(__m != memory_order_release); +@@ -494,21 +493,21 @@ + return __atomic_load_n(&_M_i, __m); + } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + exchange(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { + return __atomic_exchange_n(&_M_i, __i, __m); + } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + exchange(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { + return __atomic_exchange_n(&_M_i, __i, __m); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__int_type& __i1, __int_type __i2, + memory_order __m1, memory_order __m2) noexcept + { +@@ -519,7 +518,7 @@ + return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__int_type& __i1, __int_type __i2, + memory_order __m1, + memory_order __m2) volatile noexcept +@@ -531,7 +530,7 @@ + return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__int_type& __i1, __int_type __i2, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -539,7 +538,7 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__int_type& __i1, __int_type __i2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -547,7 +546,7 @@ + 
__calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__int_type& __i1, __int_type __i2, + memory_order __m1, memory_order __m2) noexcept + { +@@ -558,7 +557,7 @@ + return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__int_type& __i1, __int_type __i2, + memory_order __m1, + memory_order __m2) volatile noexcept +@@ -570,7 +569,7 @@ + return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__int_type& __i1, __int_type __i2, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -578,7 +577,7 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__int_type& __i1, __int_type __i2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -586,52 +585,52 @@ + __calculate_memory_order(__m)); + } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_add(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_add(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_add(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_add(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_sub(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_sub(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_sub(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_sub(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_and(__int_type __i, + memory_order __m = 
memory_order_seq_cst) noexcept + { return __atomic_fetch_and(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_and(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_and(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_or(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_or(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_or(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_or(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_xor(__int_type __i, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_xor(&_M_i, __i, __m); } + +- __always_inline __int_type ++ __libitm_always_inline __int_type + fetch_xor(__int_type __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_xor(&_M_i, __i, __m); } +@@ -733,7 +732,7 @@ + is_lock_free() const volatile noexcept + { return __atomic_is_lock_free (sizeof (_M_p), &_M_p); } + +- __always_inline void ++ __libitm_always_inline void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -744,7 +743,7 @@ + __atomic_store_n(&_M_p, __p, __m); + } + +- __always_inline void ++ __libitm_always_inline void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -755,7 +754,7 @@ + __atomic_store_n(&_M_p, __p, __m); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + load(memory_order __m = memory_order_seq_cst) const noexcept + { + // __glibcxx_assert(__m != memory_order_release); +@@ -764,7 +763,7 @@ + return __atomic_load_n(&_M_p, __m); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + load(memory_order __m = 
memory_order_seq_cst) const volatile noexcept + { + // __glibcxx_assert(__m != memory_order_release); +@@ -773,21 +772,21 @@ + return __atomic_load_n(&_M_p, __m); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) noexcept + { + return __atomic_exchange_n(&_M_p, __p, __m); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile noexcept + { + return __atomic_exchange_n(&_M_p, __p, __m); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, + memory_order __m2) noexcept +@@ -799,7 +798,7 @@ + return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, + memory_order __m2) volatile noexcept +@@ -811,22 +810,22 @@ + return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_add(&_M_p, __d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_add(&_M_p, __d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) noexcept + { return __atomic_fetch_sub(&_M_p, __d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return __atomic_fetch_sub(&_M_p, 
__d, __m); } +@@ -870,67 +869,67 @@ + bool + is_lock_free() const volatile noexcept { return _M_base.is_lock_free(); } + +- __always_inline void ++ __libitm_always_inline void + store(bool __i, memory_order __m = memory_order_seq_cst) noexcept + { _M_base.store(__i, __m); } + +- __always_inline void ++ __libitm_always_inline void + store(bool __i, memory_order __m = memory_order_seq_cst) volatile noexcept + { _M_base.store(__i, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + load(memory_order __m = memory_order_seq_cst) const noexcept + { return _M_base.load(__m); } + +- __always_inline bool ++ __libitm_always_inline bool + load(memory_order __m = memory_order_seq_cst) const volatile noexcept + { return _M_base.load(__m); } + +- __always_inline bool ++ __libitm_always_inline bool + exchange(bool __i, memory_order __m = memory_order_seq_cst) noexcept + { return _M_base.exchange(__i, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + exchange(bool __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_base.exchange(__i, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1, + memory_order __m2) noexcept + { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1, + memory_order __m2) volatile noexcept + { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(bool& __i1, bool __i2, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_base.compare_exchange_weak(__i1, __i2, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(bool& __i1, bool __i2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_base.compare_exchange_weak(__i1, __i2, __m); } + 
+- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1, + memory_order __m2) noexcept + { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1, + memory_order __m2) volatile noexcept + { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(bool& __i1, bool __i2, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_base.compare_exchange_strong(__i1, __i2, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(bool& __i1, bool __i2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_base.compare_exchange_strong(__i1, __i2, __m); } +@@ -980,11 +979,11 @@ + store(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept + { __atomic_store(&_M_i, &__i, _m); } + +- __always_inline void ++ __libitm_always_inline void + store(_Tp __i, memory_order _m = memory_order_seq_cst) volatile noexcept + { __atomic_store(&_M_i, &__i, _m); } + +- __always_inline _Tp ++ __libitm_always_inline _Tp + load(memory_order _m = memory_order_seq_cst) const noexcept + { + _Tp tmp; +@@ -992,7 +991,7 @@ + return tmp; + } + +- __always_inline _Tp ++ __libitm_always_inline _Tp + load(memory_order _m = memory_order_seq_cst) const volatile noexcept + { + _Tp tmp; +@@ -1000,7 +999,7 @@ + return tmp; + } + +- __always_inline _Tp ++ __libitm_always_inline _Tp + exchange(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept + { + _Tp tmp; +@@ -1008,7 +1007,7 @@ + return tmp; + } + +- __always_inline _Tp ++ __libitm_always_inline _Tp + exchange(_Tp __i, + memory_order _m = memory_order_seq_cst) volatile noexcept + { +@@ -1017,50 +1016,50 @@ + return tmp; + } + +- __always_inline bool ++ __libitm_always_inline bool + 
compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s, + memory_order __f) noexcept + { + return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s, + memory_order __f) volatile noexcept + { + return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(_Tp& __e, _Tp __i, + memory_order __m = memory_order_seq_cst) noexcept + { return compare_exchange_weak(__e, __i, __m, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(_Tp& __e, _Tp __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return compare_exchange_weak(__e, __i, __m, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s, + memory_order __f) noexcept + { + return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s, + memory_order __f) volatile noexcept + { + return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(_Tp& __e, _Tp __i, + memory_order __m = memory_order_seq_cst) noexcept + { return compare_exchange_strong(__e, __i, __m, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(_Tp& __e, _Tp __i, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return compare_exchange_strong(__e, __i, __m, __m); } +@@ -1153,46 +1152,46 @@ + is_lock_free() const volatile noexcept + { return _M_b.is_lock_free(); } + +- __always_inline void ++ __libitm_always_inline void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_b.store(__p, __m); } 
+ +- __always_inline void ++ __libitm_always_inline void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_b.store(__p, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + load(memory_order __m = memory_order_seq_cst) const noexcept + { return _M_b.load(__m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + load(memory_order __m = memory_order_seq_cst) const volatile noexcept + { return _M_b.load(__m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_b.exchange(__p, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_b.exchange(__p, __m); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, memory_order __m2) noexcept + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, + memory_order __m2) volatile noexcept + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -1200,7 +1199,7 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -1208,18 +1207,18 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + 
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, memory_order __m2) noexcept + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, + memory_order __m2) volatile noexcept + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) noexcept + { +@@ -1227,7 +1226,7 @@ + __calculate_memory_order(__m)); + } + +- __always_inline bool ++ __libitm_always_inline bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) volatile noexcept + { +@@ -1235,22 +1234,22 @@ + __calculate_memory_order(__m)); + } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_b.fetch_add(__d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_b.fetch_add(__d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) noexcept + { return _M_b.fetch_sub(__d, __m); } + +- __always_inline __pointer_type ++ __libitm_always_inline __pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile noexcept + { return _M_b.fetch_sub(__d, __m); } +@@ -1544,98 +1543,98 @@ + + + // Function definitions, atomic_flag operations. 
+- inline __always_inline bool ++ inline __libitm_always_inline bool + atomic_flag_test_and_set_explicit(atomic_flag* __a, + memory_order __m) noexcept + { return __a->test_and_set(__m); } + +- inline __always_inline bool ++ inline __libitm_always_inline bool + atomic_flag_test_and_set_explicit(volatile atomic_flag* __a, + memory_order __m) noexcept + { return __a->test_and_set(__m); } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m) noexcept + { __a->clear(__m); } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_flag_clear_explicit(volatile atomic_flag* __a, + memory_order __m) noexcept + { __a->clear(__m); } + +- inline __always_inline bool ++ inline __libitm_always_inline bool + atomic_flag_test_and_set(atomic_flag* __a) noexcept + { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); } + +- inline __always_inline bool ++ inline __libitm_always_inline bool + atomic_flag_test_and_set(volatile atomic_flag* __a) noexcept + { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_flag_clear(atomic_flag* __a) noexcept + { atomic_flag_clear_explicit(__a, memory_order_seq_cst); } + +- inline __always_inline void ++ inline __libitm_always_inline void + atomic_flag_clear(volatile atomic_flag* __a) noexcept + { atomic_flag_clear_explicit(__a, memory_order_seq_cst); } + + + // Function templates generally applicable to atomic types. 
+ template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_is_lock_free(const atomic<_ITp>* __a) noexcept + { return __a->is_lock_free(); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_is_lock_free(const volatile atomic<_ITp>* __a) noexcept + { return __a->is_lock_free(); } + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_init(atomic<_ITp>* __a, _ITp __i) noexcept; + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_init(volatile atomic<_ITp>* __a, _ITp __i) noexcept; + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_store_explicit(atomic<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { __a->store(__i, __m); } + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_store_explicit(volatile atomic<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { __a->store(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m) noexcept + { return __a->load(__m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_load_explicit(const volatile atomic<_ITp>* __a, + memory_order __m) noexcept + { return __a->load(__m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_exchange_explicit(atomic<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->exchange(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_exchange_explicit(volatile atomic<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->exchange(__i, __m); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a, + _ITp* __i1, _ITp 
__i2, + memory_order __m1, +@@ -1643,7 +1642,7 @@ + { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2, + memory_order __m1, +@@ -1651,7 +1650,7 @@ + { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2, + memory_order __m1, +@@ -1659,7 +1658,7 @@ + { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2, + memory_order __m1, +@@ -1668,37 +1667,37 @@ + + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_store(atomic<_ITp>* __a, _ITp __i) noexcept + { atomic_store_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline void ++ __libitm_always_inline void + atomic_store(volatile atomic<_ITp>* __a, _ITp __i) noexcept + { atomic_store_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_load(const atomic<_ITp>* __a) noexcept + { return atomic_load_explicit(__a, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_load(const volatile atomic<_ITp>* __a) noexcept + { return atomic_load_explicit(__a, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_exchange(atomic<_ITp>* __a, _ITp __i) noexcept + { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp 
+ atomic_exchange(volatile atomic<_ITp>* __a, _ITp __i) noexcept + { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_weak(atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2) noexcept + { +@@ -1708,7 +1707,7 @@ + } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_weak(volatile atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2) noexcept + { +@@ -1718,7 +1717,7 @@ + } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_strong(atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2) noexcept + { +@@ -1728,7 +1727,7 @@ + } + + template<typename _ITp> +- __always_inline bool ++ __libitm_always_inline bool + atomic_compare_exchange_strong(volatile atomic<_ITp>* __a, + _ITp* __i1, _ITp __i2) noexcept + { +@@ -1742,158 +1741,158 @@ + // intergral types as specified in the standard, excluding address + // types. 
+ template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_add(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_add(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_sub(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_sub(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_and(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_and(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_or(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_or(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_xor(__i, __m); } + + template<typename _ITp> +- 
__always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) noexcept + { return __a->fetch_xor(__i, __m); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_add(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_sub(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_and(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_or(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); } + + template<typename 
_ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); } + + template<typename _ITp> +- __always_inline _ITp ++ __libitm_always_inline _ITp + atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept + { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); } + + + // Partial specializations for pointers. + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_add_explicit(atomic<_ITp*>* __a, ptrdiff_t __d, + memory_order __m) noexcept + { return __a->fetch_add(__d, __m); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_add_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d, + memory_order __m) noexcept + { return __a->fetch_add(__d, __m); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_add(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept + { return __a->fetch_add(__d); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_add(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept + { return __a->fetch_add(__d); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_sub_explicit(volatile atomic<_ITp*>* __a, + ptrdiff_t __d, memory_order __m) noexcept + { return __a->fetch_sub(__d, __m); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_sub_explicit(atomic<_ITp*>* __a, ptrdiff_t __d, + memory_order __m) noexcept + { return __a->fetch_sub(__d, __m); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline _ITp* + atomic_fetch_sub(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept + { return __a->fetch_sub(__d); } + + template<typename _ITp> +- __always_inline _ITp* ++ __libitm_always_inline 
_ITp* + atomic_fetch_sub(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept + { return __a->fetch_sub(__d); } + // @} group atomics diff --git a/gcc49/target.path b/gcc49/target.path new file mode 100644 index 0000000..b801c77 --- /dev/null +++ b/gcc49/target.path @@ -0,0 +1,125 @@ +--- config.guess 2014-02-05 04:40:55.000000000 -0600 ++++ config-new.guess 2015-10-19 01:41:40.576796312 -0600 +@@ -884,11 +884,11 @@ + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in +@@ -902,29 +902,29 @@ + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" = 0 ; then LIBC="gnulibc1" ; fi +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi ++ echo ${UNAME_MACHINE}-fedora-linux-eabi + else +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf ++ echo ${UNAME_MACHINE}-fedora-linux-eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} +@@ -933,22 +933,22 @@ + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + frv:Linux:*:*) +- echo 
${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + hexagon:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + i*86:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + ia64:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + m32r*:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + m68*:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build +@@ -970,10 +970,10 @@ + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + or1k:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + or32:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-${LIBC} +@@ -1005,25 +1005,25 @@ + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + exit ;; + sh64*:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + sh*:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + tile*:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + xtensa*:Linux:*:*) +- echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ echo ${UNAME_MACHINE}-fedora-linux + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. |