// ParallelsDesktopCrack/src/xorstr.hpp

/*
* Copyright 2017 - 2021 Justas Masiulis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef JM_XORSTR_HPP
#define JM_XORSTR_HPP
#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#include <arm_neon.h>
#elif defined(_M_X64) || defined(__amd64__) || defined(_M_IX86) || defined(__i386__)
#include <immintrin.h>
#else
#error Unsupported platform
#endif
#include <cstdint>
#include <cstddef>
#include <utility>
#include <type_traits>
#define xorstr(str) ::OqYAfkhl::swG([]() { return str; }, std::integral_constant<std::size_t, sizeof(str) / sizeof(*str)>{}, std::make_index_sequence<::OqYAfkhl::CtKWt::UfWL<sizeof(str)>()>{})
#define xorstr_(str) xorstr(str).Pr()
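// Usage sketch (illustrative; not part of the original header): xorstr(...) builds
// the encrypted-string object, whose Pr() member decrypts it in place and returns a
// pointer, and xorstr_(...) is shorthand for exactly that call. Assuming <cstdio>
// is available, a caller might write:
//
//     std::puts(xorstr_("this literal is stored encrypted"));
//
//     auto banner = xorstr(L"wide literals work as well");
//     const wchar_t* p = banner.Pr(); // decrypt on demand, then use p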
#ifdef _MSC_VER
#define XORSTR_FORCEINLINE __forceinline
#else
#define XORSTR_FORCEINLINE __attribute__((always_inline)) inline
#endif
namespace OqYAfkhl {
namespace CtKWt {
template<std::size_t Size>
XORSTR_FORCEINLINE constexpr std::size_t UfWL()
{
return ((Size / 16) + (Size % 16 != 0)) * 2;
}
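// Worked example for UfWL above: it rounds the literal's size in bytes up to whole
// 16-byte blocks and returns the number of 64-bit words needed to hold them, e.g.
//     sizeof("hello")            ==  6  ->  ((6/16)  + 1) * 2 == 2 words (one block)
//     sizeof("0123456789abcdef") == 17  ->  ((17/16) + 1) * 2 == 4 words (two blocks)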
template<std::uint32_t Seed>
XORSTR_FORCEINLINE constexpr std::uint32_t rY() noexcept
{
std::uint32_t value = Seed;
for(char c : __TIME__)
value = static_cast<std::uint32_t>((value ^ c) * 16777619ull);
return value;
}
template<std::size_t S>
XORSTR_FORCEINLINE constexpr std::uint64_t WL()
{
constexpr auto first_part = rY<2166136261 + S>();
constexpr auto second_part = rY<first_part>();
return (static_cast<std::uint64_t>(first_part) << 32) | second_part;
}
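// rY and WL above derive the per-word XOR keys at compile time: rY is an FNV-1a
// style mix (offset basis 2166136261, prime 16777619) over the __TIME__ string, and
// WL chains two passes and concatenates the 32-bit results. Conceptually (an
// illustrative restatement only; fnv1a_32 is just shorthand for the rY loop):
//
//     hi  = fnv1a_32(__TIME__, /*seed*/ 2166136261 + S);
//     lo  = fnv1a_32(__TIME__, /*seed*/ hi);
//     key = (std::uint64_t(hi) << 32) | lo;
//
// so each 64-bit storage word gets its own key and a rebuild at a different
// __TIME__ changes all of them.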
// Loads up to 8 characters of the string into a uint64 and XORs them with the key.
template<std::size_t N, class CharT>
XORSTR_FORCEINLINE constexpr std::uint64_t
EE(std::uint64_t key, std::size_t idx, const CharT* str) noexcept
{
using cast_type = typename std::make_unsigned<CharT>::type;
constexpr auto value_size = sizeof(CharT);
constexpr auto idx_offset = 8 / value_size;
std::uint64_t value = key;
for(std::size_t i = 0; i < idx_offset && i + idx * idx_offset < N; ++i)
value ^=
(std::uint64_t{ static_cast<cast_type>(str[i + idx * idx_offset]) }
<< ((i % idx_offset) * 8 * value_size));
return value;
}
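// Worked example for EE above with CharT == char (idx_offset == 8): block idx covers
// str[idx*8 .. idx*8+7] and each character is XORed into the matching byte of the
// 64-bit key. For str == "AB" (N == 3, null terminator included) and idx == 0:
//
//     value = key ^ (std::uint64_t('A') | (std::uint64_t('B') << 8)
//                                        | (std::uint64_t('\0') << 16));
//
// bytes past the end of the literal are left holding the key's own bytes.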
// Forces the compiler to keep the value in a register instead of placing the constant in .rdata.
XORSTR_FORCEINLINE std::uint64_t Fw(std::uint64_t value) noexcept
{
#if defined(__clang__) || defined(__GNUC__)
asm("" : "=r"(value) : "0"(value) :);
return value;
#else
volatile std::uint64_t reg = value;
return reg;
#endif
}
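// Both branches above have the same intent: the empty asm statement ties the value
// to a register on GCC/Clang via the "=r"/"0" constraint pair, and the volatile
// round-trip does much the same elsewhere, so the compiler cannot constant-fold the
// key/data XOR at build time and leave the decrypted string readable in the binary.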
} // namespace CtKWt
template<class CharT, std::size_t Size, class Keys, class Indices>
class swG;
template<class CharT, std::size_t Size, std::uint64_t... Keys, std::size_t... Indices>
class swG<CharT, Size, std::integer_sequence<std::uint64_t, Keys...>, std::index_sequence<Indices...>> {
#ifndef JM_XORSTR_DISABLE_AVX_INTRINSICS
constexpr static inline std::uint64_t alignment = ((Size > 16) ? 32 : 16);
#else
constexpr static inline std::uint64_t alignment = 16;
#endif
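// _storage is over-aligned so that the aligned 256-bit (AVX2) or 128-bit (SSE2/NEON)
// loads and stores in fO()/Pr() below are valid.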
alignas(alignment) std::uint64_t _storage[sizeof...(Keys)];
public:
using value_type = CharT;
using size_type = std::size_t;
using pointer = CharT*;
using const_pointer = const CharT*;
template<class L>
XORSTR_FORCEINLINE swG(L l, std::integral_constant<std::size_t, Size>, std::index_sequence<Indices...>) noexcept
: _storage{ ::OqYAfkhl::CtKWt::Fw((std::integral_constant<std::uint64_t, CtKWt::EE<Size>(Keys, Indices, l())>::value))... }
{}
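// Size counts the terminating null, so size() reports the character count without it.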
XORSTR_FORCEINLINE constexpr size_type size() const noexcept
{
return Size - 1;
}
XORSTR_FORCEINLINE void fO() noexcept
{
// everything is inlined by hand because a certain compiler with a certain linker is _very_ slow
#if defined(__clang__)
alignas(alignment)
std::uint64_t arr[]{ ::OqYAfkhl::CtKWt::Fw(Keys)... };
std::uint64_t* keys =
(std::uint64_t*)::OqYAfkhl::CtKWt::Fw((std::uint64_t)arr);
#else
alignas(alignment) std::uint64_t keys[]{ ::OqYAfkhl::CtKWt::Fw(Keys)... };
#endif
#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#if defined(__clang__)
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : __builtin_neon_vst1q_v(
reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
veorq_u64(__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2, 51),
__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(keys) + Indices * 2, 51)),
51)), ...);
#else // GCC, MSVC
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : vst1q_u64(
reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
veorq_u64(vld1q_u64(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2),
vld1q_u64(reinterpret_cast<const uint64_t*>(keys) + Indices * 2)))), ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
((Indices >= sizeof(_storage) / 32 ? static_cast<void>(0) : _mm256_store_si256(
reinterpret_cast<__m256i*>(_storage) + Indices,
_mm256_xor_si256(
_mm256_load_si256(reinterpret_cast<const __m256i*>(_storage) + Indices),
_mm256_load_si256(reinterpret_cast<const __m256i*>(keys) + Indices)))), ...);
if constexpr(sizeof(_storage) % 32 != 0)
_mm_store_si128(
reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage + sizeof...(Keys) - 2)),
_mm_load_si128(reinterpret_cast<const __m128i*>(keys + sizeof...(Keys) - 2))));
#else
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : _mm_store_si128(
reinterpret_cast<__m128i*>(_storage) + Indices,
_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage) + Indices),
_mm_load_si128(reinterpret_cast<const __m128i*>(keys) + Indices)))), ...);
#endif
}
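// fO() above XORs the key material into _storage in place; because XOR is its own
// inverse, calling it on encrypted storage decrypts it and calling it again
// re-encrypts it. The unrolled intrinsics are equivalent to this scalar sketch:
//
//     for(std::size_t i = 0; i < sizeof...(Keys); ++i)
//         _storage[i] ^= keys[i];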
XORSTR_FORCEINLINE const_pointer Ce() const noexcept
{
return reinterpret_cast<const_pointer>(_storage);
}
XORSTR_FORCEINLINE pointer Ce() noexcept
{
return reinterpret_cast<pointer>(_storage);
}
XORSTR_FORCEINLINE pointer Pr() noexcept
{
// fO() is inlined by hand because a certain compiler with a certain linker is _very_ slow
#if defined(__clang__)
alignas(alignment)
std::uint64_t arr[]{ ::OqYAfkhl::CtKWt::Fw(Keys)... };
std::uint64_t* keys =
(std::uint64_t*)::OqYAfkhl::CtKWt::Fw((std::uint64_t)arr);
#else
alignas(alignment) std::uint64_t keys[]{ ::OqYAfkhl::CtKWt::Fw(Keys)... };
#endif
#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#if defined(__clang__)
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : __builtin_neon_vst1q_v(
reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
veorq_u64(__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2, 51),
__builtin_neon_vld1q_v(reinterpret_cast<const uint64_t*>(keys) + Indices * 2, 51)),
51)), ...);
#else // GCC, MSVC
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : vst1q_u64(
reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
veorq_u64(vld1q_u64(reinterpret_cast<const uint64_t*>(_storage) + Indices * 2),
vld1q_u64(reinterpret_cast<const uint64_t*>(keys) + Indices * 2)))), ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
((Indices >= sizeof(_storage) / 32 ? static_cast<void>(0) : _mm256_store_si256(
reinterpret_cast<__m256i*>(_storage) + Indices,
_mm256_xor_si256(
_mm256_load_si256(reinterpret_cast<const __m256i*>(_storage) + Indices),
_mm256_load_si256(reinterpret_cast<const __m256i*>(keys) + Indices)))), ...);
if constexpr(sizeof(_storage) % 32 != 0)
_mm_store_si128(
reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage + sizeof...(Keys) - 2)),
_mm_load_si128(reinterpret_cast<const __m128i*>(keys + sizeof...(Keys) - 2))));
#else
((Indices >= sizeof(_storage) / 16 ? static_cast<void>(0) : _mm_store_si128(
reinterpret_cast<__m128i*>(_storage) + Indices,
_mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage) + Indices),
_mm_load_si128(reinterpret_cast<const __m128i*>(keys) + Indices)))), ...);
#endif
return (pointer)(_storage);
}
};
template<class L, std::size_t Size, std::size_t... Indices>
swG(L l, std::integral_constant<std::size_t, Size>, std::index_sequence<Indices...>) -> swG<
std::remove_const_t<std::remove_reference_t<decltype(l()[0])>>,
Size,
std::integer_sequence<std::uint64_t, CtKWt::WL<Indices>()...>,
std::index_sequence<Indices...>>;
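// Illustrative expansion of the deduction guide: for xorstr("hello") the literal has
// Size == 6 and UfWL<6>() == 2, so Indices is {0, 1} and the deduced type is roughly
//
//     swG<char, 6,
//         std::integer_sequence<std::uint64_t, CtKWt::WL<0>(), CtKWt::WL<1>()>,
//         std::index_sequence<0, 1>>
//
// where the actual key values depend on __TIME__ at the point of compilation.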
} // namespace OqYAfkhl
#endif // include guard