/*
 * Copyright 2017 - 2021 Justas Masiulis
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef JM_XORSTR_HPP
#define JM_XORSTR_HPP

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#include <arm_neon.h>
#elif defined(_M_X64) || defined(__amd64__) || defined(_M_IX86) || defined(__i386__)
#include <immintrin.h>
#else
#error Unsupported platform
#endif

#include <cstddef>
#include <cstdint>
#include <utility>
#include <type_traits>

#define xorstr(str)                                                            \
    ::OqYAfkhl::swG(                                                           \
        []() { return str; },                                                  \
        std::integral_constant<std::size_t, sizeof(str) / sizeof(*str)>{},     \
        std::make_index_sequence<::OqYAfkhl::CtKWt::UfWL<sizeof(str)>()>{})
#define xorstr_(str) xorstr(str).Pr()

#ifdef _MSC_VER
#define XORSTR_FORCEINLINE __forceinline
#else
#define XORSTR_FORCEINLINE __attribute__((always_inline)) inline
#endif

namespace OqYAfkhl {

    namespace CtKWt {

        // number of std::uint64_t slots needed for Size bytes, rounded up to
        // whole 16-byte blocks
        template<std::size_t Size>
        XORSTR_FORCEINLINE constexpr std::size_t UfWL()
        {
            return ((Size / 16) + (Size % 16 != 0)) * 2;
        }

        // compile-time FNV-1a-style hash of the __TIME__ string
        template<std::uint32_t Seed>
        XORSTR_FORCEINLINE constexpr std::uint32_t rY() noexcept
        {
            std::uint32_t value = Seed;
            for(char c : __TIME__)
                value = static_cast<std::uint32_t>((value ^ c) * 16777619ull);
            return value;
        }

        // builds a 64-bit key from two chained 32-bit hashes
        template<std::size_t S>
        XORSTR_FORCEINLINE constexpr std::uint64_t WL()
        {
            constexpr auto first_part  = rY<2166136261 + S>();
            constexpr auto second_part = rY<first_part>();
            return (static_cast<std::uint64_t>(first_part) << 32) | second_part;
        }

        // loads up to 8 characters of string into uint64 and xors it with the key
        template<std::size_t N, class CharT>
        XORSTR_FORCEINLINE constexpr std::uint64_t
        EE(std::uint64_t key, std::size_t idx, const CharT* str) noexcept
        {
            using cast_type = typename std::make_unsigned<CharT>::type;

            constexpr auto value_size = sizeof(CharT);
            constexpr auto idx_offset = 8 / value_size;

            std::uint64_t value = key;
            for(std::size_t i = 0; i < idx_offset && i + idx * idx_offset < N; ++i)
                value ^=
                    (std::uint64_t{ static_cast<cast_type>(str[i + idx * idx_offset]) }
                     << ((i % idx_offset) * 8 * value_size));

            return value;
        }

        // forces compiler to use registers instead of stuffing constants in rdata
        XORSTR_FORCEINLINE std::uint64_t Fw(std::uint64_t value) noexcept
        {
#if defined(__clang__) || defined(__GNUC__)
            asm("" : "=r"(value) : "0"(value) :);
            return value;
#else
            volatile std::uint64_t reg = value;
            return reg;
#endif
        }

    } // namespace CtKWt
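    // Added commentary: a minimal sketch of the key schedule above, not part
    // of the original header (the names `example`, `hi` and `lo` are
    // illustrative only). Each 64-bit key produced by WL<S>() is two chained
    // FNV-1a hashes of the __TIME__ string (offset basis 2166136261, prime
    // 16777619), so the keys change on every rebuild. Shown here for S == 0:
    namespace example {
        constexpr std::uint32_t hi = CtKWt::rY<2166136261>(); // rY<2166136261 + S>() with S == 0
        constexpr std::uint32_t lo = CtKWt::rY<hi>();         // rehash of the first half
        static_assert(((static_cast<std::uint64_t>(hi) << 32) | lo) == CtKWt::WL<0>(),
                      "WL<S>() concatenates two chained rY<>() hashes");
    } // namespace example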
    // encrypted string storage; decrypted in place at runtime
    template<class CharT, std::size_t Size, class Keys, class Indices>
    class swG;

    template<class CharT, std::size_t Size, std::uint64_t... Keys, std::size_t... Indices>
    class swG<CharT, Size, std::integer_sequence<std::uint64_t, Keys...>, std::index_sequence<Indices...>> {
#ifndef JM_XORSTR_DISABLE_AVX_INTRINSICS
        constexpr static inline std::uint64_t alignment = ((Size > 16) ? 32 : 16);
#else
        constexpr static inline std::uint64_t alignment = 16;
#endif

        alignas(alignment) std::uint64_t _storage[sizeof...(Keys)];

    public:
        using value_type    = CharT;
        using size_type     = std::size_t;
        using pointer       = CharT*;
        using const_pointer = const CharT*;

        template<class L>
        XORSTR_FORCEINLINE swG(L l, std::integral_constant<std::size_t, Size>, std::index_sequence<Indices...>) noexcept
            : _storage{ ::OqYAfkhl::CtKWt::Fw(
                  (std::integral_constant<std::uint64_t, CtKWt::EE<Size>(Keys, Indices, l())>::value))... }
        {}

        XORSTR_FORCEINLINE constexpr size_type size() const noexcept
        {
            return Size - 1;
        }

        XORSTR_FORCEINLINE void fO() noexcept
        {
            // everything is inlined by hand because a certain compiler with a certain linker is _very_ slow
#if defined(__clang__)
            alignas(alignment) std::uint64_t arr[]{ ::OqYAfkhl::CtKWt::Fw(Keys)... };
            std::uint64_t* keys =
                (std::uint64_t*)::OqYAfkhl::CtKWt::Fw((std::uint64_t)arr);
#else
            alignas(alignment) std::uint64_t keys[]{ ::OqYAfkhl::CtKWt::Fw(Keys)... };
#endif

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#if defined(__clang__)
            ((Indices >= sizeof(_storage) / 16
                  ? static_cast<void>(0)
                  : __builtin_neon_vst1q_v(
                        reinterpret_cast<std::uint64_t*>(_storage) + Indices * 2,
                        veorq_u64(__builtin_neon_vld1q_v(
                                      reinterpret_cast<const std::uint64_t*>(_storage) + Indices * 2, 51),
                                  __builtin_neon_vld1q_v(
                                      reinterpret_cast<const std::uint64_t*>(keys) + Indices * 2, 51)),
                        51)),
             ...);
#else // GCC, MSVC
            ((Indices >= sizeof(_storage) / 16
                  ? static_cast<void>(0)
                  : vst1q_u64(
                        reinterpret_cast<std::uint64_t*>(_storage) + Indices * 2,
                        veorq_u64(vld1q_u64(reinterpret_cast<const std::uint64_t*>(_storage) + Indices * 2),
                                  vld1q_u64(reinterpret_cast<const std::uint64_t*>(keys) + Indices * 2)))),
             ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
            ((Indices >= sizeof(_storage) / 32
                  ? static_cast<void>(0)
                  : _mm256_store_si256(
                        reinterpret_cast<__m256i*>(_storage) + Indices,
                        _mm256_xor_si256(
                            _mm256_load_si256(reinterpret_cast<const __m256i*>(_storage) + Indices),
                            _mm256_load_si256(reinterpret_cast<const __m256i*>(keys) + Indices)))),
             ...);

            if constexpr(sizeof(_storage) % 32 != 0)
                _mm_store_si128(
                    reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
                    _mm_xor_si128(
                        _mm_load_si128(reinterpret_cast<const __m128i*>(_storage + sizeof...(Keys) - 2)),
                        _mm_load_si128(reinterpret_cast<const __m128i*>(keys + sizeof...(Keys) - 2))));
#else
            ((Indices >= sizeof(_storage) / 16
                  ? static_cast<void>(0)
                  : _mm_store_si128(
                        reinterpret_cast<__m128i*>(_storage) + Indices,
                        _mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage) + Indices),
                                      _mm_load_si128(reinterpret_cast<const __m128i*>(keys) + Indices)))),
             ...);
#endif
        }
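        // Added commentary: fO() decrypts _storage in place by XORing it
        // block-wise (16 bytes with NEON/SSE2, 32 bytes with AVX2) against
        // the keys rebuilt in registers above; the `Indices >= ...` guard
        // discards fold expansions that fall outside the buffer. Because
        // XOR with the same key is an involution, calling fO() a second
        // time re-encrypts the buffer.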
        XORSTR_FORCEINLINE const_pointer Ce() const noexcept
        {
            return reinterpret_cast<const_pointer>(_storage);
        }

        XORSTR_FORCEINLINE pointer Ce() noexcept
        {
            return reinterpret_cast<pointer>(_storage);
        }

        XORSTR_FORCEINLINE pointer Pr() noexcept
        {
            // fO() is inlined by hand because a certain compiler with a certain linker is _very_ slow
#if defined(__clang__)
            alignas(alignment) std::uint64_t arr[]{ ::OqYAfkhl::CtKWt::Fw(Keys)... };
            std::uint64_t* keys =
                (std::uint64_t*)::OqYAfkhl::CtKWt::Fw((std::uint64_t)arr);
#else
            alignas(alignment) std::uint64_t keys[]{ ::OqYAfkhl::CtKWt::Fw(Keys)... };
#endif

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || defined(__arm__)
#if defined(__clang__)
            ((Indices >= sizeof(_storage) / 16
                  ? static_cast<void>(0)
                  : __builtin_neon_vst1q_v(
                        reinterpret_cast<std::uint64_t*>(_storage) + Indices * 2,
                        veorq_u64(__builtin_neon_vld1q_v(
                                      reinterpret_cast<const std::uint64_t*>(_storage) + Indices * 2, 51),
                                  __builtin_neon_vld1q_v(
                                      reinterpret_cast<const std::uint64_t*>(keys) + Indices * 2, 51)),
                        51)),
             ...);
#else // GCC, MSVC
            ((Indices >= sizeof(_storage) / 16
                  ? static_cast<void>(0)
                  : vst1q_u64(
                        reinterpret_cast<std::uint64_t*>(_storage) + Indices * 2,
                        veorq_u64(vld1q_u64(reinterpret_cast<const std::uint64_t*>(_storage) + Indices * 2),
                                  vld1q_u64(reinterpret_cast<const std::uint64_t*>(keys) + Indices * 2)))),
             ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
            ((Indices >= sizeof(_storage) / 32
                  ? static_cast<void>(0)
                  : _mm256_store_si256(
                        reinterpret_cast<__m256i*>(_storage) + Indices,
                        _mm256_xor_si256(
                            _mm256_load_si256(reinterpret_cast<const __m256i*>(_storage) + Indices),
                            _mm256_load_si256(reinterpret_cast<const __m256i*>(keys) + Indices)))),
             ...);

            if constexpr(sizeof(_storage) % 32 != 0)
                _mm_store_si128(
                    reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
                    _mm_xor_si128(
                        _mm_load_si128(reinterpret_cast<const __m128i*>(_storage + sizeof...(Keys) - 2)),
                        _mm_load_si128(reinterpret_cast<const __m128i*>(keys + sizeof...(Keys) - 2))));
#else
            ((Indices >= sizeof(_storage) / 16
                  ? static_cast<void>(0)
                  : _mm_store_si128(
                        reinterpret_cast<__m128i*>(_storage) + Indices,
                        _mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(_storage) + Indices),
                                      _mm_load_si128(reinterpret_cast<const __m128i*>(keys) + Indices)))),
             ...);
#endif
            return (pointer)(_storage);
        }
    };

    template<class L, std::size_t Size, std::size_t... Indices>
    swG(L l, std::integral_constant<std::size_t, Size>, std::index_sequence<Indices...>) -> swG<
        std::remove_const_t<std::remove_reference_t<decltype(l()[0])>>,
        Size,
        std::integer_sequence<std::uint64_t, CtKWt::WL<Indices>()...>,
        std::index_sequence<Indices...>>;

} // namespace OqYAfkhl

#endif // include guard
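// Added usage sketch (commentary only, kept in a comment so the header stays
// self-contained; the file name "xorstr.hpp" is an assumption):
//
//     #include "xorstr.hpp"
//     #include <cstdio>
//
//     int main()
//     {
//         // xorstr_ expands to xorstr(...).Pr(): the literal is stored
//         // XOR-encrypted in the binary and decrypted in place on use.
//         std::puts(xorstr_("this string is encrypted"));
//
//         // Two-step form: keep the object, decrypt once, reuse the pointer.
//         auto str = xorstr(L"wide literals work as well");
//         str.Pr();                 // decrypt in place, returns pointer
//         const wchar_t* p = str.Ce();
//     }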