diff --git a/include/tensorwrapper/allocator/allocator.hpp b/include/tensorwrapper/allocator/allocator.hpp deleted file mode 100644 index 9f00af46..00000000 --- a/include/tensorwrapper/allocator/allocator.hpp +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include -#include -#include - -/** @brief Contains classes related to allocating Buffer objects. */ -namespace tensorwrapper::allocator {} diff --git a/include/tensorwrapper/allocator/allocator_base.hpp b/include/tensorwrapper/allocator/allocator_base.hpp deleted file mode 100644 index 1ebc1308..00000000 --- a/include/tensorwrapper/allocator/allocator_base.hpp +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include -#include -#include -#include - -namespace tensorwrapper::allocator { - -/** @brief Common base class for all allocators. - * - * The AllocatorBase class serves as type-erasure and a unified API for all - * allocators. 
- */ -class AllocatorBase : public detail_::PolymorphicBase { -private: - /// The type of *this - using my_type = AllocatorBase; - - /// The type *this derives from - using my_base_type = detail_::PolymorphicBase; - -public: - /// Type of a view of the runtime system - using runtime_view_type = parallelzone::runtime::RuntimeView; - - /// Type of a mutable reference to the runtime system - using runtime_view_reference = runtime_view_type&; - - /// Type of a read-only reference to the runtime system - using const_runtime_view_reference = const runtime_view_type&; - - /// Type all physical layouts derive from - using layout_type = layout::Physical; - - /// Type of a pointer to an object of type layout_type - using layout_pointer = std::unique_ptr; - - /// Type of a read-only reference to the layout - using const_layout_reference = const layout_type&; - - /// Type all buffers derive from - using buffer_base_type = buffer::BufferBase; - - /// Type of the class defining types for the buffer_base_type class - using buffer_base_traits = types::ClassTraits; - - /// Type of a mutable reference to an object of type buffer_base_type - using buffer_base_reference = - typename buffer_base_traits::buffer_base_reference; - - /// Type of a read-only reference to an object of type buffer_base_type - using const_buffer_base_reference = - typename buffer_base_traits::const_buffer_base_reference; - - /// Type of a pointer to an object of type buffer_base_type - using buffer_base_pointer = - typename buffer_base_traits::buffer_base_pointer; - - // ------------------------------------------------------------------------- - // -- Ctors and assignment - // ------------------------------------------------------------------------- - - /** @brief Polymorphically allocates a new buffer. - * - * This method type-erases the process of creating a buffer by dispatching - * to the derived class. In general the buffer created by this method will - * NOT be initialized, though this will depend on the default behavior of - * the backend. Use `construct` instead of `allocate` if you additionally - * want to guarantee initialization. - * - * Derived classes implement this method by overriding allocate_. - * - * @param[in] playout A pointer to the layout for the new buffer. - * - * @return The newly allocated, but not necessarily initialized buffer. - */ - buffer_base_pointer allocate(layout_pointer playout) { - return allocate_(std::move(playout)); - } - - buffer_base_pointer allocate(const_layout_reference layout) { - return allocate(layout.clone_as()); - } - - /** @brief The runtime *this uses for allocating. - * - * Allocators are tied to runtimes. This method can be used to retrieve - * the runtime *this is using for allocation. - * - * @return A mutable reference to the runtime *this is using for allocating - * buffers. - * - * @throw None No throw guarantee. - */ - runtime_view_reference runtime() noexcept { return m_rv_; } - - /** @brief The runtime *this uses for allocating. - * - * This method is the same as the non-const version except that it returns - * the runtime in a read-only manner. - * - * @return A read-only reference to the runtime *this uses for allocating - * buffers. - * - * @throw None No throw guarantee. 
- */ - const_runtime_view_reference runtime() const noexcept { return m_rv_; } - - // ------------------------------------------------------------------------- - // -- Utility methods - // ------------------------------------------------------------------------- - - /** @brief Is *this value equal to @p rhs? - * - * This method is non-polymorphic and only compares the AllocatorBase part - * of *this to the AllocatorBase part of @p rhs. Two AllocatorBase objects - * are value equal if they contain views of the same runtime. - * - * @return True if *this is value equal to @p rhs and false otherwise. - * - * @throw None No throw guarantee. - */ - bool operator==(const AllocatorBase& rhs) const noexcept { - return m_rv_ == rhs.m_rv_; - } - - /** @brief Is *this different from @p rhs? - * - * This method defines "different" as "not value equal." See the - * documentation for operator== for the definition of value equal. - * - * @param[in] rhs The allocator to compare against. - * - * @return False if *this is value equal to @p rhs and true otherwise. - * - * @throw None No throw guarantee. - * - */ - bool operator!=(const AllocatorBase& rhs) const noexcept { - return !((*this) == rhs); - } - -protected: - /** @brief Creates an allocator for the runtime @p rv. - * - * @param[in] rv The runtime in which to allocate buffers. - * - * @throw None No throw guarantee. - */ - explicit AllocatorBase(runtime_view_type rv) : m_rv_(std::move(rv)) {} - - /** @brief Creates *this so that it uses the same runtime as @p other. - * - * @param[in] other The allocator to make a copy of. - * - * @throw std::bad_alloc if there is a problem allocating the copy. Strong - * throw guarantee. - */ - AllocatorBase(const AllocatorBase& other) = default; - - /** @brief Derived classes should overwrite in order to implement allocate. - * - * Derived classes are charged with ensuring @p playout is a valid layout - * and then creating a buffer adhering to the layout. - * - * @param[in] playout The layout for the buffer to allocate. - * - * @throw std::bad_alloc if the allocation fails. Strong throw guarantee. - * @throw std::runtime_error if @p playout is not a valid layout. Strong - * throw guarantee. - */ - virtual buffer_base_pointer allocate_(layout_pointer playout) = 0; - -private: - /// The runtime we are allocating memory in - runtime_view_type m_rv_; -}; - -} // namespace tensorwrapper::allocator diff --git a/include/tensorwrapper/allocator/allocator_fwd.hpp b/include/tensorwrapper/allocator/allocator_fwd.hpp deleted file mode 100644 index 346cec07..00000000 --- a/include/tensorwrapper/allocator/allocator_fwd.hpp +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -namespace tensorwrapper::allocator { - -class AllocatorBase; - -template -class Eigen; - -class Local; - -class Replicated; - -template -class Contiguous; - -} // namespace tensorwrapper::allocator diff --git a/include/tensorwrapper/allocator/contiguous.hpp b/include/tensorwrapper/allocator/contiguous.hpp deleted file mode 100644 index 6231257f..00000000 --- a/include/tensorwrapper/allocator/contiguous.hpp +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -namespace tensorwrapper::allocator { - -/** @brief Allocator that can create Contiguous buffers. - * - * @tparam FloatType Type of the elements in the contiguous buffer. - */ -template -class Contiguous : public Replicated { -private: - /// Type of *this - using my_type = Contiguous; - - /// Type *this derives from - using base_type = Replicated; - -public: - /// Pull in base types - ///@{ - using base_type::buffer_base_pointer; - using base_type::const_layout_reference; - using base_type::layout_pointer; - ///@} - - /// Type of each element in the tensor - using element_type = FloatType; - - /// Type of the buffer associated with *this - using contiguous_buffer_type = buffer::Contiguous; - using contiguous_pointer = std::unique_ptr; - - /// Type of initializer lists - using rank0_il = typename types::ILTraits::type; - using rank1_il = typename types::ILTraits::type; - using rank2_il = typename types::ILTraits::type; - using rank3_il = typename types::ILTraits::type; - using rank4_il = typename types::ILTraits::type; - - /// Pull in base class's ctors - using base_type::base_type; - - /** @brief Allocates a contiguous pointer given @p layout. - * - * @note These methods shadow the function of the same name in the base - * class. The intent is to avoid needing to rebind a freshly - * allocated buffer when the user already knows it is a Contiguous - * buffer. - * - * @param[in] layout The layout of the tensor to allocate. May be passed as - * a unique_ptr or by reference. If passed by reference - * will be copied. - * - * @return A pointer to the newly allocated buffer::Contiguous object. - */ - ///@{ - contiguous_pointer allocate(const_layout_reference layout) { - return allocate(layout.clone_as()); - } - contiguous_pointer allocate(layout_pointer layout) { - auto p = allocate_(std::move(layout)); - return detail_::static_pointer_cast(p); - } - ///@} - - /// Constructs a contiguous buffer from an initializer list - ///@{ - contiguous_pointer construct(rank0_il il) { return construct_(il); } - contiguous_pointer construct(rank1_il il) { return construct_(il); } - contiguous_pointer construct(rank2_il il) { return construct_(il); } - contiguous_pointer construct(rank3_il il) { return construct_(il); } - contiguous_pointer construct(rank4_il il) { return construct_(il); } - ///@} - - /** @brief Constructs a contiguous buffer and sets all elements to @p value. 
- * - * @param[in] layout The layout of the buffer to allocate. May be passed - * either by unique_ptr or reference. If passed by - * reference will be copied. - * - * @return A pointer to the newly constructed buffer. - */ - ///@{ - contiguous_pointer construct(const_layout_reference layout, - element_type value) { - return construct(layout.clone_as(), std::move(value)); - } - contiguous_pointer construct(layout_pointer layout, element_type value) { - return construct_(std::move(layout), std::move(value)); - } - ///@} - -protected: - virtual contiguous_pointer construct_(rank0_il il) = 0; - virtual contiguous_pointer construct_(rank1_il il) = 0; - virtual contiguous_pointer construct_(rank2_il il) = 0; - virtual contiguous_pointer construct_(rank3_il il) = 0; - virtual contiguous_pointer construct_(rank4_il il) = 0; - - /// To be overridden by the derived class to implement construct - virtual contiguous_pointer construct_(layout_pointer layout, - element_type value) = 0; -}; - -} // namespace tensorwrapper::allocator diff --git a/include/tensorwrapper/allocator/eigen.hpp b/include/tensorwrapper/allocator/eigen.hpp deleted file mode 100644 index 851b5f0a..00000000 --- a/include/tensorwrapper/allocator/eigen.hpp +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include -#include - -namespace tensorwrapper::allocator { - -/** @brief Used to allocate buffers which rely on Eigen tensors. - * - * @tparam FloatType The numerical type the buffer will use to store the - * elements. - * @tparam Rank The rank of the tensor stored in the buffer. - * - * This allocator is capable of creating buffers with Eigen tensors in them. 
- * - */ -template -class Eigen : public Contiguous { -private: - /// The type of *this - using my_type = Eigen; - - /// The class *this inherits from - using my_base_type = Contiguous; - -public: - // Pull in base class's types - using typename my_base_type::base_pointer; - using typename my_base_type::buffer_base_pointer; - using typename my_base_type::buffer_base_reference; - using typename my_base_type::const_base_reference; - using typename my_base_type::const_buffer_base_reference; - using typename my_base_type::contiguous_pointer; - using typename my_base_type::element_type; - using typename my_base_type::layout_pointer; - using typename my_base_type::rank0_il; - using typename my_base_type::rank1_il; - using typename my_base_type::rank2_il; - using typename my_base_type::rank3_il; - using typename my_base_type::rank4_il; - using typename my_base_type::runtime_view_type; - - /// Type of a buffer containing an Eigen tensor - using eigen_buffer_type = buffer::Eigen; - - /// Type of a mutable reference to an object of type eigen_buffer_type - using eigen_buffer_reference = eigen_buffer_type&; - - /// Type of a read-only reference to an object of type eigen_buffer_type - using const_eigen_buffer_reference = const eigen_buffer_type&; - - /// Type of a pointer to an eigen_buffer_type object - using eigen_buffer_pointer = std::unique_ptr; - - // Reuse base class's ctors - using my_base_type::my_base_type; - - // ------------------------------------------------------------------------- - // -- Ctor - // ------------------------------------------------------------------------- - - /** @brief Creates a new Eigen allocator tied to the runtime @p rv. - * - * This ctor simply dispatches to the base class's ctor with the same - * signature. See the base class's description for more detail. - * - * @param[in] rv The runtime to use for allocating. - * - * @throw None No throw guarantee. - */ - explicit Eigen(runtime_view_type rv) : my_base_type(std::move(rv)) {} - - /** @brief Determines if @p buffer can be rebound as an Eigen buffer. - * - * Rebinding a buffer allows the same memory to be viewed as a (possibly) - * different type of buffer. - * - * @param[in] buffer The tensor we are attempting to rebind. - * - * @return True if @p buffer can be rebound to the type of buffer - * associated with this allocator and false otherwise. - * - * @throw None No throw guarantee - */ - static bool can_rebind(const_buffer_base_reference buffer); - - /** @brief Rebinds a buffer to the same type as *this. - * - * This method will convert @p buffer into a buffer which could have been - * allocated by *this. If @p buffer was allocated as such a buffer already, - * then this method is simply a downcast. - * - * @param[in] buffer The buffer to rebind. - * - * @return A mutable reference to @p buffer viewed as a buffer that could - * have been allocated by *this. - * - * @throw std::runtime_error if can_rebind(buffer) is false. Strong throw - * guarantee. - */ - static eigen_buffer_reference rebind(buffer_base_reference buffer); - - /** @brief Rebinds a buffer to the same type as *this. - * - * This method is the same as the non-const version except that the result - * is read-only. See the description for the non-const version for more - * details. - * - * @param[in] buffer The buffer to rebind. - * - * @return A read-only reference to @p buffer viewed as if it was - * allocated by *this. - * - * @throw std::runtime_error if can_rebind(buffer) is false. Strong throw - * guarantee. 
- */ - static const_eigen_buffer_reference rebind( - const_buffer_base_reference buffer); - - static base_pointer make_eigen_allocator(unsigned int rank, - runtime_view_type rv); - -protected: - /** @brief Polymorphic allocation of a new buffer. - * - * This method overrides the polymorphic allocation so that it creates a - * new Eigen buffer. - */ - buffer_base_pointer allocate_(layout_pointer playout) override; - - contiguous_pointer construct_(rank0_il il) override; - contiguous_pointer construct_(rank1_il il) override; - contiguous_pointer construct_(rank2_il il) override; - contiguous_pointer construct_(rank3_il il) override; - contiguous_pointer construct_(rank4_il il) override; - - contiguous_pointer construct_(layout_pointer playout, - element_type value) override; - - /// Implements clone by calling copy ctor - base_pointer clone_() const override { - return std::make_unique(*this); - } - - /// Implements are_equal, by deferring to the base's operator== - bool are_equal_(const_base_reference rhs) const noexcept override { - return my_base_type::template are_equal_impl_(rhs); - } - -private: - template - contiguous_pointer il_construct_(ILType il); -}; - -// ----------------------------------------------------------------------------- -// -- Explicit class template declarations -// ----------------------------------------------------------------------------- - -#define DECLARE_EIGEN_ALLOCATOR(TYPE) extern template class Eigen - -TW_APPLY_FLOATING_POINT_TYPES(DECLARE_EIGEN_ALLOCATOR); - -#undef DECLARE_EIGEN_ALLOCATOR - -} // namespace tensorwrapper::allocator diff --git a/include/tensorwrapper/allocator/local.hpp b/include/tensorwrapper/allocator/local.hpp deleted file mode 100644 index c9a82118..00000000 --- a/include/tensorwrapper/allocator/local.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include - -namespace tensorwrapper::allocator { - -/** @brief Can create buffers that exist entirely in local memory. - * - * This class is presently a stub that will be filled in later, as needed. - */ -class Local : public AllocatorBase { -private: - /// Type *this inherits from - using my_base_type = AllocatorBase; - -public: - // Pull in base's ctors - using my_base_type::my_base_type; -}; - -} // namespace tensorwrapper::allocator diff --git a/include/tensorwrapper/allocator/replicated.hpp b/include/tensorwrapper/allocator/replicated.hpp deleted file mode 100644 index be537aae..00000000 --- a/include/tensorwrapper/allocator/replicated.hpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include - -namespace tensorwrapper::allocator { - -/** @brief Can create buffers that exist entirely in local memory and are - * guaranteed to be the same for all processes. - * - * This class is presently a stub that will be filled in later, as needed. - */ -class Replicated : public Local { -private: - /// Type *this inherits from - using my_base_type = Local; - -public: - // Pull in base's ctors - using my_base_type::my_base_type; -}; - -} // namespace tensorwrapper::allocator diff --git a/include/tensorwrapper/buffer/buffer.hpp b/include/tensorwrapper/buffer/buffer.hpp index 93bde979..980b69c5 100644 --- a/include/tensorwrapper/buffer/buffer.hpp +++ b/include/tensorwrapper/buffer/buffer.hpp @@ -16,7 +16,7 @@ #pragma once #include -#include +#include #include #include diff --git a/include/tensorwrapper/buffer/buffer_base.hpp b/include/tensorwrapper/buffer/buffer_base.hpp index ad46d093..9926c1e6 100644 --- a/include/tensorwrapper/buffer/buffer_base.hpp +++ b/include/tensorwrapper/buffer/buffer_base.hpp @@ -15,11 +15,10 @@ */ #pragma once -#include #include #include #include -#include +#include #include namespace tensorwrapper::buffer { @@ -68,18 +67,6 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, /// Type of a pointer to the layout using layout_pointer = std::unique_ptr; - /// Type all allocators inherit from - using allocator_base_type = allocator::AllocatorBase; - - /// Type of a pointer to an allocator_base_type object - using allocator_base_pointer = std::unique_ptr; - - /// Type of a mutable reference to an allocator_base_type - using allocator_base_reference = allocator_base_type&; - - /// Type of a read-only reference to an allocator_base_type - using const_allocator_reference = const allocator_base_type&; - /// Type used to represent the tensor's rank using rank_type = typename layout_type::size_type; @@ -98,18 +85,6 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, */ bool has_layout() const noexcept { return static_cast(m_layout_); } - /** @brief Does *this have an allocator? - * - * Default constructed or moved from BufferBase objects will not have - * allocators. This method is used to determine if *this has an allocator - * or not. - * - * @throw None No throw guarantee. - */ - bool has_allocator() const noexcept { - return static_cast(m_allocator_); - } - /** @brief Retrieves the layout of *this. * * This method can be used to retrieve the layout associated with *this, @@ -126,38 +101,6 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, return *m_layout_; } - /** @brief Retrieves the allocator of *this. - * - * This method can be used to retrieve the allocator used to allocate - * *this, assuming *this was provided an allocator. See has_allocator for - * determining if *this has an allocator or not. - * - * @return A mutable reference to the allocator. - * - * @throw std::runtime_error if *this does not have an allocator. Strong - * throw guarantee. 
- */ - allocator_base_reference allocator() { - assert_layout_(); - return *m_allocator_; - } - - /** @brief Retrieves the allocator of *this. - * - * This method can be used to retrieve the allocator used to allocate - * *this, assuming *this was provided an allocator. See has_allocator for - * determining if *this has an allocator or not. - * - * @return A read-only reference to the allocator. - * - * @throw std::runtime_error if *this does not have an allocator. Strong - * throw guarantee. - */ - const_allocator_reference allocator() const { - assert_layout_(); - return *m_allocator_; - } - rank_type rank() const noexcept { return has_layout() ? layout().rank() : 0; } @@ -180,11 +123,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, */ bool operator==(const BufferBase& rhs) const noexcept { if(has_layout() != rhs.has_layout()) return false; - if(has_allocator() != rhs.has_allocator()) return false; if(has_layout() && m_layout_->are_different(*rhs.m_layout_)) return false; - if(has_allocator() && m_allocator_->are_different(*rhs.m_allocator_)) - return false; return true; } @@ -204,6 +144,10 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, return !(*this == rhs); } + bool approximately_equal(const BufferBase& rhs, double tol) const { + return approximately_equal_(rhs, tol); + } + protected: // ------------------------------------------------------------------------- // -- Ctors, assignment @@ -217,7 +161,7 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, * * @throw None No throw guarantee. */ - BufferBase() : BufferBase(nullptr, nullptr) {} + BufferBase() : BufferBase(nullptr) {} /** @brief Creates a buffer initialized with a copy of @p layout. * @@ -226,9 +170,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, * @throw std::bad_alloc if there is a problem allocating the copy of * @p layout. Strong throw guarantee. */ - explicit BufferBase(const_layout_reference layout, - const_allocator_reference allocator) : - BufferBase(layout.clone_as(), allocator.clone()) {} + explicit BufferBase(const_layout_reference layout) : + BufferBase(layout.clone_as()) {} /** @brief Creates a buffer which owns the layout pointed to by @p playout. * @@ -237,9 +180,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, * @throw None No throw guarantee. */ - explicit BufferBase(layout_pointer playout, - allocator_base_pointer pallocator) noexcept : - m_layout_(std::move(playout)), m_allocator_(std::move(pallocator)) {} + explicit BufferBase(layout_pointer playout) noexcept : + m_layout_(std::move(playout)) {} /** @brief Creates a buffer by deep copying @p other. * @@ -250,9 +192,7 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, */ BufferBase(const BufferBase& other) : m_layout_(other.m_layout_ ? other.m_layout_->clone_as() : - nullptr), - m_allocator_(other.m_allocator_ ? other.m_allocator_->clone() : nullptr) { - } + nullptr) {} /** @brief Replaces the state in *this with a deep copy of the state in * @p rhs. @@ -269,10 +209,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, auto temp_layout = rhs.has_layout() ? rhs.m_layout_->clone_as() : nullptr; - auto temp_allocator = - rhs.has_allocator() ? 
rhs.m_allocator_->clone() : nullptr; + temp_layout.swap(m_layout_); - temp_allocator.swap(m_allocator_); } return *this; } @@ -292,6 +230,9 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, dsl_reference permute_assignment_(label_type this_labels, const_labeled_reference rhs) override; + virtual bool approximately_equal_(const BufferBase& rhs, + double tol) const = 0; + private: template dsl_reference binary_op_common_(FxnType&& fxn, label_type this_labels, @@ -305,18 +246,8 @@ class BufferBase : public tensorwrapper::detail_::PolymorphicBase, "Buffer has no layout. Was it default initialized?"); } - /// Throws std::runtime_error when there is no allocator - void assert_allocator_() const { - if(has_allocator()) return; - throw std::runtime_error( - "Buffer has no allocator. Was it default initialized?"); - } - /// The layout of *this layout_pointer m_layout_; - - /// The allocator of *this - allocator_base_pointer m_allocator_; }; } // namespace tensorwrapper::buffer diff --git a/include/tensorwrapper/buffer/buffer_fwd.hpp b/include/tensorwrapper/buffer/buffer_fwd.hpp index 940e3b2b..98f41eea 100644 --- a/include/tensorwrapper/buffer/buffer_fwd.hpp +++ b/include/tensorwrapper/buffer/buffer_fwd.hpp @@ -20,12 +20,8 @@ namespace tensorwrapper::buffer { class BufferBase; -template class Contiguous; -template -class Eigen; - class Local; class Replicated; diff --git a/include/tensorwrapper/buffer/contiguous.hpp b/include/tensorwrapper/buffer/contiguous.hpp index ed09aebb..8ab50205 100644 --- a/include/tensorwrapper/buffer/contiguous.hpp +++ b/include/tensorwrapper/buffer/contiguous.hpp @@ -16,196 +16,385 @@ #pragma once #include -#include +#include +#include +#include #include namespace tensorwrapper::buffer { -/** @brief Denotes that a buffer is held contiguously. +/** @brief A multidimensional (MD) contiguous buffer. * - * Contiguous buffers are such that given a pointer to the first element `p`, - * the `i`-th element (`i` is zero based) is given by dereferencing the - * pointer `p + i`. Note that contiguous buffers are always vectors and storing - * higher rank tensors in a contiguous buffer requires "vectorization" of the - * tensor. In C++ vectorization is usually done in row-major format. - * - * @tparam FloatType the type of elements in the buffer. + * This class is a dense multidimensional buffer of contiguous floating-point + * values. 
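+ *
+ * A minimal usage sketch (illustrative only: the double element type, the
+ * shape::Smooth initializer-list ctor, and the tolerance value are
+ * assumptions of this example, not part of this diff):
+ *
+ * @code
+ * // 2x2 buffer holding {1, 2, 3, 4} in row-major order
+ * buffer::Contiguous a(std::vector<double>{1.0, 2.0, 3.0, 4.0},
+ *                      shape::Smooth{2, 2});
+ * auto b = a;                              // deep copy
+ * assert(a == b);                          // exact, hash-based comparison
+ * assert(a.approximately_equal(b, 1e-12)); // tolerance-based comparison
+ * @endcode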
 */
-template
 class Contiguous : public Replicated {
 private:
     /// Type *this derives from
     using my_base_type = Replicated;
 
-public:
-    /// Type of each element
-    using element_type = FloatType;
-
-    /// Type of a mutable reference to an object of type element_type
-    using reference = element_type&;
+    /// Type defining the types for the public API of *this
+    using traits_type = types::ClassTraits;
 
-    /// Type of a read-only reference to an object of type element_type
-    using const_reference = const element_type&;
+    /// Type of *this
+    using my_type = Contiguous;
 
-    using element_vector = std::vector;
+public:
+    /// Add types from traits_type to public API
+    ///@{
+    using value_type = typename traits_type::value_type;
+    using reference = typename traits_type::reference;
+    using const_reference = typename traits_type::const_reference;
+    using buffer_type = typename traits_type::buffer_type;
+    using buffer_view = typename traits_type::buffer_view;
+    using const_buffer_view = typename traits_type::const_buffer_view;
+    using rank_type = typename traits_type::rank_type;
+    using shape_type = typename traits_type::shape_type;
+    using const_shape_view = typename traits_type::const_shape_view;
+    using size_type = typename traits_type::size_type;
+    ///@}
+
+    /// Type of an offset vector
+    using index_vector = std::vector;
 
-    /// Type of a pointer to a mutable element_type object
-    using pointer = element_type*;
+    /// Type of the object used to annotate modes
+    using typename my_base_type::label_type;
+    using string_type = std::string;
 
-    /// Type of a pointer to a read-only element_type object
-    using const_pointer = const element_type*;
+    // ------------------------------------------------------------------------
+    // -- Ctors, assignment, and dtor
+    // ------------------------------------------------------------------------
 
-    /// Type used for offsets and indexing
-    using size_type = std::size_t;
+    /** @brief Creates an empty multi-dimensional buffer.
+     *
+     * The resulting buffer will have a shape of rank 0, but a size of 0. Thus
+     * the buffer can NOT be used to store any elements (including treating
+     * *this as a scalar). The resulting buffer can later be populated by
+     * assigning or moving to it.
+     *
+     * @throw None No throw guarantee.
+     */
+    Contiguous() noexcept;
 
-    /// Type of a multi-dimensional index
-    using index_vector = std::vector;
+    /** @brief Treats allocated memory like a multi-dimensional buffer.
+     *
+     * @tparam T The type of the elements in the buffer. Must satisfy the
+     *           FloatingPoint concept.
+     *
+     * This ctor will use @p elements to create a buffer_type object and then
+     * pass that along with @p shape to the main ctor.
+     *
+     * @param[in] elements The elements to be used as the backing store.
+     * @param[in] shape The shape of *this.
+     *
+     * @throw std::invalid_argument if the size of @p elements does not match
+     *                              the size implied by @p shape. Strong throw
+     *                              guarantee.
+     * @throw std::bad_alloc if there is a problem allocating memory for the
+     *                       internal state. Strong throw guarantee.
+     */
+    template
+    Contiguous(std::vector elements, shape_type shape) :
+      Contiguous(buffer_type(std::move(elements)), std::move(shape)) {}
 
-    // Pull in base's ctors
-    using my_base_type::my_base_type;
+    /** @brief The main ctor.
+     *
+     * This ctor will create *this using @p buffer as the backing store and
+     * @p shape to describe the geometry of the multidimensional array.
+     *
+     * All other ctors (aside from copy and move) delegate to this one.
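+     *
+     * A hedged sketch of calling it directly (the extents and element values
+     * are invented; shape_type is assumed to be shape::Smooth, the same shape
+     * the make_contiguous helper below constructs):
+     *
+     * @code
+     * std::vector<double> data(6, 0.0); // six zeros, viewed as 2x3
+     * shape::Smooth shape{2, 3};
+     * Contiguous c(buffer_type(std::move(data)), std::move(shape));
+     * @endcode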
+ * + * @param[in] buffer The buffer to be used as the backing store. + * @param[in] shape The shape of *this. + * + * @throw std::invalid_argument if the size of @p buffer does not match + * the size implied by @p shape. Strong throw + * guarantee. + * @throw std::bad_alloc if there is a problem allocating memory for the + * internal state. Strong throw guarantee. + */ + Contiguous(buffer_type buffer, shape_type shape); - /// Returns the number of elements in contiguous memory - size_type size() const noexcept { return size_(); } + /** @brief Initializes *this to a deep copy of @p other. + * + * This ctor will initialize *this to be a deep copy of @p other. + * + * @param[in] other The Contiguous to copy. + * + * @throw std::bad_alloc if there is a problem allocating memory for the + * internal state. Strong throw guarantee. + */ + Contiguous(const Contiguous& other) = default; - /** @brief Returns a mutable pointer to the first element in contiguous - * memory + /** @brief Move ctor. * - * @warning Returning a mutable pointer to the underlying data makes it - * no longer possible for *this to reliably track changes to that - * data. Calling this method may have performance implications, so - * use only when strictly required. + * This ctor will initialize *this by taking the state from @p other. + * After this ctor is called @p other is left in a valid but unspecified + * state. * - * @return A read/write pointer to the data. + * @param[in,out] other The Contiguous to move from. * * @throw None No throw guarantee. */ - pointer get_mutable_data() noexcept { return get_mutable_data_(); } + Contiguous(Contiguous&& other) noexcept = default; - /** @brief Returns an immutable pointer to the first element in contiguous - * memory + /** @brief Copy assignment. + * + * This operator will make *this a deep copy of @p other. + * + * @param[in] other The Contiguous to copy. + * + * @return *this after the assignment. + * + * @throw std::bad_alloc if there is a problem allocating memory for the + * internal state. Strong throw guarantee. + */ + Contiguous& operator=(const Contiguous& other) = default; + + /** @brief Move assignment. + * + * This operator will make *this take the state from @p other. After + * this operator is called @p other is left in a valid but unspecified + * state. + * + * @param[in,out] other The Contiguous to move from. * - * @return A read-only pointer to the data. + * @return *this after the assignment. * * @throw None No throw guarantee. */ - const_pointer get_immutable_data() const noexcept { - return get_immutable_data_(); - } + Contiguous& operator=(Contiguous&& other) noexcept = default; - /** @brief Retrieves a tensor element by offset. + /** @brief Defaulted dtor. * - * This method is used to access the element in an immutable way. + * @throw None No throw guarantee. + */ + ~Contiguous() override = default; + + // ------------------------------------------------------------------------- + // -- State Accessors + // ------------------------------------------------------------------------- + + /** @brief Returns (a view of) the shape of *this. * - * @param[in] index The offset of the element being retrieved. + * The shape of *this describes the geometry of the underlying + * multidimensional array. * - * @return A read-only reference to the element. + * @return A view of the shape of *this. * - * @throw std::runtime_error if the number of indices does not match the - * rank of the tensor. Strong throw guarantee. 
+ * @throw std::bad_alloc if there is a problem allocating memory for the + * returned view. Strong throw guarantee. */ - const_reference get_elem(index_vector index) const { - if(index.size() != this->rank()) - throw std::runtime_error("Number of offsets must match rank"); - return get_elem_(index); - } + const_shape_view shape() const; - /** @brief Sets a tensor element by offset. + /** @brief The total number of elements in *this. * - * This method is used to change the value of an element. + * The total number of elements is the product of the extents of each + * mode of *this. * - * @param[in] index The offset of the element being updated. - * @param[in] new_value The new value of the element. + * @return The total number of elements in *this. * - * @throw std::runtime_error if the number of indices does not match the - * rank of the tensor. Strong throw guarantee. + * @throw None No throw guarantee. */ - void set_elem(index_vector index, element_type new_value) { - if(index.size() != this->rank()) - throw std::runtime_error("Number of offsets must match rank"); - return set_elem_(index, new_value); - } + size_type size() const noexcept; - /** @brief Retrieves a tensor element by ordinal offset. + /** @brief Returns the element with the offsets specified by @p index. * - * This method is used to access the element in an immutable way. + * This method will retrieve a const reference to the element at the + * offsets specified by @p index. The length of @p index must be equal + * to the rank of *this and each entry in @p index must be less than the + * extent of the corresponding mode of *this. * - * @param[in] index The ordinal offset of the element being retrieved. + * This method can only be used to retrieve elements from *this. To modify + * elements use set_elem(). * - * @return A read-only reference to the element. + * @param[in] index The offsets into each mode of *this for the desired + * element. * - * @throw std::runtime_error if the index is greater than the number of - * elements. Strong throw guarantee. + * @return A const reference to the element at the specified offsets. */ - const_reference get_data(size_type index) const { - if(index >= this->size()) - throw std::runtime_error("Index greater than number of elements"); - return get_data_(std::move(index)); - } + const_reference get_elem(index_vector index) const; - /** @brief Sets a tensor element by ordinal offset. + /** @brief Sets the specified element to @p new_value. * - * This method is used to change the value of an element. + * This method will set the element at the offsets specified by @p index. + * The length of @p index must be equal to the rank of *this and each + * entry in @p index must be less than the extent of the corresponding + * mode of *this. * - * @param[in] index The ordinal offset of the element being updated. - * @param[in] new_value The new value of the element. + * @param[in] index The offsets into each mode of *this for the desired + * element. + * @param[in] new_value The new value for the specified element. * - * @throw std::runtime_error if the index is greater than the number of - * elements. Strong throw guarantee. + * @throw std::out_of_range if any entry in @p index is invalid. Strong + * throw guarantee. 
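+     *
+     * For example, continuing the hypothetical 2x2 buffer `a` from the class
+     * docs above:
+     *
+     * @code
+     * a.set_elem({0, 1}, 3.14);      // writes the element at row 0, column 1
+     * double x = a.get_elem({0, 1}); // reads it back; x == 3.14
+     * @endcode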
 */
-    void set_data(size_type index, element_type new_value) {
-        if(index >= this->size())
-            throw std::runtime_error("Index greater than number of elements");
-        set_data_(index, new_value);
-    }
+    void set_elem(index_vector index, value_type new_value);
 
-    /** @brief Sets all elements to a value.
+    /** @brief Returns a view of the data.
      *
-     * @param[in] value The new value of all elements.
+     */
+    buffer_view get_mutable_data();
+
+    /** @brief Returns a read-only view of the data.
      *
-     * @throw None No throw guarantee.
      */
-    void fill(element_type value) { fill_(std::move(value)); }
+    const_buffer_view get_immutable_data() const;
+
+    value_type infinity_norm() const;
 
-    /** @brief Sets elements using a list of values.
+    // ------------------------------------------------------------------------
+    // -- Utility Methods
+    // ------------------------------------------------------------------------
+
+    /** @brief Compares two Contiguous objects for exact equality.
+     *
+     * Two Contiguous objects are exactly equal if they have the same shape and
+     * if all of their corresponding elements are bitwise identical.
+     * In practice, the implementation stores a hash of the elements in the
+     * tensor and compares the hashes for equality rather than checking each
+     * element individually.
      *
-     * @param[in] values The new values of all elements.
+     * @param[in] rhs The Contiguous to compare against.
+     *
+     * @return True if *this and @p rhs are exactly equal and false otherwise.
      *
      * @throw None No throw guarantee.
      */
-    void copy(const element_vector& values) { copy_(values); }
+    bool operator==(const my_type& rhs) const noexcept;
 
 protected:
-    /// Derived class can override if it likes
-    virtual size_type size_() const noexcept { return layout().shape().size(); }
+    /// Makes a deep polymorphic copy of *this
+    buffer_base_pointer clone_() const override;
 
-    /// Derived class should implement according to data() description
-    virtual pointer get_mutable_data_() noexcept = 0;
+    /// Implements are_equal by checking that rhs is a Contiguous and then
+    /// calling operator==
+    bool are_equal_(const_buffer_base_reference rhs) const noexcept override;
 
-    /// Derived class should implement according to data() const description
-    virtual const_pointer get_immutable_data_() const noexcept = 0;
+    dsl_reference addition_assignment_(label_type this_labels,
+                                       const_labeled_reference lhs,
+                                       const_labeled_reference rhs) override;
+    dsl_reference subtraction_assignment_(label_type this_labels,
+                                          const_labeled_reference lhs,
+                                          const_labeled_reference rhs) override;
+    dsl_reference multiplication_assignment_(
+      label_type this_labels, const_labeled_reference lhs,
+      const_labeled_reference rhs) override;
 
-    /// Derived class should implement according to get_elem()
-    virtual const_reference get_elem_(index_vector index) const = 0;
+    dsl_reference permute_assignment_(label_type this_labels,
+                                      const_labeled_reference rhs) override;
 
-    /// Derived class should implement according to set_elem()
-    virtual void set_elem_(index_vector index, element_type new_value) = 0;
+    dsl_reference scalar_multiplication_(label_type this_labels, double scalar,
+                                         const_labeled_reference rhs) override;
 
-    /// Derived class should implement according to get_data()
-    virtual const_reference get_data_(size_type index) const = 0;
+    bool approximately_equal_(const_buffer_base_reference rhs,
+                              double tol) const override;
 
-    /// Derived class should implement according to set_data()
-    virtual void set_data_(size_type index, element_type new_value) = 0;
+    /// Calls
add_to_stream_ on a stringstream to implement + string_type to_string_() const override; - /// Derived class should implement according to fill() - virtual void fill_(element_type) = 0; + /// Uses Eigen's printing capabilities to add to stream + std::ostream& add_to_stream_(std::ostream& os) const override; - virtual void copy_(const element_vector& values) = 0; -}; +private: + /// Type for storing the hash of *this + using hash_type = std::size_t; + + /// Logic for validating that an index is within the bounds of the shape + void check_index_(const index_vector& index) const; + + /// Converts a coordinate index to a linear (ordinal) index + size_type coordinate_to_ordinal_(index_vector index) const; + + /// Returns the hash for the current state of *this, computing first if + /// needed. + hash_type get_hash_() const { + if(m_recalculate_hash_ or !m_hash_caching_) update_hash_(); + return m_hash_; + } + + /// Computes the hash for the current state of *this + void update_hash_() const; -#define DECLARE_CONTIG_BUFFER(TYPE) extern template class Contiguous + /// Designates that the state may have changed and to recalculate the hash. + /// This function is really just for readability and clarity. + void mark_for_rehash_() const { m_recalculate_hash_ = true; } -TW_APPLY_FLOATING_POINT_TYPES(DECLARE_CONTIG_BUFFER); + /// Designates that state changes are not trackable and we should + /// recalculate the hash each time. + void turn_off_hash_caching_() const { m_hash_caching_ = false; } -#undef DECLARE_CONTIG_BUFFER + /// Tracks whether the hash needs to be redetermined + mutable bool m_recalculate_hash_ = true; + + /// Tracks whether hash caching has been turned off + mutable bool m_hash_caching_ = true; + + /// Holds the computed hash value for this instance's state + mutable hash_type m_hash_ = 0; + + /// How the hyper-rectangular array is shaped + shape_type m_shape_; + + /// The flat buffer holding the elements of *this + buffer_type m_buffer_; +}; + +template +decltype(auto) visit_contiguous_buffer(KernelType&& kernel, + buffer::Contiguous& buffer) { + using fp_types = types::floating_point_types; + auto wtf_buffer = buffer.get_mutable_data(); + return wtf::buffer::visit_contiguous_buffer_view( + std::forward(kernel), wtf_buffer); +} + +template +decltype(auto) visit_contiguous_buffer(KernelType&& kernel, + const buffer::Contiguous& buffer) { + using fp_types = types::floating_point_types; + auto wtf_buffer = buffer.get_immutable_data(); + return wtf::buffer::visit_contiguous_buffer_view( + std::forward(kernel), wtf_buffer); +} + +template +Contiguous make_contiguous(const shape::ShapeBase& shape) { + auto smooth_view = shape.as_smooth(); + using size_type = typename decltype(smooth_view)::size_type; + std::vector extents(smooth_view.rank()); + for(size_type i = 0; i < smooth_view.rank(); ++i) + extents[i] = smooth_view.extent(i); + shape::Smooth smooth_shape(extents.begin(), extents.end()); + std::vector elements(smooth_view.size(), + static_cast(0)); // Initialize to zeroes + return Contiguous(std::move(elements), std::move(smooth_shape)); +} + +inline Contiguous& make_contiguous(buffer::BufferBase& buffer) { + auto* pcontiguous = dynamic_cast(&buffer); + if(pcontiguous == nullptr) + throw std::runtime_error( + "make_contiguous: buffer is not a Contiguous buffer"); + return *pcontiguous; +} + +inline const Contiguous& make_contiguous(const buffer::BufferBase& buffer) { + const auto* pcontiguous = dynamic_cast(&buffer); + if(pcontiguous == nullptr) + throw std::runtime_error( + 
"make_contiguous: buffer is not a Contiguous buffer"); + return *pcontiguous; +} + +/** @brief Makes a new Contiguous buffer using @p buffer as a guide. + * + * This function is used to create a new buffer using @p buffer as a type hint. + * More specifically, this function will create a default initialized + * Contiguous buffer whose shape is given by @p shape. The type of the elements + * is taken from the type of the elements in @p buffer. + */ +Contiguous make_contiguous(const buffer::BufferBase& buffer, + const shape::ShapeBase& shape); } // namespace tensorwrapper::buffer diff --git a/include/tensorwrapper/buffer/eigen.hpp b/include/tensorwrapper/buffer/eigen.hpp deleted file mode 100644 index d41e384e..00000000 --- a/include/tensorwrapper/buffer/eigen.hpp +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -namespace tensorwrapper::buffer { -namespace detail_ { -template -class EigenPIMPL; - -} - -/** @brief A buffer which wraps an Eigen object. - * - * @tparam FloatType The type used to store the elements of the object. - * - * Right now the backend is always an Eigen Tensor, but concievably it could - * be generalized to be matrices or Eigen's map class. - */ -template -class Eigen : public Contiguous { -private: - /// Type of *this - using my_type = Eigen; - - /// Type *this derives from - using my_base_type = Contiguous; - -public: - /// Pull in base class's types - using typename my_base_type::allocator_base_pointer; - using typename my_base_type::buffer_base_pointer; - using typename my_base_type::const_allocator_reference; - using typename my_base_type::const_buffer_base_reference; - using typename my_base_type::const_labeled_reference; - using typename my_base_type::const_layout_reference; - using typename my_base_type::const_pointer; - using typename my_base_type::const_reference; - using typename my_base_type::dsl_reference; - using typename my_base_type::element_type; - using typename my_base_type::element_vector; - using typename my_base_type::index_vector; - using typename my_base_type::label_type; - using typename my_base_type::layout_pointer; - using typename my_base_type::layout_type; - using typename my_base_type::pointer; - using typename my_base_type::polymorphic_base; - using typename my_base_type::reference; - using typename my_base_type::size_type; - - using pimpl_type = detail_::EigenPIMPL; - using pimpl_pointer = std::unique_ptr; - using pimpl_reference = pimpl_type&; - using const_pimpl_reference = const pimpl_type&; - - /** @brief Creates a buffer with no layout and a default initialized - * tensor. - * - * @throw None No throw guarantee. - */ - Eigen() noexcept; - - /** @brief Wraps the provided tensor. - * - * @tparam DataType The type of the input tensor. Must be implicitly - * convertible to an object of type data_type. - * - * @param[in] t The tensor to wrap. - * @param[in] layout The physical layout of @p t. 
- * - * @throw std::bad_alloc if there is a problem copying @p layout. Strong - * throw guarantee. - */ - Eigen(pimpl_pointer pimpl, const_layout_reference layout, - const_allocator_reference allocator) : - Eigen(std::move(pimpl), layout.template clone_as(), - allocator.clone()) {} - - Eigen(pimpl_pointer pimpl, layout_pointer playout, - allocator_base_pointer pallocator); - - /** @brief Initializes *this with a copy of @p other. - * - * @param[in] other The object to copy. - * - * @throw std::bad_alloc if there is a problem allocating the copy. Strong - * throw guarantee. - */ - Eigen(const Eigen& other); - - /** @brief Initializes *this with the state from @p other. - * - * @param[in,out] other The object to take the state from. After this call - * @p other will be in a valid, but otherwise - * undefined state. - * - * @throw None No throw guarantee. - */ - Eigen(Eigen&& other) noexcept; - - /** @brief Replaces the state in *this with a copy of the state in @p rhs. - * - * @param[in] rhs The object to copy the state from. - * - * @return *this after replacing its state with a copy of @p rhs. - * - * @throw std::bad_alloc if the copy fails to allocate memory. Strong - * throw guarantee. - */ - Eigen& operator=(const Eigen& rhs); - - /** @brief Replaces the state in *this with the state in @p rhs. - * - * @param[in,out] rhs The Eigen object to take the state from. After this - * method is called @p rhs will be in a valid, but - * otherwise undefined state. - * - * @return *this after taking the state from @p rhs. - * - * @throw None No throw guarantee. - */ - Eigen& operator=(Eigen&& rhs) noexcept; - - /// Defaulted no throw dtor - ~Eigen() noexcept; - - // ------------------------------------------------------------------------- - // -- Utility methods - // ------------------------------------------------------------------------- - - /** @brief Exchanges the contents of *this with @p other. - * - * @param[in,out] other The buffer to swap state with. - * - * @throw None No throw guarantee. - */ - void swap(Eigen& other) noexcept; - - /** @brief Is *this value equal to @p rhs? - * - * Two Eigen objects are value equal if they both have the same layout and - * they both have the same values. - * - * @note For tensors where the @p FloatType is an uncertain floating point - * number, the tensors are required to have the same sources of - * uncertainty. - * - * @param[in] rhs The object to compare against. - * - * @return True if *this is value equal to @p rhs and false otherwise. - * - * @throw None No throw guarantee. - */ - bool operator==(const Eigen& rhs) const noexcept; - - /** @brief Is *this different from @p rhs? - * - * This class defines different as not value equal. See operator== for the - * definition of value equal. - * - * @param[in] rhs The object to compare *this to. - * - * @return False if *this is value equal to @p rhs and true otherwise. - * - * @throw None No throw guarantee. 
- */ - bool operator!=(const Eigen& rhs) const noexcept { return !(*this == rhs); } - -protected: - /// Implements clone by calling copy ctor - buffer_base_pointer clone_() const override; - - /// Implements are_equal by calling are_equal_impl_ - bool are_equal_(const_buffer_base_reference rhs) const noexcept override; - - /// Implements addition_assignment by calling addition_assignment on state - dsl_reference addition_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) override; - - /// Calls subtraction_assignment on each member - dsl_reference subtraction_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) override; - - /// Calls multiplication_assignment on each member - dsl_reference multiplication_assignment_( - label_type this_labels, const_labeled_reference lhs, - const_labeled_reference rhs) override; - - /// Calls permute_assignment on each member - dsl_reference permute_assignment_(label_type this_labels, - const_labeled_reference rhs) override; - - /// Scales *this by @p scalar - dsl_reference scalar_multiplication_(label_type this_labels, double scalar, - const_labeled_reference rhs) override; - - /// Implements getting the raw pointer - pointer get_mutable_data_() noexcept override; - - /// Implements getting the raw pointer (read-only) - const_pointer get_immutable_data_() const noexcept override; - - /// Implements read-only element access - const_reference get_elem_(index_vector index) const override; - - // Implements element updating - void set_elem_(index_vector index, element_type new_value) override; - - /// Implements read-only element access by ordinal index - const_reference get_data_(size_type index) const override; - - // Implements element updating by ordinal index - void set_data_(size_type index, element_type new_value) override; - - /// Implements filling the tensor - void fill_(element_type value) override; - - /// Implements copying new values into the tensor - void copy_(const element_vector& values) override; - - /// Implements to_string - typename polymorphic_base::string_type to_string_() const override; - - /// Implements add_to_stream - std::ostream& add_to_stream_(std::ostream& os) const override; - -private: - /// True if *this has a PIMPL - bool has_pimpl_() const noexcept; - - /// Throws std::runtime_error if *this has no PIMPL - void assert_pimpl_() const; - - /// Asserts *this has a PIMPL then returns it - pimpl_reference pimpl_(); - - /// Assert *this has a PIMPL then returns it - const_pimpl_reference pimpl_() const; - - /// The object actually implementing *this - pimpl_pointer m_pimpl_; -}; - -/** @brief Wraps downcasting a buffer to an Eigen buffer. - * - * @tparam FloatType The type of the elements in the resulting Buffer. - * - * This function is a convience function for using an allocator to convert - * @p b to a buffer::Eigen object. - * - * @param[in] b The BufferBase object to convert. - * - * @return A reference to @p b after downcasting it. - */ -template -Eigen& to_eigen_buffer(BufferBase& b); - -/** @brief Wraps downcasting a buffer to an Eigen buffer. - * - * @tparam FloatType The type of the elements in the resulting Buffer. - * - * This function is the same as the non-const overload except that result will - * be read-only. - * - * @param[in] b The BufferBase object to convert. - * - * @return A reference to @p b after downcasting it. 
- */ -template -const Eigen& to_eigen_buffer(const BufferBase& b); - -#define DECLARE_EIGEN_BUFFER(TYPE) extern template class Eigen -#define DECLARE_TO_EIGEN_BUFFER(TYPE) \ - extern template Eigen& to_eigen_buffer(BufferBase&) -#define DECLARE_TO_CONST_EIGEN_BUFFER(TYPE) \ - extern template const Eigen& to_eigen_buffer(const BufferBase&) - -TW_APPLY_FLOATING_POINT_TYPES(DECLARE_EIGEN_BUFFER); -TW_APPLY_FLOATING_POINT_TYPES(DECLARE_TO_EIGEN_BUFFER); -TW_APPLY_FLOATING_POINT_TYPES(DECLARE_TO_CONST_EIGEN_BUFFER); - -#undef DECLARE_EIGEN_BUFFER -#undef DECLARE_TO_EIGEN_BUFFER -#undef DECLARE_TO_CONST_EIGEN_BUFFER - -} // namespace tensorwrapper::buffer diff --git a/include/tensorwrapper/buffer/mdbuffer.hpp b/include/tensorwrapper/buffer/mdbuffer.hpp deleted file mode 100644 index ab5e1cc0..00000000 --- a/include/tensorwrapper/buffer/mdbuffer.hpp +++ /dev/null @@ -1,337 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include -#include -#include - -namespace tensorwrapper::buffer { - -/** @brief A multidimensional (MD) buffer. - * - * This class is a dense multidimensional buffer of floating-point values. - */ -class MDBuffer : public Replicated { -private: - /// Type *this derives from - using my_base_type = Replicated; - - /// Type defining the types for the public API of *this - using traits_type = types::ClassTraits; - - /// Type of *this - using my_type = MDBuffer; - -public: - /// Add types from traits_type to public API - ///@{ - using value_type = typename traits_type::value_type; - using reference = typename traits_type::reference; - using const_reference = typename traits_type::const_reference; - using buffer_type = typename traits_type::buffer_type; - using buffer_view = typename traits_type::buffer_view; - using const_buffer_view = typename traits_type::const_buffer_view; - using pimpl_type = typename traits_type::pimpl_type; - using pimpl_pointer = typename traits_type::pimpl_pointer; - using rank_type = typename traits_type::rank_type; - using shape_type = typename traits_type::shape_type; - using const_shape_view = typename traits_type::const_shape_view; - using size_type = typename traits_type::size_type; - ///@} - - using index_vector = std::vector; - using typename my_base_type::label_type; - using string_type = std::string; - - // ------------------------------------------------------------------------- - // -- Ctors, assignment, and dtor - // ------------------------------------------------------------------------- - - /** @brief Creates an empty multi-dimensional buffer. - * - * The resulting buffer will have a shape of rank 0, but a size of 0. Thus - * the buffer can NOT be used to store any elements (including treating - * *this as a scalar). The resulting buffer can be assigned to or moved - * to to populate it. - * - * @throw None No throw guarantee. - */ - MDBuffer() noexcept; - - /** @brief Treats allocated memory like a multi-dimensional buffer. 
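An editorial aside on the to_eigen_buffer helpers just declared: modulo the template brackets lost to extraction, a checked downcast of this kind typically reduces to a dynamic_cast plus an error on mismatch. A minimal sketch, assuming only that Eigen<FloatType> publicly derives from BufferBase; this is illustrative, not the project's actual definition.

```cpp
#include <stdexcept>

// Sketch of a checked downcast in the spirit of to_eigen_buffer. The
// _sketch suffix marks it as hypothetical; not the library's definition.
template<typename FloatType>
Eigen<FloatType>& to_eigen_buffer_sketch(BufferBase& b) {
    auto* pb = dynamic_cast<Eigen<FloatType>*>(&b);
    if(pb == nullptr)
        throw std::runtime_error("Buffer does not hold FloatType elements");
    return *pb;
}
```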
- * - * @tparam T The type of the elements in the buffer. Must satisfy the - * FloatingPoint concept. - * - * This ctor will use @p element to create a buffer_type object and then - * pass that along with @p shape to the main ctor. - * - * @param[in] elements The elements to be used as the backing store. - * @param[in] shape The shape of *this. - * - * @throw std::invalid_argument if the size of @p elements does not match - * the size implied by @p shape. Strong throw - * guarantee. - * @throw std::bad_alloc if there is a problem allocating memory for the - * internal state. Strong throw guarantee. - */ - template - MDBuffer(std::vector elements, shape_type shape) : - MDBuffer(buffer_type(std::move(elements)), std::move(shape)) {} - - /** @brief The main ctor. - * - * This ctor will create *this using @p buffer as the backing store and - * @p shape to describe the geometry of the multidimensional array. - * - * All other ctors (aside from copy and move) delegate to this one. - * - * @param[in] buffer The buffer to be used as the backing store. - * @param[in] shape The shape of *this. - * - * @throw std::invalid_argument if the size of @p buffer does not match - * the size implied by @p shape. Strong throw - * guarantee. - * @throw std::bad_alloc if there is a problem allocating memory for the - * internal state. Strong throw guarantee. - */ - MDBuffer(buffer_type buffer, shape_type shape); - - /** @brief Initializes *this to a deep copy of @p other. - * - * This ctor will initialize *this to be a deep copy of @p other. - * - * @param[in] other The MDBuffer to copy. - * - * @throw std::bad_alloc if there is a problem allocating memory for the - * internal state. Strong throw guarantee. - */ - MDBuffer(const MDBuffer& other) = default; - - /** @brief Move ctor. - * - * This ctor will initialize *this by taking the state from @p other. - * After this ctor is called @p other is left in a valid but unspecified - * state. - * - * @param[in,out] other The MDBuffer to move from. - * - * @throw None No throw guarantee. - */ - MDBuffer(MDBuffer&& other) noexcept = default; - - /** @brief Copy assignment. - * - * This operator will make *this a deep copy of @p other. - * - * @param[in] other The MDBuffer to copy. - * - * @return *this after the assignment. - * - * @throw std::bad_alloc if there is a problem allocating memory for the - * internal state. Strong throw guarantee. - */ - MDBuffer& operator=(const MDBuffer& other) = default; - - /** @brief Move assignment. - * - * This operator will make *this take the state from @p other. After - * this operator is called @p other is left in a valid but unspecified - * state. - * - * @param[in,out] other The MDBuffer to move from. - * - * @return *this after the assignment. - * - * @throw None No throw guarantee. - */ - MDBuffer& operator=(MDBuffer&& other) noexcept = default; - - /** @brief Defaulted dtor. - * - * @throw None No throw guarantee. - */ - ~MDBuffer() override = default; - - // ------------------------------------------------------------------------- - // -- State Accessors - // ------------------------------------------------------------------------- - - /** @brief Returns (a view of) the shape of *this. - * - * The shape of *this describes the geometry of the underlying - * multidimensional array. - * - * @return A view of the shape of *this. - * - * @throw std::bad_alloc if there is a problem allocating memory for the - * returned view. Strong throw guarantee. 
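A usage sketch for the element-vector ctor documented above, assuming shape_type is shape::Smooth (consistent with the rest of this diff) and that it is brace-constructible from extents:

```cpp
#include <utility>
#include <vector>

// Build a 2x3 buffer from six doubles via the element-vector ctor;
// extents and values are illustrative.
std::vector<double> elements{1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
tensorwrapper::shape::Smooth shape{2, 3};
tensorwrapper::buffer::MDBuffer buf(std::move(elements), std::move(shape));
// Five elements against a 2x3 shape would instead throw
// std::invalid_argument with the strong guarantee, per the contract above.
```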
- */ - const_shape_view shape() const; - - /** @brief The total number of elements in *this. - * - * The total number of elements is the product of the extents of each - * mode of *this. - * - * @return The total number of elements in *this. - * - * @throw None No throw guarantee. - */ - size_type size() const noexcept; - - /** @brief Returns the element with the offsets specified by @p index. - * - * This method will retrieve a const reference to the element at the - * offsets specified by @p index. The length of @p index must be equal - * to the rank of *this and each entry in @p index must be less than the - * extent of the corresponding mode of *this. - * - * This method can only be used to retrieve elements from *this. To modify - * elements use set_elem(). - * - * @param[in] index The offsets into each mode of *this for the desired - * element. - * - * @return A const reference to the element at the specified offsets. - */ - const_reference get_elem(index_vector index) const; - - /** @brief Sets the specified element to @p new_value. - * - * This method will set the element at the offsets specified by @p index. - * The length of @p index must be equal to the rank of *this and each - * entry in @p index must be less than the extent of the corresponding - * mode of *this. - * - * @param[in] index The offsets into each mode of *this for the desired - * element. - * @param[in] new_value The new value for the specified element. - * - * @throw std::out_of_range if any entry in @p index is invalid. Strong - * throw guarantee. - */ - void set_elem(index_vector index, value_type new_value); - - /** @brief Returns a view of the data. - * - * This method is deprecated. Use set_slice instead. - */ - [[deprecated]] buffer_view get_mutable_data(); - - /** @brief Returns a read-only view of the data. - * - * This method is deprecated. Use get_slice instead. - */ - [[deprecated]] const_buffer_view get_immutable_data() const; - - // ------------------------------------------------------------------------- - // -- Utility Methods - // ------------------------------------------------------------------------- - - /** @brief Compares two MDBuffer objects for exact equality. - * - * Two MDBuffer objects are exactly equal if they have the same shape and - * if all of their corresponding elements are bitwise identical. - * In practice, the implementation stores a hash of the elements in the - * tensor and compares the hashes for equality rather than checking each - * element individually. - * - * @param[in] rhs The MDBuffer to compare against. - * - * @return True if *this and @p rhs are exactly equal and false otherwise. - * - * @throw None No throw guarantee. 
- */ - bool operator==(const my_type& rhs) const noexcept; - -protected: - /// Makes a deep polymorphic copy of *this - buffer_base_pointer clone_() const override; - - /// Implements are_equal by checking that rhs is an MDBuffer and then - /// calling operator== - bool are_equal_(const_buffer_base_reference rhs) const noexcept override; - - dsl_reference addition_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) override; - dsl_reference subtraction_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) override; - dsl_reference multiplication_assignment_( - label_type this_labels, const_labeled_reference lhs, - const_labeled_reference rhs) override; - - dsl_reference permute_assignment_(label_type this_labels, - const_labeled_reference rhs) override; - - dsl_reference scalar_multiplication_(label_type this_labels, double scalar, - const_labeled_reference rhs) override; - - /// Calls add_to_stream_ on a stringstream to implement - string_type to_string_() const override; - - /// Uses Eigen's printing capabilities to add to stream - std::ostream& add_to_stream_(std::ostream& os) const override; - -private: - /// Type for storing the hash of *this - using hash_type = std::size_t; - - /// Logic for validating that an index is within the bounds of the shape - void check_index_(const index_vector& index) const; - - /// Converts a coordinate index to a linear (ordinal) index - size_type coordinate_to_ordinal_(index_vector index) const; - - /// Returns the hash for the current state of *this, computing first if - /// needed. - hash_type get_hash_() const { - if(m_recalculate_hash_ or !m_hash_caching_) update_hash_(); - return m_hash_; - } - - /// Computes the hash for the current state of *this - void update_hash_() const; - - /// Designates that the state may have changed and to recalculate the hash. - /// This function is really just for readability and clarity. - void mark_for_rehash_() const { m_recalculate_hash_ = true; } - - /// Designates that state changes are not trackable and we should - /// recalculate the hash each time. 
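The trio of mutable members documented above (recalculate-on-demand flag, cache disabling, cached hash) is a standard dirty-flag memoization scheme. A self-contained sketch of the same pattern, with illustrative names and a toy hash combiner:

```cpp
#include <cstddef>
#include <functional>
#include <vector>

// Dirty-flag hash cache in miniature: mutating paths mark the cache
// stale; handing out a raw mutable pointer disables caching entirely,
// since later writes can no longer be observed.
class HashCacheSketch {
public:
    std::size_t get_hash() const {
        if(m_dirty_ || !m_caching_) recompute_();
        return m_hash_;
    }
    void set(std::size_t i, double v) {
        m_data_.at(i) = v;
        mark_for_rehash_();
    }
    double* mutable_data() {
        m_caching_ = false; // writes via this pointer are untrackable
        return m_data_.data();
    }

private:
    void mark_for_rehash_() const { m_dirty_ = true; }
    void recompute_() const {
        std::size_t h = m_data_.size();
        for(double x : m_data_) // toy combiner, illustrative only
            h ^= std::hash<double>{}(x) + 0x9e3779b9 + (h << 6) + (h >> 2);
        m_hash_  = h;
        m_dirty_ = false;
    }
    std::vector<double> m_data_ = std::vector<double>(10, 0.0);
    mutable bool m_dirty_       = true;
    mutable bool m_caching_     = true;
    mutable std::size_t m_hash_ = 0;
};
```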
- void turn_off_hash_caching_() const { m_hash_caching_ = false; } - - /// Tracks whether the hash needs to be redetermined - mutable bool m_recalculate_hash_ = true; - - /// Tracks whether hash caching has been turned off - mutable bool m_hash_caching_ = true; - - /// Holds the computed hash value for this instance's state - mutable hash_type m_hash_ = 0; - - /// How the hyper-rectangular array is shaped - shape_type m_shape_; - - /// The flat buffer holding the elements of *this - buffer_type m_buffer_; -}; - -} // namespace tensorwrapper::buffer diff --git a/include/tensorwrapper/forward_declarations.hpp b/include/tensorwrapper/forward_declarations.hpp index e030b3a9..fee14413 100644 --- a/include/tensorwrapper/forward_declarations.hpp +++ b/include/tensorwrapper/forward_declarations.hpp @@ -15,18 +15,11 @@ */ #pragma once +#include +#include namespace tensorwrapper { -namespace buffer { -namespace detail_ { -class MDBufferPIMPL; -} - -class MDBuffer; - -} // namespace buffer - namespace shape { template class SmoothView; diff --git a/include/tensorwrapper/tensor/detail_/tensor_input.hpp b/include/tensorwrapper/tensor/detail_/tensor_input.hpp index 744e6437..b953c173 100644 --- a/include/tensorwrapper/tensor/detail_/tensor_input.hpp +++ b/include/tensorwrapper/tensor/detail_/tensor_input.hpp @@ -17,7 +17,6 @@ #pragma once #include #include -#include #include #include #include @@ -90,18 +89,8 @@ struct TensorInput { /// Type of a pointer to an object of type physical_layout_type using physical_layout_pointer = std::unique_ptr; - /// Type all allocators inherit from - using allocator_base = allocator::AllocatorBase; - - /// Type of a read-only reference to an object of type allocator_base - using const_allocator_reference = - typename allocator_base::const_base_reference; - - /// Type of a pointer to an object of type allocator_base - using allocator_pointer = typename allocator_base::base_pointer; - /// Type all buffer object's inherit from - using buffer_base = typename allocator_base::buffer_base_type; + using buffer_base = typename buffer::BufferBase; /// Type of a mutable reference to a buffer_base object using buffer_reference = typename buffer_base::base_reference; @@ -116,7 +105,7 @@ struct TensorInput { using const_buffer_pointer = typename buffer_base::const_base_pointer; /// Type of a view of the runtime - using runtime_view_type = typename allocator_base::runtime_view_type; + using runtime_view_type = parallelzone::runtime::RuntimeView; TensorInput() = default; @@ -192,16 +181,6 @@ struct TensorInput { m_pphysical = std::move(pphysical); } - template - TensorInput(const_allocator_reference alloc, Args&&... args) : - TensorInput(alloc.clone(), std::forward(args)...) {} - - template - TensorInput(allocator_pointer palloc, Args&&... args) : - TensorInput(std::forward(args)...) { - m_palloc = std::move(palloc); - } - template TensorInput(const_buffer_reference buffer, Args&&... args) : TensorInput(buffer.clone(), std::forward(args)...) 
{} @@ -242,8 +221,6 @@ struct TensorInput { bool has_physical_layout() const noexcept { return m_pphysical != nullptr; } - bool has_allocator() const noexcept { return m_palloc != nullptr; } - bool has_buffer() const noexcept { return m_pbuffer != nullptr; } ///@} @@ -257,8 +234,6 @@ struct TensorInput { physical_layout_pointer m_pphysical; - allocator_pointer m_palloc; - buffer_pointer m_pbuffer; runtime_view_type m_rv; diff --git a/include/tensorwrapper/tensorwrapper.hpp b/include/tensorwrapper/tensorwrapper.hpp index f529a8a2..861b0131 100644 --- a/include/tensorwrapper/tensorwrapper.hpp +++ b/include/tensorwrapper/tensorwrapper.hpp @@ -15,7 +15,6 @@ */ #pragma once -#include #include #include #include diff --git a/include/tensorwrapper/types/mdbuffer_traits.hpp b/include/tensorwrapper/types/contiguous_traits.hpp similarity index 83% rename from include/tensorwrapper/types/mdbuffer_traits.hpp rename to include/tensorwrapper/types/contiguous_traits.hpp index aa60a608..9f4ce64e 100644 --- a/include/tensorwrapper/types/mdbuffer_traits.hpp +++ b/include/tensorwrapper/types/contiguous_traits.hpp @@ -22,7 +22,7 @@ namespace tensorwrapper::types { -struct MDBufferTraitsCommon { +struct ContiguousTraitsCommon { using value_type = wtf::fp::Float; using const_reference = wtf::fp::FloatView; using buffer_type = wtf::buffer::FloatBuffer; @@ -31,13 +31,11 @@ struct MDBufferTraitsCommon { using const_shape_view = shape::SmoothView; using rank_type = typename ClassTraits::rank_type; using size_type = typename ClassTraits::size_type; - using pimpl_type = tensorwrapper::buffer::detail_::MDBufferPIMPL; - using pimpl_pointer = std::unique_ptr; }; template<> -struct ClassTraits - : public MDBufferTraitsCommon { +struct ClassTraits + : public ContiguousTraitsCommon { using reference = wtf::fp::FloatView; using buffer_view = wtf::buffer::BufferView; @@ -45,8 +43,8 @@ struct ClassTraits }; template<> -struct ClassTraits - : public MDBufferTraitsCommon { +struct ClassTraits + : public ContiguousTraitsCommon { using reference = wtf::fp::FloatView; using buffer_view = wtf::buffer::BufferView; }; diff --git a/include/tensorwrapper/utilities/floating_point_dispatch.hpp b/include/tensorwrapper/utilities/floating_point_dispatch.hpp deleted file mode 100644 index cfc6bd4b..00000000 --- a/include/tensorwrapper/utilities/floating_point_dispatch.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -namespace tensorwrapper::utilities { - -/** @brief Wraps the logic needed to work out the floating point type of buffer. - * - * @tparam KernelType Type of a functor. The functor must define a function - * template called `run` that takes one explicit template - * type parameter (will be the floating point type of @p - * buffer) and @p buffer. `run` may take an arbitrary amount - * of additional arguments. - * @tparam BufferType The type of @p buffer. Must be derived from BufferBase. 
- * May contain cv or reference qualifiers. - * @tparam Args The types of any additional arguments which will be forwarded - * to @p kernel. - * - * @param[in] kernel The functor instance to call `run` on. - * @param[in] buffer The type of the elements in @p buffer will be used to - * dispatch. - * @param[in] args Any additional arguments to forward to @p kernel. - * - * @return Returns whatever @p kernel returns. - * - * @throw std::runtime_error if @p buffer is not derived from - */ -template -decltype(auto) floating_point_dispatch(KernelType&& kernel, BufferType&& buffer, - Args&&... args) { - using buffer_clean = std::decay_t; - using buffer_base = buffer::BufferBase; - constexpr bool is_buffer = std::is_base_of_v; - static_assert(is_buffer); - - using types::udouble; - using types::ufloat; - - if(allocator::Eigen::can_rebind(buffer)) { - return kernel.template run(buffer, std::forward(args)...); - } else if(allocator::Eigen::can_rebind(buffer)) { - return kernel.template run(buffer, std::forward(args)...); - } else if(allocator::Eigen::can_rebind(buffer)) { - return kernel.template run(buffer, std::forward(args)...); - } else if(allocator::Eigen::can_rebind(buffer)) { - return kernel.template run(buffer, - std::forward(args)...); - } else { - throw std::runtime_error("Can't rebind buffer to Contiguous<>"); - } -} - -} // namespace tensorwrapper::utilities diff --git a/include/tensorwrapper/utilities/utilities.hpp b/include/tensorwrapper/utilities/utilities.hpp index 790313d7..39632702 100644 --- a/include/tensorwrapper/utilities/utilities.hpp +++ b/include/tensorwrapper/utilities/utilities.hpp @@ -16,7 +16,6 @@ #pragma once #include -#include #include /// Namespace for helper functions diff --git a/src/python/tensor/export_tensor.cpp b/src/python/tensor/export_tensor.cpp index 2769d609..31877149 100644 --- a/src/python/tensor/export_tensor.cpp +++ b/src/python/tensor/export_tensor.cpp @@ -20,50 +20,98 @@ #include namespace tensorwrapper { +namespace { -template -auto make_buffer_info(buffer::Contiguous& buffer) { - using size_type = std::size_t; - constexpr auto nbytes = sizeof(FloatType); - const auto desc = pybind11::format_descriptor::format(); - const auto rank = buffer.rank(); +template +auto get_desc_() -> decltype(pybind11::format_descriptor::format()) { + if constexpr(std::is_same_v) + return pybind11::format_descriptor::format(); + else if constexpr(std::is_same_v) + return pybind11::format_descriptor::format(); + else if constexpr(std::is_same_v) + return pybind11::format_descriptor::format(); + else + throw std::runtime_error("Unsupported floating point type!"); +} - const auto smooth_shape = buffer.layout().shape().as_smooth(); +struct GetBufferDataKernel { + using size_type = std::size_t; + using shape_type = shape::Smooth; + + GetBufferDataKernel(size_type rank, shape_type& smooth_shape) : + m_rank(rank), m_psmooth_shape(&smooth_shape) {} + + template + pybind11::buffer_info operator()(std::span buffer) { + using clean_type = std::decay_t; + + // We have only tested with doubles at the moment. 
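An aside before this hunk continues: the floating_point_dispatch helper removed above expects kernels whose `run` takes the element type as an explicit template parameter. A hedged sketch of such a kernel; the functor name and the ordinal get_data accessor are illustrative:

```cpp
#include <cstddef>

// Hypothetical kernel for the removed floating_point_dispatch: `run`
// receives the buffer's element type, the buffer, and any forwarded
// arguments; its return value is forwarded back to the caller.
struct SumKernel {
    template<typename FloatType, typename BufferType>
    double run(BufferType&& buffer, std::size_t n) {
        double total = 0.0;
        for(std::size_t i = 0; i < n; ++i)
            total += static_cast<double>(buffer.get_data(i));
        return total;
    }
};
// Possible call: floating_point_dispatch(SumKernel{}, some_buffer, n);
```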
+ if constexpr(!std::is_same_v) + throw std::runtime_error("Expected doubles in the buffer!"); - std::vector shape(rank); - std::vector strides(rank); - for(size_type rank_i = 0; rank_i < rank; ++rank_i) { - shape[rank_i] = smooth_shape.extent(rank_i); - size_type stride_i = 1; - for(size_type mode_i = rank_i + 1; mode_i < rank; ++mode_i) - stride_i *= smooth_shape.extent(mode_i); - strides[rank_i] = stride_i * nbytes; + constexpr auto nbytes = sizeof(clean_type); + + const auto desc = get_desc_(); + const auto rank = m_rank; + + std::vector shape(rank); + std::vector strides(rank); + for(size_type rank_i = 0; rank_i < rank; ++rank_i) { + shape[rank_i] = m_psmooth_shape->extent(rank_i); + size_type stride_i = 1; + for(size_type mode_i = rank_i + 1; mode_i < rank; ++mode_i) + stride_i *= m_psmooth_shape->extent(mode_i); + strides[rank_i] = stride_i * nbytes; + } + auto* ptr = const_cast(buffer.data()); + return pybind11::buffer_info(ptr, nbytes, desc, rank, shape, strides); } - return pybind11::buffer_info(buffer.get_mutable_data(), nbytes, desc, rank, - shape, strides); -} -auto make_tensor(pybind11::buffer b) { - pybind11::buffer_info info = b.request(); - if(info.format != pybind11::format_descriptor::format()) + size_type m_rank; + shape_type* m_psmooth_shape; +}; + +template +Tensor make_tensor_(pybind11::buffer_info& info) { + if(info.format != pybind11::format_descriptor::format()) throw std::runtime_error( - "Incompatible format: expected a double array!"); + "Incompatible format: expected a float array!"); + // Work out physical layout of tensor std::vector dims(info.ndim); for(auto i = 0; i < info.ndim; ++i) { dims[i] = info.shape[i]; } + shape::Smooth shape(dims.begin(), dims.end()); + layout::Physical layout(shape); + + // Fill in Buffer object + auto n_elements = shape.size(); + std::vector data(n_elements); + auto pData = static_cast(info.ptr); + std::copy(pData, pData + n_elements, data.begin()); + auto pBuffer = std::make_unique(data, shape); + + return Tensor(shape, std::move(pBuffer)); +} - parallelzone::runtime::RuntimeView rv = {}; - allocator::Eigen allocator(rv); - shape::Smooth matrix_shape{dims.begin(), dims.end()}; - layout::Physical matrix_layout(matrix_shape); - auto pBuffer = allocator.allocate(matrix_layout); +} // namespace - auto n_elements = std::accumulate(dims.begin(), dims.end(), 1, - std::multiplies()); - auto pData = static_cast(info.ptr); - for(auto i = 0; i < n_elements; ++i) pBuffer->set_data(i, pData[i]); +auto make_buffer_info(buffer::Contiguous& buffer) { + const auto rank = buffer.rank(); + const auto smooth_shape = buffer.layout().shape().as_smooth(); + std::vector extents(rank); + for(std::size_t i = 0; i < rank; ++i) extents[i] = smooth_shape.extent(i); + shape::Smooth shape(extents.begin(), extents.end()); + GetBufferDataKernel kernel(rank, shape); + return buffer::visit_contiguous_buffer(kernel, buffer); +} - return Tensor(matrix_shape, std::move(pBuffer)); +Tensor make_tensor(pybind11::buffer b) { + pybind11::buffer_info info = b.request(); + if(info.format == pybind11::format_descriptor::format()) + return make_tensor_(info); + else + throw std::runtime_error( + "Incompatible format: expected a double array!"); } void export_tensor(py_module_reference m) { @@ -75,9 +123,9 @@ void export_tensor(py_module_reference m) { .def(pybind11::self != pybind11::self) .def("__str__", [](Tensor& self) { return self.to_string(); }) .def_buffer([](Tensor& t) { - auto pbuffer = dynamic_cast*>(&t.buffer()); + auto pbuffer = dynamic_cast(&t.buffer()); 
if(pbuffer == nullptr) - throw std::runtime_error("Expected buffer to hold doubles"); + throw std::runtime_error("Expected buffer to be contiguous"); return make_buffer_info(*pbuffer); }); } diff --git a/src/tensorwrapper/allocator/allocator_base.cpp b/src/tensorwrapper/allocator/allocator_base.cpp deleted file mode 100644 index 049b76d2..00000000 --- a/src/tensorwrapper/allocator/allocator_base.cpp +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ diff --git a/src/tensorwrapper/allocator/eigen.cpp b/src/tensorwrapper/allocator/eigen.cpp deleted file mode 100644 index d493ecae..00000000 --- a/src/tensorwrapper/allocator/eigen.cpp +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
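The stride loop in the export_tensor hunk above is ordinary row-major (C-order) stride arithmetic; the same computation in isolation:

```cpp
#include <cstddef>
#include <vector>

// Row-major strides in bytes: the stride of mode i is the product of the
// extents of all faster-running (later) modes, times the element size.
int main() {
    std::vector<std::size_t> extents{2, 3, 4};
    std::vector<std::size_t> strides(extents.size());
    std::size_t stride = sizeof(double);
    for(std::size_t i = extents.size(); i-- > 0;) {
        strides[i] = stride; // yields {96, 32, 8} for doubles
        stride *= extents[i];
    }
    return 0;
}
```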
- */ - -#include "../buffer/detail_/eigen_tensor.hpp" -#include "../tensor/detail_/il_utils.hpp" -#include -#include -#include -#include - -namespace tensorwrapper::allocator { - -#define TPARAMS template -#define EIGEN Eigen - -TPARAMS -bool EIGEN::can_rebind(const_buffer_base_reference buffer) { - auto pbuffer = dynamic_cast*>(&buffer); - return pbuffer != nullptr; -} - -TPARAMS -typename EIGEN::eigen_buffer_reference EIGEN::rebind( - buffer_base_reference buffer) { - if(can_rebind(buffer)) return static_cast(buffer); - throw std::runtime_error("Can not rebind buffer"); -} - -TPARAMS -typename EIGEN::const_eigen_buffer_reference EIGEN::rebind( - const_buffer_base_reference buffer) { - if(can_rebind(buffer)) - return dynamic_cast(buffer); - throw std::runtime_error("Can not rebind buffer"); -} - -// ----------------------------------------------------------------------------- -// -- Protected methods -// ----------------------------------------------------------------------------- - -#define ALLOCATE(Rank) \ - if(playout->rank() == Rank) { \ - using pimpl_type = buffer::detail_::EigenTensor; \ - auto ppimpl = \ - std::make_unique(playout->shape().as_smooth()); \ - return std::make_unique( \ - std::move(ppimpl), std::move(playout), this->clone()); \ - } - -TPARAMS -typename EIGEN::buffer_base_pointer EIGEN::allocate_(layout_pointer playout) { - using buffer_type = buffer::Eigen; - ALLOCATE(0) - else ALLOCATE(1) else ALLOCATE(2) else ALLOCATE(3) else ALLOCATE(4) else ALLOCATE(5) else ALLOCATE( - 6) else ALLOCATE(7) else ALLOCATE(8) else ALLOCATE(9) else ALLOCATE(10) else { - throw std::runtime_error("Tensors with rank > 10 not supported."); - } -} - -TPARAMS -typename EIGEN::contiguous_pointer EIGEN::construct_(rank0_il il) { - return il_construct_(il); -} - -TPARAMS -typename EIGEN::contiguous_pointer EIGEN::construct_(rank1_il il) { - return il_construct_(il); -} - -TPARAMS -typename EIGEN::contiguous_pointer EIGEN::construct_(rank2_il il) { - return il_construct_(il); -} - -TPARAMS -typename EIGEN::contiguous_pointer EIGEN::construct_(rank3_il il) { - return il_construct_(il); -} - -TPARAMS -typename EIGEN::contiguous_pointer EIGEN::construct_(rank4_il il) { - return il_construct_(il); -} - -TPARAMS -typename EIGEN::contiguous_pointer EIGEN::construct_(layout_pointer playout, - element_type value) { - auto pbuffer = this->allocate(std::move(playout)); - auto& contig_buffer = static_cast&>(*pbuffer); - contig_buffer.fill(value); - return pbuffer; -} - -// -- Private - -TPARAMS -template -typename EIGEN::contiguous_pointer EIGEN::il_construct_(ILType il) { - auto [extents, data] = unwrap_il(il); - shape::Smooth shape(extents.begin(), extents.end()); - auto playout = std::make_unique(std::move(shape)); - auto pbuffer = this->allocate(std::move(playout)); - auto& buffer_down = rebind(*pbuffer); - buffer_down.copy(data); - return pbuffer; -} - -#undef EIGEN -#undef TPARAMS - -// -- Explicit class template instantiation - -#define DEFINE_EIGEN_ALLOCATOR(TYPE) template class Eigen - -TW_APPLY_FLOATING_POINT_TYPES(DEFINE_EIGEN_ALLOCATOR); - -#undef DEFINE_EIGEN_ALLOCATOR - -} // namespace tensorwrapper::allocator diff --git a/src/tensorwrapper/backends/backends.hpp b/src/tensorwrapper/backends/backends.hpp index f08993c7..9daa4fe7 100644 --- a/src/tensorwrapper/backends/backends.hpp +++ b/src/tensorwrapper/backends/backends.hpp @@ -15,4 +15,5 @@ */ #pragma once +#include #include diff --git a/src/tensorwrapper/backends/eigen.hpp b/src/tensorwrapper/backends/eigen.hpp deleted file mode 100644 
index fa564773..00000000 --- a/src/tensorwrapper/backends/eigen.hpp +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include - -namespace tensorwrapper::eigen { - -template -using data_type = Eigen::Tensor; - -} // namespace tensorwrapper::eigen diff --git a/src/tensorwrapper/buffer/buffer_base.cpp b/src/tensorwrapper/buffer/buffer_base.cpp index f592f89f..4411afe7 100644 --- a/src/tensorwrapper/buffer/buffer_base.cpp +++ b/src/tensorwrapper/buffer/buffer_base.cpp @@ -32,7 +32,6 @@ dsl_reference BufferBase::binary_op_common_(FxnType&& fxn, auto rlayout = rbuffer.layout()(rhs.labels()); if(!has_layout()) m_layout_ = lbuffer.layout().clone_as(); - if(!has_allocator()) m_allocator_ = lbuffer.allocator().clone(); fxn(m_layout_, this_labels, llayout, rlayout); @@ -74,7 +73,6 @@ dsl_reference BufferBase::permute_assignment_(label_type this_labels, auto rlayout = rhs.object().layout()(rhs.labels()); if(!has_layout()) m_layout_ = rhs.object().layout().clone_as(); - if(!has_allocator()) m_allocator_ = rhs.object().allocator().clone(); m_layout_->permute_assignment(this_labels, rlayout); diff --git a/src/tensorwrapper/buffer/contiguoues.cpp b/src/tensorwrapper/buffer/contiguoues.cpp deleted file mode 100644 index 36438832..00000000 --- a/src/tensorwrapper/buffer/contiguoues.cpp +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
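The alias in the backends/eigen.hpp deletion above lost its angle-bracketed parameters to extraction; it presumably mapped an element type and a compile-time rank onto Eigen's unsupported Tensor module, along these lines. The parameter list and the RowMajor option are assumptions, the latter consistent with the row-major stride math used elsewhere in this diff:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

// Hedged reconstruction of the removed alias; not verbatim from the repo.
template<typename FloatType, int Rank>
using data_type = ::Eigen::Tensor<FloatType, Rank, ::Eigen::RowMajor>;
```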
- */
-#include 
-
-namespace tensorwrapper::buffer {
-
-#define DEFINE_CONTIG_BUFFER(TYPE) template class Contiguous<TYPE>
-
-TW_APPLY_FLOATING_POINT_TYPES(DEFINE_CONTIG_BUFFER);
-
-#undef DEFINE_CONTIG_BUFFER
-
-} // namespace tensorwrapper::buffer
diff --git a/src/tensorwrapper/buffer/mdbuffer.cpp b/src/tensorwrapper/buffer/contiguous.cpp
similarity index 66%
rename from src/tensorwrapper/buffer/mdbuffer.cpp
rename to src/tensorwrapper/buffer/contiguous.cpp
index 79400829..41c1cc4c 100644
--- a/src/tensorwrapper/buffer/mdbuffer.cpp
+++ b/src/tensorwrapper/buffer/contiguous.cpp
@@ -17,17 +17,18 @@
 #include "../backends/eigen/eigen_tensor_impl.hpp"
 #include "detail_/binary_operation_visitor.hpp"
 #include "detail_/hash_utilities.hpp"
-#include 
+#include 
 #include 
 
 namespace tensorwrapper::buffer {
 namespace {
 template<typename T>
-const MDBuffer& downcast(T&& object) {
-    auto* pobject = dynamic_cast<const MDBuffer*>(&object);
+const Contiguous& downcast(T&& object) {
+    auto* pobject = dynamic_cast<const Contiguous*>(&object);
     if(pobject == nullptr) {
-        throw std::invalid_argument("The provided buffer must be an MDBuffer.");
+        throw std::invalid_argument(
+          "The provided buffer must be a Contiguous.");
     }
     return *pobject;
 }
@@ -35,10 +36,10 @@ const MDBuffer& downcast(T&& object) {
 using fp_types = types::floating_point_types;
 
-MDBuffer::MDBuffer() noexcept = default;
+Contiguous::Contiguous() noexcept = default;
 
-MDBuffer::MDBuffer(buffer_type buffer, shape_type shape) :
-    my_base_type(std::make_unique(shape), nullptr),
+Contiguous::Contiguous(buffer_type buffer, shape_type shape) :
+    my_base_type(std::make_unique(shape)),
     m_shape_(std::move(shape)),
     m_buffer_() {
     if(buffer.size() == shape.size()) {
@@ -54,35 +55,43 @@ MDBuffer::MDBuffer(buffer_type buffer, shape_type shape) :
 // -- State Accessor
 // -----------------------------------------------------------------------------
 
-auto MDBuffer::shape() const -> const_shape_view { return m_shape_; }
+auto Contiguous::shape() const -> const_shape_view { return m_shape_; }
 
-auto MDBuffer::size() const noexcept -> size_type { return m_buffer_.size(); }
+auto Contiguous::size() const noexcept -> size_type { return m_buffer_.size(); }
 
-auto MDBuffer::get_elem(index_vector index) const -> const_reference {
+auto Contiguous::get_elem(index_vector index) const -> const_reference {
     auto ordinal_index = coordinate_to_ordinal_(index);
     return m_buffer_.at(ordinal_index);
 }
 
-void MDBuffer::set_elem(index_vector index, value_type new_value) {
+void Contiguous::set_elem(index_vector index, value_type new_value) {
     auto ordinal_index = coordinate_to_ordinal_(index);
     mark_for_rehash_();
     m_buffer_.at(ordinal_index) = new_value;
 }
 
-auto MDBuffer::get_mutable_data() -> buffer_view {
+auto Contiguous::get_mutable_data() -> buffer_view {
     mark_for_rehash_();
     return m_buffer_;
 }
 
-auto MDBuffer::get_immutable_data() const -> const_buffer_view {
+auto Contiguous::get_immutable_data() const -> const_buffer_view {
     return m_buffer_;
 }
 
+auto Contiguous::infinity_norm() const -> value_type {
+    if(m_buffer_.size() == 0)
+        throw std::runtime_error(
+          "Cannot compute the infinity norm of an empty tensor.");
+    detail_::InfinityNormVisitor visitor;
+    return wtf::buffer::visit_contiguous_buffer(visitor, m_buffer_);
+}
+
 // -----------------------------------------------------------------------------
 // -- Utility Methods
 // -----------------------------------------------------------------------------
 
-bool MDBuffer::operator==(const my_type& rhs) const noexcept {
+bool Contiguous::operator==(const my_type& rhs) const noexcept {
if(!my_base_type::operator==(rhs)) return false; return get_hash_() == rhs.get_hash_(); } @@ -91,17 +100,17 @@ bool MDBuffer::operator==(const my_type& rhs) const noexcept { // -- Protected Methods // ----------------------------------------------------------------------------- -auto MDBuffer::clone_() const -> buffer_base_pointer { - return std::make_unique(*this); +auto Contiguous::clone_() const -> buffer_base_pointer { + return std::make_unique(*this); } -bool MDBuffer::are_equal_(const_buffer_base_reference rhs) const noexcept { +bool Contiguous::are_equal_(const_buffer_base_reference rhs) const noexcept { return my_base_type::template are_equal_impl_(rhs); } -auto MDBuffer::addition_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) +auto Contiguous::addition_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) -> dsl_reference { const auto& lhs_down = downcast(lhs.object()); const auto& rhs_down = downcast(rhs.object()); @@ -126,9 +135,9 @@ auto MDBuffer::addition_assignment_(label_type this_labels, return *this; } -auto MDBuffer::subtraction_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) +auto Contiguous::subtraction_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) -> dsl_reference { const auto& lhs_down = downcast(lhs.object()); const auto& rhs_down = downcast(rhs.object()); @@ -153,9 +162,9 @@ auto MDBuffer::subtraction_assignment_(label_type this_labels, return *this; } -auto MDBuffer::multiplication_assignment_(label_type this_labels, - const_labeled_reference lhs, - const_labeled_reference rhs) +auto Contiguous::multiplication_assignment_(label_type this_labels, + const_labeled_reference lhs, + const_labeled_reference rhs) -> dsl_reference { const auto& lhs_down = downcast(lhs.object()); const auto& rhs_down = downcast(rhs.object()); @@ -179,8 +188,8 @@ auto MDBuffer::multiplication_assignment_(label_type this_labels, return *this; } -auto MDBuffer::permute_assignment_(label_type this_labels, - const_labeled_reference rhs) +auto Contiguous::permute_assignment_(label_type this_labels, + const_labeled_reference rhs) -> dsl_reference { const auto& rhs_down = downcast(rhs.object()); const auto& rhs_labels = rhs.labels(); @@ -198,8 +207,8 @@ auto MDBuffer::permute_assignment_(label_type this_labels, return *this; } -auto MDBuffer::scalar_multiplication_(label_type this_labels, double scalar, - const_labeled_reference rhs) +auto Contiguous::scalar_multiplication_(label_type this_labels, double scalar, + const_labeled_reference rhs) -> dsl_reference { const auto& rhs_down = downcast(rhs.object()); const auto& rhs_labels = rhs.labels(); @@ -217,13 +226,26 @@ auto MDBuffer::scalar_multiplication_(label_type this_labels, double scalar, return *this; } -auto MDBuffer::to_string_() const -> string_type { +bool Contiguous::approximately_equal_(const_buffer_base_reference rhs, + double tol) const { + const auto& rhs_down = downcast(rhs); + if(rank() != rhs_down.rank()) return false; + + std::string index(rank() ? 
"i0" : ""); + for(std::size_t i = 1; i < rank(); ++i) index += (",i" + std::to_string(i)); + Contiguous result(*this); + result.subtraction_assignment(index, (*this)(index), rhs_down(index)); + detail_::ApproximatelyEqualVisitor k(tol); + return buffer::visit_contiguous_buffer(k, result); +} + +auto Contiguous::to_string_() const -> string_type { std::stringstream ss; add_to_stream_(ss); return ss.str(); } -std::ostream& MDBuffer::add_to_stream_(std::ostream& os) const { +std::ostream& Contiguous::add_to_stream_(std::ostream& os) const { /// XXX: EigenTensor should handle aliasing a const buffer correctly. That's /// a lot of work, just to get this to work though... @@ -243,7 +265,7 @@ std::ostream& MDBuffer::add_to_stream_(std::ostream& os) const { // -- Private Methods // ----------------------------------------------------------------------------- -void MDBuffer::check_index_(const index_vector& index) const { +void Contiguous::check_index_(const index_vector& index) const { if(index.size() != m_shape_.rank()) { throw std::out_of_range( "The length of the provided index does not match the rank of " @@ -258,7 +280,7 @@ void MDBuffer::check_index_(const index_vector& index) const { } } -auto MDBuffer::coordinate_to_ordinal_(index_vector index) const -> size_type { +auto Contiguous::coordinate_to_ordinal_(index_vector index) const -> size_type { check_index_(index); using size_type = typename decltype(index)::size_type; size_type ordinal = 0; @@ -270,7 +292,7 @@ auto MDBuffer::coordinate_to_ordinal_(index_vector index) const -> size_type { return ordinal; } -void MDBuffer::update_hash_() const { +void Contiguous::update_hash_() const { buffer::detail_::hash_utilities::HashVisitor visitor; if(m_buffer_.size()) { wtf::buffer::visit_contiguous_buffer(visitor, m_buffer_); @@ -279,4 +301,26 @@ void MDBuffer::update_hash_() const { m_recalculate_hash_ = false; } +// ----------------------------------------------------------------------------- +// Free functions +// ----------------------------------------------------------------------------- + +Contiguous make_contiguous(const buffer::BufferBase& buffer, + const shape::ShapeBase& shape) { + auto smooth_view = shape.as_smooth(); + using size_type = typename decltype(smooth_view)::size_type; + std::vector extents(smooth_view.rank()); + for(size_type i = 0; i < smooth_view.rank(); ++i) + extents[i] = smooth_view.extent(i); + shape::Smooth smooth_shape(extents.begin(), extents.end()); + + auto lambda = [=](const auto& span) { + using value_type = std::decay_t; + std::vector data(smooth_shape.size()); + return Contiguous(std::move(data), std::move(smooth_shape)); + }; + + return visit_contiguous_buffer(lambda, make_contiguous(buffer)); +} + } // namespace tensorwrapper::buffer diff --git a/src/tensorwrapper/buffer/detail_/eigen_dispatch.hpp b/src/tensorwrapper/buffer/detail_/eigen_dispatch.hpp deleted file mode 100644 index 90c26509..00000000 --- a/src/tensorwrapper/buffer/detail_/eigen_dispatch.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include -#include - -namespace tensorwrapper::buffer::detail_ { - -constexpr std::size_t MaxEigenRank = 8; - -template -using eigen_tensor_type = eigen::Tensor; - -template -using eigen_tensor_map = eigen::TensorMap>; - -template -auto wrap_tensor(std::span s, const shape::Smooth& shape) { - using tensor_type = eigen::Tensor; - using map_type = eigen::TensorMap; - - if constexpr(Rank > MaxEigenRank) { - static_assert( - Rank <= MaxEigenRank, - "Eigen tensors of rank > MaxEigenRank are not supported."); - } else { - if(shape.rank() == Rank) return variant_type(map_type(s)); - } -} - -template -auto eigen_dispatch_impl(VisitorType&& visitor, - eigen::TensorMap>& A, - Args&&... args) { - return visitor(A, std::forward(args)...); -} - -template -auto eigen_tensor_dispatch(std::span s, shape::Smooth shape, - Args&&... args) { - using tensor_type = eigen::Tensor; -} - -} // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp b/src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp deleted file mode 100644 index 8c297914..00000000 --- a/src/tensorwrapper/buffer/detail_/eigen_pimpl.hpp +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include "../../backends/eigen.hpp" -#include -#include - -namespace tensorwrapper::buffer::detail_ { - -/// Common API that type-erases Eigen's many tensor classes. 
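The PIMPL hierarchy introduced below follows the non-virtual interface (NVI) idiom: each public method asserts the shared preconditions once, then forwards to a protected virtual hook. The pattern in miniature (names illustrative):

```cpp
#include <cassert>
#include <cstddef>

// NVI in miniature: preconditions live in the public wrapper; rank-specific
// derived classes override only the trailing-underscore hooks.
class PimplSketch {
public:
    virtual ~PimplSketch() = default;
    std::size_t extent(unsigned i) const {
        assert(i < rank_()); // checked once, for every implementation
        return extent_(i);
    }

protected:
    virtual unsigned rank_() const noexcept       = 0;
    virtual std::size_t extent_(unsigned i) const = 0;
};
```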
-template -class EigenPIMPL - : public tensorwrapper::detail_::PolymorphicBase> { -private: - using my_type = EigenPIMPL; - using polymorphic_base = tensorwrapper::detail_::PolymorphicBase; - -public: - using parent_type = Eigen; - using pimpl_pointer = typename parent_type::pimpl_pointer; - using label_type = typename parent_type::label_type; - using element_type = typename parent_type::element_type; - using element_vector = typename parent_type::element_vector; - using reference = typename parent_type::reference; - using const_shape_reference = const shape::ShapeBase&; - using const_reference = typename parent_type::const_reference; - using pointer = typename parent_type::pointer; - using const_pointer = typename parent_type::const_pointer; - using string_type = typename polymorphic_base::string_type; - using index_vector = typename parent_type::index_vector; - using size_type = typename parent_type::size_type; - - using const_pimpl_reference = const EigenPIMPL&; - - using eigen_rank_type = unsigned int; - - eigen_rank_type rank() const noexcept { return rank_(); } - - size_type size() const noexcept { return size_(); } - - size_type extent(eigen_rank_type i) const { - assert(i < rank()); - return extent_(i); - } - - pointer get_mutable_data() noexcept { return get_mutable_data_(); } - - const_pointer get_immutable_data() const noexcept { - return get_immutable_data_(); - } - - const_reference get_elem(index_vector index) const { - assert(index.size() == rank()); - return get_elem_(std::move(index)); - } - - void set_elem(index_vector index, element_type new_value) { - assert(index.size() == rank()); - set_elem_(index, new_value); - } - - const_reference get_data(size_type index) const { - assert(index < size()); - return get_data_(std::move(index)); - } - - void set_data(size_type index, element_type new_value) { - assert(index < size()); - set_data_(index, new_value); - } - - void fill(element_type value) { fill_(std::move(value)); } - - void copy(const element_vector& values) { - assert(values.size() <= size()); - copy_(values); - } - - void addition_assignment(label_type this_labels, label_type lhs_labels, - label_type rhs_labels, const_pimpl_reference lhs, - const_pimpl_reference rhs) { - addition_assignment_(std::move(this_labels), std::move(lhs_labels), - std::move(rhs_labels), lhs, rhs); - } - - void subtraction_assignment(label_type this_labels, label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) { - subtraction_assignment_(std::move(this_labels), std::move(lhs_labels), - std::move(rhs_labels), lhs, rhs); - } - - void hadamard_assignment(label_type this_labels, label_type lhs_labels, - label_type rhs_labels, const_pimpl_reference lhs, - const_pimpl_reference rhs) { - hadamard_assignment_(std::move(this_labels), std::move(lhs_labels), - std::move(rhs_labels), lhs, rhs); - } - - void contraction_assignment(label_type this_labels, label_type lhs_labels, - label_type rhs_labels, - const_shape_reference result_shape, - const_pimpl_reference lhs, - const_pimpl_reference rhs) { - contraction_assignment_(std::move(this_labels), std::move(lhs_labels), - std::move(rhs_labels), result_shape, lhs, rhs); - } - - void permute_assignment(label_type this_labels, label_type rhs_labels, - const_pimpl_reference rhs) { - permute_assignment_(std::move(this_labels), std::move(rhs_labels), rhs); - } - - void scalar_multiplication(label_type this_labels, label_type rhs_labels, - FloatType scalar, const_pimpl_reference rhs) { - 
scalar_multiplication_(std::move(this_labels), std::move(rhs_labels), - scalar, rhs); - } - -protected: - virtual eigen_rank_type rank_() const noexcept = 0; - virtual size_type size_() const = 0; - virtual size_type extent_(eigen_rank_type i) const = 0; - virtual pointer get_mutable_data_() noexcept = 0; - virtual const_pointer get_immutable_data_() const noexcept = 0; - virtual const_reference get_elem_(index_vector index) const = 0; - virtual void set_elem_(index_vector index, element_type new_value) = 0; - virtual const_reference get_data_(size_type index) const = 0; - virtual void set_data_(size_type index, element_type new_value) = 0; - virtual void fill_(element_type value) = 0; - virtual void copy_(const element_vector& values) = 0; - virtual void addition_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) = 0; - virtual void subtraction_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) = 0; - virtual void hadamard_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) = 0; - virtual void contraction_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_shape_reference result_shape, - const_pimpl_reference lhs, - const_pimpl_reference rhs) = 0; - virtual void permute_assignment_(label_type this_labels, - label_type rhs_labels, - const_pimpl_reference rhs) = 0; - virtual void scalar_multiplication_(label_type this_labels, - label_type rhs_labels, FloatType scalar, - const_pimpl_reference rhs) = 0; -}; - -} // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/buffer/detail_/eigen_tensor.cpp b/src/tensorwrapper/buffer/detail_/eigen_tensor.cpp deleted file mode 100644 index 84096fb5..00000000 --- a/src/tensorwrapper/buffer/detail_/eigen_tensor.cpp +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "../../backends/eigen.hpp" -#include "../contraction_planner.hpp" -#include "eigen_tensor.hpp" - -namespace tensorwrapper::buffer::detail_ { - -#define TPARAMS template -#define EIGEN_TENSOR EigenTensor - -TPARAMS -template -void EIGEN_TENSOR::element_wise_op_(OperationType op, label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) { - // Downcast LHS and RHS - const auto* lhs_down = dynamic_cast(&lhs); - const auto& lhs_eigen = lhs_down->m_tensor_; - const auto* rhs_down = dynamic_cast(&rhs); - const auto& rhs_eigen = rhs_down->m_tensor_; - - // Whose indices match whose? 
- bool this_matches_lhs = (this_labels == lhs_labels); - bool this_matches_rhs = (this_labels == rhs_labels); - bool lhs_matches_rhs = (lhs_labels == rhs_labels); - - // The three possible permutations we may need to apply - auto get_permutation = [](auto&& lhs_, auto&& rhs_) { - auto l_to_r = lhs_.permutation(rhs_); - return std::vector(l_to_r.begin(), l_to_r.end()); - }; - auto r_to_l = get_permutation(rhs_labels, lhs_labels); - auto l_to_r = get_permutation(lhs_labels, rhs_labels); - auto this_to_r = get_permutation(this_labels, rhs_labels); - - if(this_matches_lhs && this_matches_rhs) { // No permutations - m_tensor_ = op(lhs_eigen, rhs_eigen); - } else if(this_matches_lhs) { // RHS needs permuted - m_tensor_ = op(lhs_eigen, rhs_eigen.shuffle(r_to_l)); - } else if(this_matches_rhs) { // LHS needs permuted - m_tensor_ = op(lhs_eigen.shuffle(l_to_r), rhs_eigen); - } else if(lhs_matches_rhs) { // This needs permuted - m_tensor_ = op(lhs_eigen, rhs_eigen).shuffle(this_to_r); - } else { // Everything needs permuted - m_tensor_ = op(lhs_eigen.shuffle(l_to_r), rhs_eigen).shuffle(this_to_r); - } - mark_for_rehash_(); -} - -TPARAMS -void EIGEN_TENSOR::addition_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) { - auto lambda = [](auto&& lhs, auto&& rhs) { return lhs + rhs; }; - element_wise_op_(lambda, std::move(this_labels), std::move(lhs_labels), - std::move(rhs_labels), lhs, rhs); - mark_for_rehash_(); -} - -template -auto matrix_size(TensorType&& t, std::size_t row_ranks) { - std::size_t nrows = 1; - for(std::size_t i = 0; i < row_ranks; ++i) nrows *= t.extent(i); - - std::size_t ncols = 1; - const auto rank = t.rank(); - for(std::size_t i = row_ranks; i < rank; ++i) ncols *= t.extent(i); - return std::make_pair(nrows, ncols); -} - -TPARAMS -void EIGEN_TENSOR::contraction_assignment_(label_type olabels, - label_type llabels, - label_type rlabels, - const_shape_reference result_shape, - const_pimpl_reference lhs, - const_pimpl_reference rhs) { - ContractionPlanner plan(olabels, llabels, rlabels); - - auto lt = lhs.clone(); - auto rt = rhs.clone(); - lt->permute_assignment(plan.lhs_permutation(), llabels, lhs); - rt->permute_assignment(plan.rhs_permutation(), rlabels, rhs); - - const auto [lrows, lcols] = matrix_size(*lt, plan.lhs_free().size()); - const auto [rrows, rcols] = matrix_size(*rt, plan.rhs_dummy().size()); - - // Work out the types of the matrix amd a map - constexpr auto e_dyn = ::Eigen::Dynamic; - constexpr auto e_row_major = ::Eigen::RowMajor; - using matrix_t = ::Eigen::Matrix; - using map_t = ::Eigen::Map; - - eigen::data_type buffer(lrows, rcols); - - map_t lmatrix(lt->get_mutable_data(), lrows, lcols); - map_t rmatrix(rt->get_mutable_data(), rrows, rcols); - map_t omatrix(buffer.data(), lrows, rcols); - omatrix = lmatrix * rmatrix; - - auto mlabels = plan.result_matrix_labels(); - auto oshape = result_shape(olabels); - - // oshapes is the final shape, permute it to shape omatrix is currently in - auto temp_shape = result_shape.clone(); - temp_shape->permute_assignment(mlabels, oshape); - auto mshape = temp_shape->as_smooth(); - - auto m_to_o = olabels.permutation(mlabels); // N.b. 
Eigen def is inverse us - - std::array out_size; - std::array m_to_o_array; - for(std::size_t i = 0; i < Rank; ++i) { - out_size[i] = mshape.extent(i); - m_to_o_array[i] = m_to_o[i]; - } - - auto tensor = buffer.reshape(out_size); - if constexpr(Rank > 0) { - m_tensor_ = tensor.shuffle(m_to_o_array); - } else { - m_tensor_ = tensor; - } - - mark_for_rehash_(); -} - -TPARAMS -void EIGEN_TENSOR::hadamard_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) { - auto lambda = [](auto&& lhs, auto&& rhs) { return lhs * rhs; }; - element_wise_op_(lambda, std::move(this_labels), std::move(lhs_labels), - std::move(rhs_labels), lhs, rhs); - mark_for_rehash_(); -} - -TPARAMS -void EIGEN_TENSOR::permute_assignment_(label_type this_labels, - label_type rhs_labels, - const_pimpl_reference rhs) { - const auto* rhs_down = dynamic_cast(&rhs); - - if(this_labels != rhs_labels) { // We need to permute rhs before assignment - // Eigen adopts the opposite definition of permutation from us. - auto r_to_l = this_labels.permutation(rhs_labels); - // Eigen wants int objects - std::vector r_to_l2(r_to_l.begin(), r_to_l.end()); - m_tensor_ = rhs_down->m_tensor_.shuffle(r_to_l2); - } else { - m_tensor_ = rhs_down->m_tensor_; - } - mark_for_rehash_(); -} - -TPARAMS -void EIGEN_TENSOR::scalar_multiplication_(label_type this_labels, - label_type rhs_labels, - FloatType scalar, - const_pimpl_reference rhs) { - const auto* rhs_downcasted = dynamic_cast(&rhs); - - if(this_labels != rhs_labels) { // We need to permute rhs before assignment - auto r_to_l = rhs_labels.permutation(this_labels); - // Eigen wants int objects - std::vector r_to_l2(r_to_l.begin(), r_to_l.end()); - m_tensor_ = rhs_downcasted->m_tensor_.shuffle(r_to_l2) * scalar; - } else { - m_tensor_ = rhs_downcasted->m_tensor_ * scalar; - } - mark_for_rehash_(); -} - -TPARAMS -void EIGEN_TENSOR::subtraction_assignment_(label_type this_labels, - label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) { - auto lambda = [](auto&& lhs, auto&& rhs) { return lhs - rhs; }; - element_wise_op_(lambda, std::move(this_labels), std::move(lhs_labels), - std::move(rhs_labels), lhs, rhs); - mark_for_rehash_(); -} - -TPARAMS -void EIGEN_TENSOR::update_hash_() const { - m_hash_ = hash_type{rank_()}; - for(eigen_rank_type i = 0; i < rank_(); ++i) - hash_utilities::hash_input(m_hash_, m_tensor_.dimension(i)); - for(auto i = 0; i < m_tensor_.size(); ++i) - hash_utilities::hash_input(m_hash_, m_tensor_.data()[i]); - m_recalculate_hash_ = false; -} - -#undef EIGEN_TENSOR -#undef TPARAMS - -#define DEFINE_EIGEN_TENSOR(TYPE) \ - template class EigenTensor; \ - template class EigenTensor; \ - template class EigenTensor; \ - template class EigenTensor; \ - template class EigenTensor; \ - template class EigenTensor; \ - template class EigenTensor; \ - template class EigenTensor; \ - template class EigenTensor; \ - template class EigenTensor; \ - template class EigenTensor - -TW_APPLY_FLOATING_POINT_TYPES(DEFINE_EIGEN_TENSOR); - -#undef DEFINE_EIGEN_TENSOR - -} // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/buffer/detail_/eigen_tensor.hpp b/src/tensorwrapper/buffer/detail_/eigen_tensor.hpp deleted file mode 100644 index 4f89f003..00000000 --- a/src/tensorwrapper/buffer/detail_/eigen_tensor.hpp +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the 
"License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once -#include "eigen_pimpl.hpp" -#include "hash_utilities.hpp" -#include -#include -#include - -namespace tensorwrapper::buffer::detail_ { - -/// Implements EigenPIMPL by wrapping eigen::Tensor -template -class EigenTensor : public EigenPIMPL { -private: - using my_type = EigenTensor; - using base_type = EigenPIMPL; - -public: - using typename base_type::const_base_reference; - using typename base_type::const_pimpl_reference; - using typename base_type::const_pointer; - using typename base_type::const_reference; - using typename base_type::const_shape_reference; - using typename base_type::eigen_rank_type; - using typename base_type::element_type; - using typename base_type::element_vector; - using typename base_type::index_vector; - using typename base_type::label_type; - using typename base_type::pimpl_pointer; - using typename base_type::pointer; - using typename base_type::reference; - using typename base_type::size_type; - using typename base_type::string_type; - - using smooth_view = shape::SmoothView; - using const_smooth_view = shape::SmoothView; - using const_smooth_view_reference = const const_smooth_view&; - using eigen_data_type = eigen::data_type; - using eigen_reference = eigen_data_type&; - using const_eigen_reference = const eigen_data_type&; - using hash_type = hash_utilities::hash_type; - - EigenTensor() = default; - - explicit EigenTensor(const_smooth_view_reference shape) : - m_tensor_(allocate_from_shape_(shape, std::make_index_sequence())) { - } - - /// Tests for exact equality - bool operator==(const my_type& rhs) const noexcept { - return get_hash() == rhs.get_hash(); - } - - // Returns the hash for the current state of *this, computing first if - // needed. 
- hash_type get_hash() const { - if(m_recalculate_hash_ or !m_hash_caching_) update_hash_(); - return m_hash_; - } - -protected: - pimpl_pointer clone_() const override { - return std::make_unique(*this); - } - - eigen_rank_type rank_() const noexcept override { return Rank; } - - size_type size_() const noexcept override { return m_tensor_.size(); } - - size_type extent_(eigen_rank_type i) const override { - return m_tensor_.dimension(i); - } - - pointer get_mutable_data_() noexcept override { - turn_off_hash_caching_(); - return m_tensor_.data(); - } - - const_pointer get_immutable_data_() const noexcept override { - return m_tensor_.data(); - } - - const_reference get_elem_(index_vector index) const override { - return unwrap_vector_(std::move(index), - std::make_index_sequence()); - } - - void set_elem_(index_vector index, element_type new_value) override { - mark_for_rehash_(); - unwrap_vector_(std::move(index), std::make_index_sequence()) = - new_value; - } - - const_reference get_data_(size_type index) const override { - return m_tensor_.data()[index]; - } - - void set_data_(size_type index, element_type new_value) override { - mark_for_rehash_(); - m_tensor_.data()[index] = new_value; - } - - void fill_(element_type value) override { - mark_for_rehash_(); - std::fill(m_tensor_.data(), m_tensor_.data() + m_tensor_.size(), value); - } - - void copy_(const element_vector& values) override { - mark_for_rehash_(); - std::copy(values.begin(), values.end(), m_tensor_.data()); - } - - bool are_equal_(const_base_reference rhs) const noexcept override { - return base_type::template are_equal_impl_(rhs); - } - - string_type to_string_() const override { - std::stringstream ss; - ss << m_tensor_; - return ss.str(); - } - - std::ostream& add_to_stream_(std::ostream& os) const override { - return os << m_tensor_; - } - - void addition_assignment_(label_type this_labels, label_type lhs_labels, - label_type rhs_labels, const_pimpl_reference lhs, - const_pimpl_reference rhs) override; - - void subtraction_assignment_(label_type this_labels, label_type lhs_labels, - label_type rhs_labels, - const_pimpl_reference lhs, - const_pimpl_reference rhs) override; - - void hadamard_assignment_(label_type this_labels, label_type lhs_labels, - label_type rhs_labels, const_pimpl_reference lhs, - const_pimpl_reference rhs) override; - - void contraction_assignment_(label_type this_labels, label_type lhs_labels, - label_type rhs_labels, - const_shape_reference result_shape, - const_pimpl_reference lhs, - const_pimpl_reference rhs) override; - - void permute_assignment_(label_type this_labels, label_type rhs_labels, - const_pimpl_reference rhs) override; - - void scalar_multiplication_(label_type this_labels, label_type rhs_labels, - FloatType scalar, - const_pimpl_reference rhs) override; - -private: - // Code factorization for implementing element-wise operations - template - void element_wise_op_(OperationType op, label_type this_labels, - label_type lhs_labels, label_type rhs_labels, - const_pimpl_reference lhs, const_pimpl_reference rhs); - - // Handles TMP needed to create an Eigen Tensor from a Smooth object - template - auto allocate_from_shape_(const_smooth_view_reference shape, - std::index_sequence) { - return eigen_data_type(shape.extent(I)...); - } - - // Gets an element from the Eigen Tensor by unwrapping a std::vector - template - reference unwrap_vector_(index_vector index, std::index_sequence) { - return m_tensor_(tensorwrapper::detail_::to_long(index.at(I))...); - } - - // Same as mutable 
version, but result is read-only - template - const_reference unwrap_vector_(index_vector index, - std::index_sequence) const { - return m_tensor_(tensorwrapper::detail_::to_long(index.at(I))...); - } - - // Computes the hash for the current state of *this - void update_hash_() const; - - // Designates that the state may have changed and to recalculate the hash. - // This function is really just for readability and clarity. - void mark_for_rehash_() const { m_recalculate_hash_ = true; } - - // Designates that state changes are not trackable and we should recalculate - // the hash each time. - void turn_off_hash_caching_() const { m_hash_caching_ = false; } - - // Tracks whether the hash needs to be redetermined - mutable bool m_recalculate_hash_ = true; - - // Tracks whether hash caching has been turned off - mutable bool m_hash_caching_ = true; - - // Holds the computed hash value for this instance's state - mutable hash_type m_hash_; - - // The Eigen tensor *this wraps - eigen_data_type m_tensor_; -}; - -#define DECLARE_EIGEN_TENSOR(TYPE) \ - extern template class EigenTensor; \ - extern template class EigenTensor; \ - extern template class EigenTensor; \ - extern template class EigenTensor; \ - extern template class EigenTensor; \ - extern template class EigenTensor; \ - extern template class EigenTensor; \ - extern template class EigenTensor; \ - extern template class EigenTensor; \ - extern template class EigenTensor; \ - extern template class EigenTensor - -TW_APPLY_FLOATING_POINT_TYPES(DECLARE_EIGEN_TENSOR); - -#undef DECLARE_EIGEN_TENSOR - -} // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp b/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp index 4a99c003..34ac9d6f 100644 --- a/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp +++ b/src/tensorwrapper/buffer/detail_/unary_operation_visitor.hpp @@ -143,4 +143,36 @@ class ScalarMultiplicationVisitor : public UnaryOperationVisitor { scalar_type m_scalar_; }; +class ApproximatelyEqualVisitor { +public: + explicit ApproximatelyEqualVisitor(double tol) : m_tol_(tol) {} + + template + bool operator()(const std::span result) { + const FloatType zero{0.0}; + const FloatType ptol = static_cast(m_tol_); + for(std::size_t i = 0; i < result.size(); ++i) { + auto diff = result[i]; + if(diff < zero) diff *= -1.0; + if(diff >= ptol) return false; + } + return true; + } + +private: + double m_tol_; +}; + +struct InfinityNormVisitor { + template + auto operator()(const std::span buffer) { + std::decay_t max_element{0.0}; + for(std::size_t i = 0; i < buffer.size(); ++i) { + auto elem = types::fabs(buffer[i]); + if(elem > max_element) max_element = elem; + } + return wtf::fp::make_float(max_element); + } +}; + } // namespace tensorwrapper::buffer::detail_ diff --git a/src/tensorwrapper/buffer/eigen.cpp b/src/tensorwrapper/buffer/eigen.cpp deleted file mode 100644 index f023e6f7..00000000 --- a/src/tensorwrapper/buffer/eigen.cpp +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "detail_/eigen_tensor.hpp" -#include -#include -#include -#include - -namespace tensorwrapper::buffer { - -#define TPARAMS template -#define EIGEN Eigen - -// -- Public Methods - -TPARAMS -EIGEN::Eigen() noexcept = default; - -TPARAMS -EIGEN::Eigen(pimpl_pointer pimpl, layout_pointer playout, - allocator_base_pointer pallocator) : - my_base_type(std::move(playout), std::move(pallocator)), - m_pimpl_(std::move(pimpl)) {} - -TPARAMS -EIGEN::Eigen(const Eigen& other) : - Eigen(other.has_pimpl_() ? other.m_pimpl_->clone() : nullptr, other.layout(), - other.allocator()) {} - -TPARAMS -EIGEN::Eigen(Eigen&& other) noexcept = default; - -TPARAMS -EIGEN& EIGEN::operator=(const Eigen& rhs) { - if(this != &rhs) Eigen(rhs).swap(*this); - return *this; -} - -TPARAMS -EIGEN& EIGEN::operator=(Eigen&& rhs) noexcept = default; - -TPARAMS -EIGEN::~Eigen() noexcept = default; - -TPARAMS -void EIGEN::swap(Eigen& other) noexcept { m_pimpl_.swap(other.m_pimpl_); } - -TPARAMS -bool EIGEN::operator==(const Eigen& rhs) const noexcept { - if(has_pimpl_() != rhs.has_pimpl_()) return false; - if(!has_pimpl_()) return true; - return m_pimpl_->are_equal(*rhs.m_pimpl_); -} - -// -- Protected Methods - -TPARAMS -typename EIGEN::buffer_base_pointer EIGEN::clone_() const { - return std::make_unique(*this); -} - -TPARAMS -bool EIGEN::are_equal_(const_buffer_base_reference rhs) const noexcept { - return my_base_type::template are_equal_impl_(rhs); -} - -TPARAMS -typename EIGEN::dsl_reference EIGEN::addition_assignment_( - label_type this_labels, const_labeled_reference lhs, - const_labeled_reference rhs) { - BufferBase::addition_assignment_(this_labels, lhs, rhs); - using alloc_type = allocator::Eigen; - const auto& lhs_down = alloc_type::rebind(lhs.object()); - const auto& rhs_down = alloc_type::rebind(rhs.object()); - if(!has_pimpl_()) m_pimpl_ = lhs_down.pimpl_().clone(); - pimpl_().addition_assignment(this_labels, lhs.labels(), rhs.labels(), - lhs_down.pimpl_(), rhs_down.pimpl_()); - - return *this; -} - -TPARAMS -typename EIGEN::dsl_reference EIGEN::subtraction_assignment_( - label_type this_labels, const_labeled_reference lhs, - const_labeled_reference rhs) { - BufferBase::subtraction_assignment_(this_labels, lhs, rhs); - using alloc_type = allocator::Eigen; - const auto& lhs_down = alloc_type::rebind(lhs.object()); - const auto& rhs_down = alloc_type::rebind(rhs.object()); - if(!has_pimpl_()) m_pimpl_ = lhs_down.pimpl_().clone(); - pimpl_().subtraction_assignment(this_labels, lhs.labels(), rhs.labels(), - lhs_down.pimpl_(), rhs_down.pimpl_()); - return *this; -} - -TPARAMS -typename EIGEN::dsl_reference EIGEN::multiplication_assignment_( - label_type this_labels, const_labeled_reference lhs, - const_labeled_reference rhs) { - BufferBase::multiplication_assignment_(this_labels, lhs, rhs); - - using alloc_type = allocator::Eigen; - const auto& lhs_down = alloc_type::rebind(lhs.object()); - const auto& rhs_down = alloc_type::rebind(rhs.object()); - - if(!has_pimpl_()) m_pimpl_ = lhs_down.pimpl_().clone(); - if(this_labels.is_hadamard_product(lhs.labels(), rhs.labels())) - pimpl_().hadamard_assignment(this_labels, lhs.labels(), rhs.labels(), - lhs_down.pimpl_(), rhs_down.pimpl_()); - else if(this_labels.is_contraction(lhs.labels(), rhs.labels())) - pimpl_().contraction_assignment(this_labels, lhs.labels(), rhs.labels(), - this->layout().shape(), - lhs_down.pimpl_(), rhs_down.pimpl_()); - else - throw 
std::runtime_error("Mixed products NYI"); - - return *this; -} - -TPARAMS -typename EIGEN::dsl_reference EIGEN::permute_assignment_( - label_type this_labels, const_labeled_reference rhs) { - BufferBase::permute_assignment_(this_labels, rhs); - using alloc_type = allocator::Eigen; - const auto& rhs_down = alloc_type::rebind(rhs.object()); - if(!has_pimpl_()) m_pimpl_ = rhs_down.pimpl_().clone(); - pimpl_().permute_assignment(this_labels, rhs.labels(), rhs_down.pimpl_()); - - return *this; -} - -TPARAMS -typename EIGEN::dsl_reference EIGEN::scalar_multiplication_( - label_type this_labels, double scalar, const_labeled_reference rhs) { - BufferBase::permute_assignment_(this_labels, rhs); - using alloc_type = allocator::Eigen; - const auto& rhs_down = alloc_type::rebind(rhs.object()); - if(!has_pimpl_()) m_pimpl_ = rhs_down.pimpl_().clone(); - pimpl_().scalar_multiplication(this_labels, rhs.labels(), scalar, - rhs_down.pimpl_()); - return *this; -} - -TPARAMS -typename EIGEN::pointer EIGEN::get_mutable_data_() noexcept { - return m_pimpl_ ? m_pimpl_->get_mutable_data() : nullptr; -} - -TPARAMS -typename EIGEN::const_pointer EIGEN::get_immutable_data_() const noexcept { - return m_pimpl_ ? m_pimpl_->get_immutable_data() : nullptr; -} - -TPARAMS -typename EIGEN::const_reference EIGEN::get_elem_(index_vector index) const { - return pimpl_().get_elem(std::move(index)); -} - -TPARAMS -void EIGEN::set_elem_(index_vector index, element_type new_value) { - return pimpl_().set_elem(std::move(index), std::move(new_value)); -} - -TPARAMS -typename EIGEN::const_reference EIGEN::get_data_(size_type index) const { - return pimpl_().get_data(std::move(index)); -} - -TPARAMS -void EIGEN::set_data_(size_type index, element_type new_value) { - return pimpl_().set_data(std::move(index), std::move(new_value)); -} - -TPARAMS -void EIGEN::fill_(element_type value) { - return pimpl_().fill(std::move(value)); -} - -TPARAMS -void EIGEN::copy_(const element_vector& values) { - return pimpl_().copy(values); -} - -TPARAMS -typename EIGEN::polymorphic_base::string_type EIGEN::to_string_() const { - return m_pimpl_ ? m_pimpl_->to_string() : ""; -} - -TPARAMS -std::ostream& EIGEN::add_to_stream_(std::ostream& os) const { - return m_pimpl_ ? 
m_pimpl_->add_to_stream(os) : os; -} - -// -- Private methods - -TPARAMS -bool EIGEN::has_pimpl_() const noexcept { return static_cast(m_pimpl_); } - -TPARAMS -void EIGEN::assert_pimpl_() const { - if(has_pimpl_()) return; - throw std::runtime_error("buffer::Eigen has no PIMPL!"); -} - -TPARAMS -typename EIGEN::pimpl_reference EIGEN::pimpl_() { - assert_pimpl_(); - return *m_pimpl_; -} - -TPARAMS -typename EIGEN::const_pimpl_reference EIGEN::pimpl_() const { - assert_pimpl_(); - return *m_pimpl_; -} - -TPARAMS -EIGEN& to_eigen_buffer(BufferBase& b) { - using allocator_type = allocator::Eigen; - return allocator_type::rebind(b); -} - -TPARAMS -const EIGEN& to_eigen_buffer(const BufferBase& b) { - using allocator_type = allocator::Eigen; - return allocator_type::rebind(b); -} - -#undef EIGEN -#undef TPARAMS - -#define DEFINE_EIGEN_BUFFER(TYPE) template class Eigen -#define DEFINE_TO_EIGEN_BUFFER(TYPE) \ - template Eigen& to_eigen_buffer(BufferBase&) -#define DEFINE_TO_CONST_EIGEN_BUFFER(TYPE) \ - template const Eigen& to_eigen_buffer(const BufferBase&) - -TW_APPLY_FLOATING_POINT_TYPES(DEFINE_EIGEN_BUFFER); -TW_APPLY_FLOATING_POINT_TYPES(DEFINE_TO_EIGEN_BUFFER); -TW_APPLY_FLOATING_POINT_TYPES(DEFINE_TO_CONST_EIGEN_BUFFER); - -#undef DEFINE_EIGEN_BUFFER -#undef DEFINE_TO_EIGEN_BUFFER -#undef DEFINE_TO_CONST_EIGEN_BUFFER - -} // namespace tensorwrapper::buffer diff --git a/src/tensorwrapper/diis/diis.cpp b/src/tensorwrapper/diis/diis.cpp index 94b3d6a6..585768ae 100644 --- a/src/tensorwrapper/diis/diis.cpp +++ b/src/tensorwrapper/diis/diis.cpp @@ -14,32 +14,23 @@ * limitations under the License. */ -#include #include #include -#include +#include namespace tensorwrapper::diis { namespace { struct Kernel { - using buffer_base_type = tensorwrapper::buffer::BufferBase; - template - auto run(const buffer_base_type& t) { - using alloc_type = tensorwrapper::allocator::Eigen; - alloc_type alloc(t.allocator().runtime()); - + auto operator()(const std::span& t) { + using clean_type = std::decay_t; double rv; - if constexpr(tensorwrapper::types::is_uncertain_v) { - const auto& t_eigen = alloc.rebind(t); - - rv = t_eigen.get_elem({}).mean(); + if constexpr(tensorwrapper::types::is_uncertain_v) { + rv = t[0].mean(); } else { - const auto& t_eigen = alloc.rebind(t); - - rv = t_eigen.get_elem({}); + rv = t[0]; } return rv; } @@ -49,8 +40,6 @@ struct Kernel { using tensor_type = DIIS::tensor_type; -using tensorwrapper::utilities::floating_point_dispatch; - tensor_type DIIS::extrapolate(const tensor_type& X, const tensor_type& E) { // Append new values to stored values m_samples_.push_back(X); @@ -81,9 +70,12 @@ tensor_type DIIS::extrapolate(const tensor_type& X, const tensor_type& E) { tensor_type& E_j = m_errors_.at(j); tensor_type temp; - temp("") = E_i("mu,nu") * E_j("mu,nu"); - m_B_(i, j) = floating_point_dispatch(Kernel{}, temp.buffer()); - + auto ei = buffer::make_contiguous(E_i.buffer()); + auto ej = buffer::make_contiguous(E_j.buffer()); + temp("") = E_i("mu,nu") * E_j("mu,nu"); + const auto& bdown = buffer::make_contiguous(temp.buffer()); + Kernel k; + m_B_(i, j) = buffer::visit_contiguous_buffer(k, bdown); // Fill in lower triangle if(i != j) m_B_(j, i) = m_B_(i, j); } diff --git a/src/tensorwrapper/operations/approximately_equal.cpp b/src/tensorwrapper/operations/approximately_equal.cpp index 3da9e93a..e5c0b335 100644 --- a/src/tensorwrapper/operations/approximately_equal.cpp +++ b/src/tensorwrapper/operations/approximately_equal.cpp @@ -13,46 +13,12 @@ * See the License for the specific language 
governing permissions and * limitations under the License. */ - -#include -#include #include -#include namespace tensorwrapper::operations { -namespace { - -struct Kernel { - template - bool run(const buffer::BufferBase& result, double tol) { - using allocator_type = allocator::Eigen; - const FloatType zero{0.0}; - const FloatType ptol = static_cast(tol); - auto& buffer_down = allocator_type::rebind(result); - - for(std::size_t i = 0; i < buffer_down.size(); ++i) { - auto diff = buffer_down.get_data(i); - if(diff < zero) diff *= -1.0; - if(diff >= ptol) return false; - } - return true; - } -}; - -} // namespace bool approximately_equal(const Tensor& lhs, const Tensor& rhs, double tol) { - if(lhs.rank() != rhs.rank()) return false; - - std::string index(lhs.rank() ? "i0" : ""); - for(std::size_t i = 1; i < lhs.rank(); ++i) - index += (",i" + std::to_string(i)); - Tensor result; - result(index) = lhs(index) - rhs(index); - - using tensorwrapper::utilities::floating_point_dispatch; - - return floating_point_dispatch(Kernel{}, result.buffer(), tol); + return lhs.buffer().approximately_equal(rhs.buffer(), tol); } } // namespace tensorwrapper::operations diff --git a/src/tensorwrapper/operations/norm.cpp b/src/tensorwrapper/operations/norm.cpp index 178337c9..47eba640 100644 --- a/src/tensorwrapper/operations/norm.cpp +++ b/src/tensorwrapper/operations/norm.cpp @@ -13,37 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include -#include -#include -#include +#include +#include +#include +#include namespace tensorwrapper::operations { -namespace { -struct InfinityKernel { - template - Tensor run(const buffer::BufferBase& t) { - using allocator_type = allocator::Eigen; - allocator_type alloc(t.allocator().runtime()); - FloatType max_element{0.0}; - const auto& buffer_down = alloc.rebind(t); - for(std::size_t i = 0; i < buffer_down.size(); ++i) { - auto elem = types::fabs(buffer_down.get_data(i)); - if(elem > max_element) max_element = elem; - } - shape::Smooth s{}; - layout::Physical l(s); - auto pbuffer = alloc.construct(l, max_element); - return Tensor(s, std::move(pbuffer)); - } -}; - -} // namespace Tensor infinity_norm(const Tensor& t) { - InfinityKernel k; - return utilities::floating_point_dispatch(k, t.buffer()); + const auto& buffer_down = buffer::make_contiguous(t.buffer()); + auto max_value = buffer_down.infinity_norm(); + std::initializer_list il{max_value}; + using fp_types = types::floating_point_types; + auto wtf_buffer = wtf::buffer::make_float_buffer(il); + shape::Smooth shape; + buffer::Contiguous buffer(std::move(wtf_buffer), shape); + layout::Physical playout(shape); + layout::Logical llayout(shape); + return Tensor(std::move(playout), std::move(llayout), std::move(buffer)); } } // namespace tensorwrapper::operations diff --git a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp index d7f4c684..0cb309bf 100644 --- a/src/tensorwrapper/tensor/detail_/tensor_factory.cpp +++ b/src/tensorwrapper/tensor/detail_/tensor_factory.cpp @@ -18,8 +18,7 @@ #include "tensor_factory.hpp" #include "tensor_pimpl.hpp" #include -#include -#include +#include #include namespace tensorwrapper::detail_ { @@ -31,7 +30,6 @@ using symmetry_pointer = typename TensorFactory::symmetry_pointer; using sparsity_pointer = typename TensorFactory::sparsity_pointer; using logical_layout_pointer = typename TensorFactory::logical_layout_pointer; using physical_layout_pointer = typename 
TensorFactory::physical_layout_pointer; -using allocator_pointer = typename TensorFactory::allocator_pointer; using buffer_pointer = typename pimpl_type::buffer_pointer; // ----------------------------------------------------------------------------- @@ -67,11 +65,6 @@ physical_layout_pointer TensorFactory::default_physical_layout( logical.shape(), logical.symmetry(), logical.sparsity()); } -allocator_pointer TensorFactory::default_allocator( - const_physical_reference physical, runtime_view_type rv) { - return std::make_unique>(rv); -} - bool TensorFactory::can_make_logical_layout(const input_type& input) noexcept { return input.has_shape() || input.has_logical_layout(); } @@ -157,13 +150,10 @@ pimpl_pointer TensorFactory::construct(TensorInput input) { input.m_pphysical = default_physical_layout(*input.m_plogical); } - if(!input.has_allocator()) { - input.m_palloc = default_allocator(*input.m_pphysical, input.m_rv); - } - // TODO: Check if we have initialization criteria - input.m_pbuffer = - input.m_palloc->allocate(std::move(input.m_pphysical)); + auto buffer = + buffer::make_contiguous(input.m_pphysical->shape()); + input.m_pbuffer = std::make_unique(std::move(buffer)); } // Now we have both a logical layout and a buffer so we're done @@ -177,8 +167,10 @@ namespace { /// Wraps the process of turning an initializer list into a TensorInput object template auto il_to_input(T il, parallelzone::runtime::RuntimeView rv = {}) { - allocator::Eigen alloc(rv); - auto pbuffer = alloc.construct(il); + auto [extents, data] = unwrap_il(il); + shape::Smooth shape(extents.begin(), extents.end()); + auto pbuffer = + std::make_unique(std::move(data), std::move(shape)); return TensorInput(pbuffer->layout().shape(), std::move(pbuffer)); } diff --git a/src/tensorwrapper/tensor/detail_/tensor_factory.hpp b/src/tensorwrapper/tensor/detail_/tensor_factory.hpp index 597cedd9..c8fb214a 100644 --- a/src/tensorwrapper/tensor/detail_/tensor_factory.hpp +++ b/src/tensorwrapper/tensor/detail_/tensor_factory.hpp @@ -66,7 +66,6 @@ class TensorFactory { using logical_layout_pointer = input_type::logical_layout_pointer; using const_physical_reference = input_type::const_physical_reference; using physical_layout_pointer = input_type::physical_layout_pointer; - using allocator_pointer = input_type::allocator_pointer; using runtime_view_type = input_type::runtime_view_type; // ------------------------------------------------------------------------- @@ -140,21 +139,6 @@ class TensorFactory { static physical_layout_pointer default_physical_layout( const_logical_reference logical); - /** @brief Constructs an allocator consistent with the physical layout. - * - * @param[in] physical The physical layout of the tensor we want to - * allocate. - * @param[in] rv The runtime that tensors will be allocated in. - * - * @return An allocator capable of allocating a tensor with the layout - * @p physical using the resources in @p rv. - * - * @throw std::bad_alloc if there is a problem allocating the return. - * Strong throw guarantee. - */ - static allocator_pointer default_allocator( - const_physical_reference physical, runtime_view_type rv); - /** @brief Actually constructs the tensor's PIMPL. 
* * This is the main entry point into this class (and is what callers diff --git a/src/tensorwrapper/tensor/tensor_class.cpp b/src/tensorwrapper/tensor/tensor_class.cpp index 0001b0db..9b3f9e55 100644 --- a/src/tensorwrapper/tensor/tensor_class.cpp +++ b/src/tensorwrapper/tensor/tensor_class.cpp @@ -17,6 +17,7 @@ #include "../layout/converter.hpp" #include "detail_/tensor_factory.hpp" #include "detail_/tensor_pimpl.hpp" +#include #include namespace tensorwrapper { @@ -116,8 +117,8 @@ Tensor::dsl_reference Tensor::binary_common_(FxnType&& fxn, const auto& lbuffer = lobject.buffer(); const auto& rbuffer = robject.buffer(); - auto palloc = lbuffer.allocator().clone(); - auto pthis_buffer = palloc->allocate(std::move(pphys_layout)); + auto buffer = buffer::make_contiguous(lbuffer, pphys_layout->shape()); + auto pthis_buffer = std::make_unique(std::move(buffer)); fxn(*pthis_buffer, this_labels, lbuffer(llabels), rbuffer(rlabels)); diff --git a/src/tensorwrapper/utilities/block_diagonal_matrix.cpp b/src/tensorwrapper/utilities/block_diagonal_matrix.cpp index 2fd764d5..9799adb7 100644 --- a/src/tensorwrapper/utilities/block_diagonal_matrix.cpp +++ b/src/tensorwrapper/utilities/block_diagonal_matrix.cpp @@ -13,70 +13,99 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include #include #include #include #include -#include namespace tensorwrapper::utilities { namespace { -struct BlockDiagonalMatrixKernel { +struct Initializer { + explicit Initializer(shape::Smooth shape) : m_shape(std::move(shape)) {} + template - auto run(const buffer::BufferBase& b, const std::vector& matrices) { - using allocator_type = tensorwrapper::allocator::Eigen; - - // All inputs must be Rank 2, square, and the same floating point type. - // If so, sum their extent sizes. 
- std::size_t size = 0; - for(const auto& matrix : matrices) { - if(!allocator_type::can_rebind(matrix.buffer())) - throw std::runtime_error( - "All inputs must have the same floating point type"); - - if(matrix.rank() != 2) - throw std::runtime_error( - "All inputs must be matrices (Rank == 2)"); - - const auto& mshape = matrix.buffer().layout().shape().as_smooth(); - if(mshape.extent(0) != mshape.extent(1)) - throw std::runtime_error("All inputs must be square matrices"); - - size += mshape.extent(0); - } + void operator()(const std::span) { + using clean_type = std::decay_t; + m_buffer = buffer::make_contiguous(m_shape); + } + + buffer::Contiguous m_buffer; + shape::Smooth m_shape; +}; - // Allocate new buffer - allocator_type allocator(b.allocator().runtime()); - shape::Smooth oshape{size, size}; - layout::Physical olayout(oshape); - auto obuffer = allocator.construct(olayout, 0.0); - - // Copy values from input into corresponding blocks - std::size_t offset = 0; - for(const auto& matrix : matrices) { - const auto& mbuffer = allocator.rebind(matrix.buffer()); - auto extent = mbuffer.layout().shape().as_smooth().extent(0); - for(std::size_t i = 0; i < extent; ++i) { - for(std::size_t j = 0; j < extent; ++j) { - obuffer->set_elem({offset + i, offset + j}, - mbuffer.get_elem({i, j})); - } +struct BlockDiagonalMatrixKernel { + // Initializes assuming square matrix + BlockDiagonalMatrixKernel(buffer::Contiguous& buffer, std::size_t offset, + std::size_t extent) : + m_pbuffer(&buffer), + m_offset(offset), + m_row_extent(extent), + m_col_extent(extent) {} + + template + void operator()(const std::span matrix_i) { + for(std::size_t i = 0; i < m_row_extent; ++i) { + for(std::size_t j = 0; j < m_col_extent; ++j) { + m_pbuffer->set_elem({m_offset + i, m_offset + j}, + matrix_i[i * m_col_extent + j]); } - offset += extent; } - return Tensor(oshape, std::move(obuffer)); } + + buffer::Contiguous* m_pbuffer; + + std::size_t m_offset; + + std::size_t m_row_extent; + std::size_t m_col_extent; }; } // namespace Tensor block_diagonal_matrix(std::vector matrices) { - const auto& buffer0 = matrices[0].buffer(); - BlockDiagonalMatrixKernel kernel; - return floating_point_dispatch(kernel, buffer0, matrices); + if(matrices.empty()) { + Tensor t; + return t; // No idea why the compiler won't let us do 'return {};' here + } + + // All inputs must be Rank 2, square, and the same floating point type. + // If so, sum their extent sizes. 
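// The assembly below proceeds in two passes: validate every input (rank 2,
// square) while accumulating the output extent, then copy each block onto the
// diagonal of the result. The copy relies on row-major flattening: element
// (i, j) of an n-by-n block sits at linear index i * n + j of its span and
// lands at (offset + i, offset + j) of the output. A free-standing sketch of
// that placement arithmetic, with std::vector standing in for the buffer
// types (place_block is illustrative, not TensorWrapper API):

#include <cstddef>
#include <vector>

// Copies an n-by-n row-major block onto the diagonal of an N-by-N row-major
// matrix, starting at (offset, offset).
inline void place_block(std::vector<double>& out, std::size_t N,
                        const std::vector<double>& block, std::size_t n,
                        std::size_t offset) {
    for(std::size_t i = 0; i < n; ++i)
        for(std::size_t j = 0; j < n; ++j)
            out[(offset + i) * N + (offset + j)] = block[i * n + j];
}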
+    std::size_t size = 0;
+    std::vector<std::size_t> row_extents;
+    row_extents.reserve(matrices.size());
+    for(const auto& matrix : matrices) {
+        if(matrix.rank() != 2)
+            throw std::runtime_error("All inputs must be matrices (Rank == 2)");
+
+        const auto& mshape = matrix.buffer().layout().shape().as_smooth();
+        if(mshape.extent(0) != mshape.extent(1))
+            throw std::runtime_error("All inputs must be square matrices");
+
+        row_extents.push_back(mshape.extent(0));
+        size += row_extents.back();
+    }
+
+    shape::Smooth shape{size, size};
+    layout::Physical olayout(shape);
+
+    Initializer init_kernel(shape);
+    const auto& buffer0 = buffer::make_contiguous(matrices.front().buffer());
+    buffer::visit_contiguous_buffer(init_kernel, buffer0);
+
+    buffer::Contiguous buffer = std::move(init_kernel.m_buffer);
+
+    std::size_t offset = 0;
+
+    for(const auto& matrix : matrices) {
+        const auto& buffer_i   = buffer::make_contiguous(matrix.buffer());
+        std::size_t row_extent = buffer_i.shape().extent(0);
+        BlockDiagonalMatrixKernel kernel(buffer, offset, row_extent);
+        buffer::visit_contiguous_buffer(kernel, buffer_i);
+        offset += row_extent;
+    }
+    layout::Logical llayout(shape);
+    return Tensor(std::move(buffer), std::move(llayout), std::move(olayout));
 }
 
 } // namespace tensorwrapper::utilities
diff --git a/src/tensorwrapper/utilities/to_json.cpp b/src/tensorwrapper/utilities/to_json.cpp
index 907257ff..22f7b7db 100644
--- a/src/tensorwrapper/utilities/to_json.cpp
+++ b/src/tensorwrapper/utilities/to_json.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include
+#include
 #include
 
 namespace tensorwrapper::utilities {
@@ -22,16 +22,13 @@ namespace tensorwrapper::utilities {
 
 using offset_type   = std::size_t;
 using offset_vector = std::vector<offset_type>;
 
-template<typename FloatType>
-using buffer_type = buffer::Contiguous<FloatType>;
+using buffer_type = buffer::Contiguous;
 
-template<typename FloatType>
-void to_json_(std::ostream& os, const buffer_type<FloatType>& t,
-              offset_vector index) {
+void to_json_(std::ostream& os, const buffer_type& t, offset_vector index) {
     const auto& shape = t.layout().shape().as_smooth();
     auto rank         = index.size();
     if(rank == t.rank()) {
-        os << t.get_elem(index);
+        os << t.get_elem(index).to_string();
         return;
     } else {
         auto n_elements = shape.extent(rank);
@@ -48,8 +45,8 @@ void to_json_(std::ostream& os, const buffer_type& t,
 
 std::ostream& to_json(std::ostream& os, const Tensor& t) {
     offset_vector i;
-    const auto& buffer = buffer::to_eigen_buffer(t.buffer());
-    to_json_(os, buffer, i);
+    auto buffer_down = buffer::make_contiguous(t.buffer());
+    to_json_(os, buffer_down, i);
     return os;
 }
diff --git a/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp
deleted file mode 100644
index 7ba1ecf9..00000000
--- a/tests/cxx/unit_tests/tensorwrapper/allocator/contiguous.cpp
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include "../testing/testing.hpp" -#include - -using namespace tensorwrapper; - -TEMPLATE_LIST_TEST_CASE("allocator::Contiguous", "", - types::floating_point_types) { - using allocator_type = allocator::Contiguous; - using layout_type = typename allocator_type::layout_type; - - auto alloc = testing::make_allocator(); - - auto scalar_corr = testing::eigen_scalar(); - auto vector_corr = testing::eigen_vector(); - auto matrix_corr = testing::eigen_matrix(); - - SECTION("allocate(layout)") { - auto pscalar = alloc.allocate(scalar_corr->layout()); - pscalar->set_data(0, 42.0); - REQUIRE(pscalar->are_equal(*scalar_corr)); - } - - SECTION("allocate(layout*)") { - auto pvector = alloc.allocate(vector_corr->layout()); - pvector->set_data(0, 0.0); - pvector->set_data(1, 1.0); - pvector->set_data(2, 2.0); - pvector->set_data(3, 3.0); - pvector->set_data(4, 4.0); - - REQUIRE(pvector->are_equal(*vector_corr)); - } - - SECTION("contruct(scalar)") { - auto pscalar = alloc.construct(42.0); - REQUIRE(pscalar->are_equal(*scalar_corr)); - } - - SECTION("construct(vector)") { - auto pvector = alloc.construct({0.0, 1.0, 2.0, 3.0, 4.0}); - REQUIRE(pvector->are_equal(*vector_corr)); - } - - SECTION("construct(matrix)") { - typename allocator_type::rank2_il il{{1.0, 2.0}, {3.0, 4.0}}; - auto pmatrix = alloc.construct(il); - REQUIRE(pmatrix->are_equal(*matrix_corr)); - } - - SECTION("construct(tensor3)") { - typename allocator_type::rank3_il il{{{1.0, 2.0}, {3.0, 4.0}}, - {{5.0, 6.0}, {7.0, 8.0}}}; - auto ptensor3 = alloc.construct(il); - REQUIRE(ptensor3->are_equal(*testing::eigen_tensor3())); - } - - SECTION("construct(tensor4)") { - typename allocator_type::rank4_il il{ - {{{1.0, 2.0}, {3.0, 4.0}}, {{5.0, 6.0}, {7.0, 8.0}}}, - {{{9.0, 10.0}, {11.0, 12.0}}, {{13.0, 14.0}, {15.0, 16.0}}}}; - auto ptensor4 = alloc.construct(il); - REQUIRE(ptensor4->are_equal(*testing::eigen_tensor4())); - } - - SECTION("construct(layout, value)") { - auto pmatrix = alloc.construct(matrix_corr->layout(), 0.0); - matrix_corr->set_elem({0, 0}, 0.0); - matrix_corr->set_elem({0, 1}, 0.0); - matrix_corr->set_elem({1, 0}, 0.0); - matrix_corr->set_elem({1, 1}, 0.0); - - REQUIRE(pmatrix->are_equal(*matrix_corr)); - } - - SECTION("construct(layout*, value)") { - auto pmatrix = alloc.construct( - matrix_corr->layout().template clone_as(), 0.0); - matrix_corr->set_elem({0, 0}, 0.0); - matrix_corr->set_elem({0, 1}, 0.0); - matrix_corr->set_elem({1, 0}, 0.0); - matrix_corr->set_elem({1, 1}, 0.0); - - REQUIRE(pmatrix->are_equal(*matrix_corr)); - } -} diff --git a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp deleted file mode 100644 index 5e50001f..00000000 --- a/tests/cxx/unit_tests/tensorwrapper/allocator/eigen.cpp +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "../testing/testing.hpp" -#include -#include -#include -#include - -using namespace tensorwrapper; - -using types2test = types::floating_point_types; - -TEMPLATE_LIST_TEST_CASE("EigenAllocator", "", types2test) { - using alloc_type = allocator::Eigen; - - parallelzone::runtime::RuntimeView rv; - auto scalar_layout = testing::scalar_physical(); - auto vector_layout = testing::vector_physical(2); - auto matrix_layout = testing::matrix_physical(2, 2); - using layout_type = decltype(scalar_layout); - - auto pscalar_corr = testing::eigen_scalar(); - auto& scalar_corr = *pscalar_corr; - scalar_corr.set_elem({}, 0.0); - - auto pvector_corr = testing::eigen_vector(2); - auto& vector_corr = *pvector_corr; - vector_corr.set_elem({0}, 1); - vector_corr.set_elem({1}, 1); - - auto pmatrix_corr = testing::eigen_matrix(2, 2); - auto& matrix_corr = *pmatrix_corr; - matrix_corr.set_elem({0, 0}, 2); - matrix_corr.set_elem({0, 1}, 2); - matrix_corr.set_elem({1, 0}, 2); - matrix_corr.set_elem({1, 1}, 2); - - alloc_type alloc(rv); - - SECTION("Ctor") { - SECTION("runtime") { REQUIRE(alloc.runtime() == rv); } - testing::test_copy_and_move_ctors(alloc); - } - - SECTION("allocate(Layout)") { - // N.b. allocate doesn't initialize tensor, so only compare layouts - auto pscalar = alloc.allocate(scalar_layout); - REQUIRE(pscalar->layout().are_equal(scalar_layout)); - - auto pvector = alloc.allocate(vector_layout); - REQUIRE(pvector->layout().are_equal(vector_layout)); - - auto pmatrix = alloc.allocate(matrix_layout); - REQUIRE(pmatrix->layout().are_equal(matrix_layout)); - - // Works if ranks don't match - pvector = alloc.allocate(vector_layout); - REQUIRE(pvector->layout().are_equal(vector_layout)); - } - - SECTION("allocate(std::unique_ptr)") { - // N.b. allocate doesn't initialize tensor, so only compare layouts - auto pscalar_layout = std::make_unique(scalar_layout); - auto pscalar = alloc.allocate(std::move(pscalar_layout)); - REQUIRE(pscalar->layout().are_equal(scalar_layout)); - - auto pvector_layout = std::make_unique(vector_layout); - auto pvector = alloc.allocate(std::move(pvector_layout)); - REQUIRE(pvector->layout().are_equal(vector_layout)); - - auto pmatrix_layout = std::make_unique(matrix_layout); - auto pmatrix = alloc.allocate(std::move(pmatrix_layout)); - REQUIRE(pmatrix->layout().are_equal(matrix_layout)); - } - - SECTION("construct(value)") { - auto pscalar = alloc.construct(scalar_layout, 0); - REQUIRE(*pscalar == scalar_corr); - - auto pvector = alloc.construct(vector_layout, 1); - REQUIRE(*pvector == vector_corr); - - auto pmatrix_layout = std::make_unique(matrix_layout); - auto pmatrix = alloc.construct(std::move(pmatrix_layout), 2); - REQUIRE(*pmatrix == matrix_corr); - } - - SECTION("can_rebind") { REQUIRE(alloc.can_rebind(scalar_corr)); } - - SECTION("rebind(non-const)") { - using type = typename alloc_type::buffer_base_reference; - type scalar_base = scalar_corr; - auto& eigen_buffer = alloc.rebind(scalar_base); - REQUIRE(&eigen_buffer == &scalar_corr); - } - - SECTION("rebind(const)") { - using type = typename alloc_type::const_buffer_base_reference; - type scalar_base = scalar_corr; - auto& eigen_buffer = alloc.rebind(scalar_base); - REQUIRE(&eigen_buffer == &scalar_corr); - } - - SECTION("operator==") { REQUIRE(alloc == alloc_type(rv)); } - - SECTION("virtual_methods") { - SECTION("clone") { - auto pscalar = alloc.clone(); - REQUIRE(pscalar->are_equal(alloc)); - } - - SECTION("are_equal") { REQUIRE(alloc.are_equal(alloc_type(rv))); } - } -} diff --git 
a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp
index 3ef4784c..6de12348 100644
--- a/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/buffer/buffer_base.cpp
@@ -15,7 +15,7 @@
  */
 
 #include "../testing/testing.hpp"
-#include
+#include
 #include
 #include
 
@@ -47,7 +47,7 @@ TEST_CASE("BufferBase") {
     auto scalar_layout = testing::scalar_physical();
     auto vector_layout = testing::vector_physical(2);
 
-    buffer::Eigen defaulted;
+    buffer::Contiguous defaulted;
     BufferBase& defaulted_base = defaulted;
     BufferBase& scalar_base    = scalar;
     BufferBase& vector_base    = vector;
@@ -58,26 +58,15 @@
         REQUIRE(vector_base.has_layout());
     }
 
-    SECTION("has_allocator") { REQUIRE_FALSE(defaulted_base.has_allocator()); }
-
     SECTION("layout") {
         REQUIRE_THROWS_AS(defaulted_base.layout(), std::runtime_error);
         REQUIRE(scalar_base.layout().are_equal(scalar_layout));
         REQUIRE(vector_base.layout().are_equal(vector_layout));
     }
 
-    SECTION("allocator()") {
-        REQUIRE_THROWS_AS(defaulted_base.allocator(), std::runtime_error);
-    }
-
-    SECTION("allocator() const") {
-        REQUIRE_THROWS_AS(std::as_const(defaulted_base).allocator(),
-                          std::runtime_error);
-    }
-
     SECTION("operator==") {
         // Defaulted layout == defaulted layout
-        REQUIRE(defaulted_base == buffer::Eigen{});
+        REQUIRE(defaulted_base == buffer::Contiguous{});
 
         // Defaulted layout != non-defaulted layout
         REQUIRE_FALSE(defaulted_base == scalar_base);
@@ -89,6 +78,6 @@
     SECTION("operator!=") {
         // Just spot check because it negates operator==, which was tested
         REQUIRE(defaulted_base != scalar_base);
-        REQUIRE_FALSE(defaulted_base != buffer::Eigen());
+        REQUIRE_FALSE(defaulted_base != buffer::Contiguous());
    }
 }
diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
index ddcd4f4e..d31553f9 100644
--- a/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contiguous.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright 2024 NWChemEx-Project
+ * Copyright 2025 NWChemEx-Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,120 +15,520 @@
  */
 
 #include "../testing/testing.hpp"
-#include
-#include
-#include
+#include
+#include
 
 using namespace tensorwrapper;
-using namespace buffer;
-
-/* Testing strategy:
- *
- * - Contiguous is an abstract class. To test it we must create an instance of
- *   a derived class. We then will upcast to Contiguous and perform checks
- *   through the BufferBase interface.
+/* Testing notes:
  *
+ * The various operations (addition_assignment, etc.) are not exhaustively
+ * tested here. These operations are implemented via visitors that dispatch to
+ * various backends. The visitors themselves are tested in their own unit tests.
+ * Here we assume the visitors work and spot check a couple of operations
+ * to help catch any integration issues.
*/ -TEMPLATE_LIST_TEST_CASE("buffer::Contiguous", "", types::floating_point_types) { - using base_type = Contiguous; - auto pt0 = testing::eigen_scalar(); - auto pt1 = testing::eigen_vector(); - auto& t0 = *pt0; - auto& t1 = *pt1; +TEMPLATE_LIST_TEST_CASE("Contiguous", "", types::floating_point_types) { + using buffer::Contiguous; + using buffer_type = Contiguous::buffer_type; + using shape_type = typename Contiguous::shape_type; + using label_type = typename Contiguous::label_type; + + TestType one(1.0), two(2.0), three(3.0), four(4.0); + std::vector data = {one, two, three, four}; + + shape_type scalar_shape({}); + shape_type vector_shape({4}); + shape_type matrix_shape({2, 2}); + + Contiguous defaulted; + Contiguous scalar(std::vector{one}, scalar_shape); + Contiguous vector(data, vector_shape); + Contiguous matrix(data, matrix_shape); + + SECTION("Ctors and assignment") { + SECTION("Default ctor") { + REQUIRE(defaulted.size() == 0); + REQUIRE(defaulted.shape() == shape_type()); + } + + SECTION("vector ctor") { + REQUIRE(scalar.size() == 1); + REQUIRE(scalar.shape() == scalar_shape); + REQUIRE(scalar.get_elem({}) == one); + + REQUIRE(vector.size() == 4); + REQUIRE(vector.shape() == vector_shape); + REQUIRE(vector.get_elem({0}) == one); + REQUIRE(vector.get_elem({1}) == two); + REQUIRE(vector.get_elem({2}) == three); + REQUIRE(vector.get_elem({3}) == four); + + REQUIRE(matrix.size() == 4); + REQUIRE(matrix.shape() == matrix_shape); + REQUIRE(matrix.get_elem({0, 0}) == one); + REQUIRE(matrix.get_elem({0, 1}) == two); + REQUIRE(matrix.get_elem({1, 0}) == three); + REQUIRE(matrix.get_elem({1, 1}) == four); + + REQUIRE_THROWS_AS(Contiguous(data, scalar_shape), + std::invalid_argument); + } + + SECTION("FloatBuffer ctor") { + buffer_type buf(data); + + Contiguous vector_buf(buf, vector_shape); + REQUIRE(vector_buf == vector); + + Contiguous matrix_buf(buf, matrix_shape); + REQUIRE(matrix_buf == matrix); + + REQUIRE_THROWS_AS(Contiguous(buf, scalar_shape), + std::invalid_argument); + } + + SECTION("Copy ctor") { + Contiguous defaulted_copy(defaulted); + REQUIRE(defaulted_copy == defaulted); + + Contiguous scalar_copy(scalar); + REQUIRE(scalar_copy == scalar); - auto& base0 = static_cast(t0); - auto& base1 = static_cast(t1); + Contiguous vector_copy(vector); + REQUIRE(vector_copy == vector); + + Contiguous matrix_copy(matrix); + REQUIRE(matrix_copy == matrix); + } + + SECTION("Move ctor") { + Contiguous defaulted_temp(defaulted); + Contiguous defaulted_move(std::move(defaulted_temp)); + REQUIRE(defaulted_move == defaulted); + + Contiguous scalar_temp(scalar); + Contiguous scalar_move(std::move(scalar_temp)); + REQUIRE(scalar_move == scalar); + + Contiguous vector_temp(vector); + Contiguous vector_move(std::move(vector_temp)); + REQUIRE(vector_move == vector); + + Contiguous matrix_temp(matrix); + Contiguous matrix_move(std::move(matrix_temp)); + REQUIRE(matrix_move == matrix); + } + + SECTION("Copy assignment") { + Contiguous defaulted_copy; + auto pdefaulted_copy = &(defaulted_copy = defaulted); + REQUIRE(defaulted_copy == defaulted); + REQUIRE(pdefaulted_copy == &defaulted_copy); + + Contiguous scalar_copy; + auto pscalar_copy = &(scalar_copy = scalar); + REQUIRE(scalar_copy == scalar); + REQUIRE(pscalar_copy == &scalar_copy); + + Contiguous vector_copy; + auto pvector_copy = &(vector_copy = vector); + REQUIRE(vector_copy == vector); + REQUIRE(pvector_copy == &vector_copy); + + Contiguous matrix_copy; + auto pmatrix_copy = &(matrix_copy = matrix); + REQUIRE(matrix_copy == matrix); + 
REQUIRE(pmatrix_copy == &matrix_copy); + } + + SECTION("Move assignment") { + Contiguous defaulted_temp(defaulted); + Contiguous defaulted_move; + auto pdefaulted_move = + &(defaulted_move = std::move(defaulted_temp)); + REQUIRE(defaulted_move == defaulted); + REQUIRE(pdefaulted_move == &defaulted_move); + + Contiguous scalar_temp(scalar); + Contiguous scalar_move; + auto pscalar_move = &(scalar_move = std::move(scalar_temp)); + REQUIRE(scalar_move == scalar); + REQUIRE(pscalar_move == &scalar_move); + + Contiguous vector_temp(vector); + Contiguous vector_move; + auto pvector_move = &(vector_move = std::move(vector_temp)); + REQUIRE(vector_move == vector); + REQUIRE(pvector_move == &vector_move); + + Contiguous matrix_temp(matrix); + Contiguous matrix_move; + auto pmatrix_move = &(matrix_move = std::move(matrix_temp)); + REQUIRE(matrix_move == matrix); + REQUIRE(pmatrix_move == &matrix_move); + } + } + + SECTION("shape") { + REQUIRE(defaulted.shape() == shape_type()); + REQUIRE(scalar.shape() == scalar_shape); + REQUIRE(vector.shape() == vector_shape); + REQUIRE(matrix.shape() == matrix_shape); + } SECTION("size") { - REQUIRE(base0.size() == 1); - REQUIRE(base1.size() == 5); + REQUIRE(defaulted.size() == 0); + REQUIRE(scalar.size() == 1); + REQUIRE(vector.size() == 4); + REQUIRE(matrix.size() == 4); } - SECTION("get_mutable_data()") { - REQUIRE(*base0.get_mutable_data() == TestType(42.0)); + SECTION("get_elem") { + REQUIRE_THROWS_AS(defaulted.get_elem({}), std::out_of_range); - REQUIRE(*(base1.get_mutable_data() + 0) == TestType(0.0)); - REQUIRE(*(base1.get_mutable_data() + 1) == TestType(1.0)); - REQUIRE(*(base1.get_mutable_data() + 2) == TestType(2.0)); - REQUIRE(*(base1.get_mutable_data() + 3) == TestType(3.0)); - REQUIRE(*(base1.get_mutable_data() + 4) == TestType(4.0)); + REQUIRE(scalar.get_elem({}) == one); + REQUIRE_THROWS_AS(scalar.get_elem({0}), std::out_of_range); + + REQUIRE(vector.get_elem({0}) == one); + REQUIRE(vector.get_elem({1}) == two); + REQUIRE(vector.get_elem({2}) == three); + REQUIRE(vector.get_elem({3}) == four); + REQUIRE_THROWS_AS(vector.get_elem({4}), std::out_of_range); + + REQUIRE(matrix.get_elem({0, 0}) == one); + REQUIRE(matrix.get_elem({0, 1}) == two); + REQUIRE(matrix.get_elem({1, 0}) == three); + REQUIRE(matrix.get_elem({1, 1}) == four); + REQUIRE_THROWS_AS(matrix.get_elem({2, 0}), std::out_of_range); + } + + SECTION("set_elem") { + REQUIRE_THROWS_AS(defaulted.set_elem({}, one), std::out_of_range); + + REQUIRE(scalar.get_elem({}) != two); + scalar.set_elem({}, two); + REQUIRE(scalar.get_elem({}) == two); + + REQUIRE(vector.get_elem({2}) != four); + vector.set_elem({2}, four); + REQUIRE(vector.get_elem({2}) == four); + + REQUIRE(matrix.get_elem({1, 0}) != one); + matrix.set_elem({1, 0}, one); + REQUIRE(matrix.get_elem({1, 0}) == one); + } + + SECTION("infinity_norm") { + REQUIRE_THROWS_AS(defaulted.infinity_norm(), std::runtime_error); + REQUIRE(scalar.infinity_norm() == one); + REQUIRE(vector.infinity_norm() == four); + REQUIRE(matrix.infinity_norm() == four); + } + + SECTION("operator==") { + // Same object + REQUIRE(defaulted == defaulted); + + Contiguous scalar_copy(std::vector{one}, scalar_shape); + REQUIRE(scalar == scalar_copy); + + Contiguous vector_copy(data, vector_shape); + REQUIRE(vector == vector_copy); + + Contiguous matrix_copy(data, matrix_shape); + REQUIRE(matrix == matrix_copy); + + // Different ranks + REQUIRE_FALSE(scalar == vector); + REQUIRE_FALSE(vector == matrix); + REQUIRE_FALSE(scalar == matrix); + + // Different shapes + shape_type 
matrix_shape2({4, 1}); + REQUIRE_FALSE(scalar == Contiguous(data, matrix_shape2)); + + // Different values + std::vector diff_data = {two, three, four, one}; + Contiguous scalar_diff(std::vector{two}, scalar_shape); + REQUIRE_FALSE(scalar == scalar_diff); + REQUIRE_FALSE(vector == Contiguous(diff_data, vector_shape)); + REQUIRE_FALSE(matrix == Contiguous(diff_data, matrix_shape)); + } + + SECTION("approximately_equal") { + Contiguous scalar2(std::vector{one}, scalar_shape); + Contiguous vector2(data, vector_shape); + Contiguous matrix2(data, matrix_shape); + double default_tol = 1e-16; + SECTION("different ranks") { + REQUIRE_FALSE(scalar.approximately_equal(vector, default_tol)); + REQUIRE_FALSE(scalar.approximately_equal(matrix, default_tol)); + REQUIRE_FALSE(vector.approximately_equal(scalar, default_tol)); + REQUIRE_FALSE(vector.approximately_equal(matrix, default_tol)); + REQUIRE_FALSE(matrix.approximately_equal(scalar, default_tol)); + REQUIRE_FALSE(matrix.approximately_equal(vector, default_tol)); + } + + SECTION("Same values") { + REQUIRE(scalar.approximately_equal(scalar2, default_tol)); + REQUIRE(scalar2.approximately_equal(scalar, default_tol)); + REQUIRE(vector.approximately_equal(vector2, default_tol)); + REQUIRE(vector2.approximately_equal(vector, default_tol)); + REQUIRE(matrix.approximately_equal(matrix2, default_tol)); + REQUIRE(matrix2.approximately_equal(matrix, default_tol)); + } + + SECTION("Differ by more than provided tolerance") { + TestType diff = 1e-1; + scalar2.set_elem({}, one + diff); + vector2.set_elem({0}, one + diff); + matrix2.set_elem({0, 0}, one + diff); + double tol = 1e-1; + REQUIRE_FALSE(scalar.approximately_equal(scalar2, tol)); + REQUIRE_FALSE(scalar2.approximately_equal(scalar, tol)); + REQUIRE_FALSE(vector.approximately_equal(vector2, tol)); + REQUIRE_FALSE(vector2.approximately_equal(vector, tol)); + REQUIRE_FALSE(matrix.approximately_equal(matrix2, tol)); + REQUIRE_FALSE(matrix2.approximately_equal(matrix, tol)); + } + + SECTION("Differ by less than provided tolerance") { + TestType diff = 1e-10; + double tol = 1e-1; + scalar2.set_elem({}, one + diff); + vector2.set_elem({0}, one + diff); + matrix2.set_elem({0, 0}, one + diff); + REQUIRE(scalar.approximately_equal(scalar2, tol)); + REQUIRE(scalar2.approximately_equal(scalar, tol)); + REQUIRE(vector.approximately_equal(vector2, tol)); + REQUIRE(vector2.approximately_equal(vector, tol)); + REQUIRE(matrix.approximately_equal(matrix2, tol)); + REQUIRE(matrix2.approximately_equal(matrix, tol)); + } } - SECTION("get_immutable_data() const") { - REQUIRE(*std::as_const(base0).get_immutable_data() == TestType(42.0)); - - REQUIRE(*(std::as_const(base1).get_immutable_data() + 0) == - TestType(0.0)); - REQUIRE(*(std::as_const(base1).get_immutable_data() + 1) == - TestType(1.0)); - REQUIRE(*(std::as_const(base1).get_immutable_data() + 2) == - TestType(2.0)); - REQUIRE(*(std::as_const(base1).get_immutable_data() + 3) == - TestType(3.0)); - REQUIRE(*(std::as_const(base1).get_immutable_data() + 4) == - TestType(4.0)); + SECTION("addition_assignment_") { + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.addition_assignment(labels, scalar(labels), scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(2.0)); + } + + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.addition_assignment(labels, vector(labels), vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(2.0)); 
+ REQUIRE(result.get_elem({1}) == TestType(4.0)); + REQUIRE(result.get_elem({2}) == TestType(6.0)); + REQUIRE(result.get_elem({3}) == TestType(8.0)); + } + + SECTION("matrix") { + label_type labels("i,j"); + Contiguous result; + result.addition_assignment(labels, matrix(labels), matrix(labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(2.0)); + REQUIRE(result.get_elem({0, 1}) == TestType(4.0)); + REQUIRE(result.get_elem({1, 0}) == TestType(6.0)); + REQUIRE(result.get_elem({1, 1}) == TestType(8.0)); + } } - SECTION("get_elem() const") { - REQUIRE(base0.get_elem({}) == TestType(42.0)); + SECTION("subtraction_assignment_") { + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.subtraction_assignment(labels, scalar(labels), + scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(0.0)); + } - REQUIRE(base1.get_elem({0}) == TestType(0.0)); - REQUIRE(base1.get_elem({1}) == TestType(1.0)); - REQUIRE(base1.get_elem({2}) == TestType(2.0)); - REQUIRE(base1.get_elem({3}) == TestType(3.0)); - REQUIRE(base1.get_elem({4}) == TestType(4.0)); + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.subtraction_assignment(labels, vector(labels), + vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(0.0)); + REQUIRE(result.get_elem({1}) == TestType(0.0)); + REQUIRE(result.get_elem({2}) == TestType(0.0)); + REQUIRE(result.get_elem({3}) == TestType(0.0)); + } - REQUIRE_THROWS_AS(base0.get_elem({0}), std::runtime_error); + SECTION("matrix") { + label_type labels("i,j"); + Contiguous result; + result.subtraction_assignment(labels, matrix(labels), + matrix(labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(0.0)); + REQUIRE(result.get_elem({0, 1}) == TestType(0.0)); + REQUIRE(result.get_elem({1, 0}) == TestType(0.0)); + REQUIRE(result.get_elem({1, 1}) == TestType(0.0)); + } } - SECTION("set_elem() const") { - base0.set_elem({}, TestType(43.0)); - REQUIRE(base0.get_elem({}) == TestType(43.0)); + SECTION("multiplication_assignment_") { + // N.b., dispatching among hadamard, contraction, etc. is the visitor's + // responsibility and happens there. Here we just test hadamard. 
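// The visitor dispatch mentioned in the comment above has this general shape:
// a callable with a templated operator() receives the buffer's data as a
// std::span of the concrete floating-point type, and the type-erased buffer
// selects the instantiation. A minimal sketch of the pattern, with
// std::variant standing in for the type-erased buffer (SumVisitor and
// visit_span are illustrative, not the visit_contiguous_buffer API itself):

#include <span>
#include <type_traits>
#include <variant>
#include <vector>

struct SumVisitor {
    // Instantiated once per floating-point type the buffer may hold.
    template<typename FloatType>
    double operator()(std::span<const FloatType> data) const {
        double sum = 0.0;
        for(auto x : data) sum += static_cast<double>(x);
        return sum;
    }
};

using erased_buffer = std::variant<std::vector<float>, std::vector<double>>;

// Recovers the concrete element type, then forwards a span to the visitor.
template<typename Visitor>
double visit_span(Visitor&& v, const erased_buffer& b) {
    return std::visit(
      [&](const auto& vec) {
          using float_t = typename std::decay_t<decltype(vec)>::value_type;
          return v(std::span<const float_t>(vec.data(), vec.size()));
      },
      b);
}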
- base1.set_elem({0}, TestType(43.0)); - REQUIRE(base1.get_elem({0}) == TestType(43.0)); + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.multiplication_assignment(labels, scalar(labels), + scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(1.0)); + } - REQUIRE_THROWS_AS(base0.set_elem({0}, TestType{0.0}), - std::runtime_error); + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.multiplication_assignment(labels, vector(labels), + vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(1.0)); + REQUIRE(result.get_elem({1}) == TestType(4.0)); + REQUIRE(result.get_elem({2}) == TestType(9.0)); + REQUIRE(result.get_elem({3}) == TestType(16.0)); + } + + SECTION("matrix") { + label_type labels("i,j"); + Contiguous result; + result.multiplication_assignment(labels, matrix(labels), + matrix(labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(1.0)); + REQUIRE(result.get_elem({0, 1}) == TestType(4.0)); + REQUIRE(result.get_elem({1, 0}) == TestType(9.0)); + REQUIRE(result.get_elem({1, 1}) == TestType(16.0)); + } } - SECTION("get_data() const") { - REQUIRE(base0.get_data(0) == TestType(42.0)); + SECTION("scalar_multiplication_") { + // TODO: Test with other scalar types when public API supports it + using scalar_type = double; + scalar_type scalar_value_{2.0}; + TestType scalar_value(scalar_value_); + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.scalar_multiplication(labels, scalar_value_, scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(1.0) * scalar_value); + } - REQUIRE(base1.get_data(0) == TestType(0.0)); - REQUIRE(base1.get_data(1) == TestType(1.0)); - REQUIRE(base1.get_data(2) == TestType(2.0)); - REQUIRE(base1.get_data(3) == TestType(3.0)); - REQUIRE(base1.get_data(4) == TestType(4.0)); + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.scalar_multiplication(labels, scalar_value_, vector(labels)); + REQUIRE(result.shape() == vector_shape); + REQUIRE(result.get_elem({0}) == TestType(1.0) * scalar_value); + REQUIRE(result.get_elem({1}) == TestType(2.0) * scalar_value); + REQUIRE(result.get_elem({2}) == TestType(3.0) * scalar_value); + REQUIRE(result.get_elem({3}) == TestType(4.0) * scalar_value); + } - REQUIRE_THROWS_AS(base0.get_data(1), std::runtime_error); + SECTION("matrix") { + label_type rhs_labels("i,j"); + label_type lhs_labels("j,i"); + Contiguous result; + result.scalar_multiplication(lhs_labels, scalar_value_, + matrix(rhs_labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(1.0) * scalar_value); + REQUIRE(result.get_elem({0, 1}) == TestType(3.0) * scalar_value); + REQUIRE(result.get_elem({1, 0}) == TestType(2.0) * scalar_value); + REQUIRE(result.get_elem({1, 1}) == TestType(4.0) * scalar_value); + } } - SECTION("set_data() const") { - base0.set_data(0, TestType(43.0)); - REQUIRE(base0.get_elem({}) == TestType(43.0)); + SECTION("permute_assignment_") { + SECTION("scalar") { + label_type labels(""); + Contiguous result; + result.permute_assignment(labels, scalar(labels)); + REQUIRE(result.shape() == scalar_shape); + REQUIRE(result.get_elem({}) == TestType(1.0)); + } + + SECTION("vector") { + label_type labels("i"); + Contiguous result; + result.permute_assignment(labels, vector(labels)); + REQUIRE(result.shape() == vector_shape); + 
REQUIRE(result.get_elem({0}) == TestType(1.0)); + REQUIRE(result.get_elem({1}) == TestType(2.0)); + REQUIRE(result.get_elem({2}) == TestType(3.0)); + REQUIRE(result.get_elem({3}) == TestType(4.0)); + } - REQUIRE_THROWS_AS(base0.set_data(1, TestType{0.0}), std::runtime_error); + SECTION("matrix") { + label_type rhs_labels("i,j"); + label_type lhs_labels("j,i"); + Contiguous result; + result.permute_assignment(lhs_labels, matrix(rhs_labels)); + REQUIRE(result.shape() == matrix_shape); + REQUIRE(result.get_elem({0, 0}) == TestType(1.0)); + REQUIRE(result.get_elem({0, 1}) == TestType(3.0)); + REQUIRE(result.get_elem({1, 0}) == TestType(2.0)); + REQUIRE(result.get_elem({1, 1}) == TestType(4.0)); + } } - SECTION("fill()") { - base1.fill(TestType{43.0}); - REQUIRE(base1.get_data(0) == TestType(43.0)); - REQUIRE(base1.get_data(1) == TestType(43.0)); - REQUIRE(base1.get_data(2) == TestType(43.0)); - REQUIRE(base1.get_data(3) == TestType(43.0)); - REQUIRE(base1.get_data(4) == TestType(43.0)); + SECTION("to_string") { + REQUIRE(defaulted.to_string().empty()); + REQUIRE_FALSE(scalar.to_string().empty()); + REQUIRE_FALSE(vector.to_string().empty()); + REQUIRE_FALSE(matrix.to_string().empty()); } - SECTION("copy()") { - auto data = std::vector(5, TestType(43.0)); - base1.copy(data); - REQUIRE(base1.get_data(0) == TestType(43.0)); - REQUIRE(base1.get_data(1) == TestType(43.0)); - REQUIRE(base1.get_data(2) == TestType(43.0)); - REQUIRE(base1.get_data(3) == TestType(43.0)); - REQUIRE(base1.get_data(4) == TestType(43.0)); + SECTION("add_to_stream") { + std::stringstream ss; + SECTION("defaulted") { + defaulted.add_to_stream(ss); + REQUIRE(ss.str().empty()); + } + SECTION("scalar") { + scalar.add_to_stream(ss); + REQUIRE_FALSE(ss.str().empty()); + } + SECTION("vector") { + vector.add_to_stream(ss); + REQUIRE_FALSE(ss.str().empty()); + } + SECTION("matrix") { + matrix.add_to_stream(ss); + REQUIRE_FALSE(ss.str().empty()); + } } } + +TEST_CASE("make_contiguous(buffer, shape)") { + using buffer::Contiguous; + using tensor_type = Tensor; + using shape_type = shape::Smooth; + + std::vector data = {1.0, 2.0, 3.0, 4.0}; + shape_type shape({2, 2}); + buffer::Contiguous buffer(data, shape); + + tensor_type tensor(shape.clone(), + std::make_unique(buffer)); + + shape_type other({3, 4, 5}); + Contiguous contig = buffer::make_contiguous(tensor.buffer(), other); + + REQUIRE(contig.shape() == other); + REQUIRE(contig.size() == 60); // 3*4*5 = 60 + REQUIRE(contig.get_elem({0, 0, 0}) == 0.0); +} diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/contraction_planner.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/contraction_planner.cpp index 47a26d2f..53299bba 100644 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/contraction_planner.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/buffer/contraction_planner.cpp @@ -16,7 +16,6 @@ #include "../testing/testing.hpp" #include -#include using namespace tensorwrapper; using namespace buffer; diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp deleted file mode 100644 index e7f565e6..00000000 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/eigen_tensor.cpp +++ /dev/null @@ -1,789 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "../../testing/testing.hpp" -#include -#include - -using namespace tensorwrapper; -using namespace testing; - -using buffer::detail_::hash_utilities::hash_input; - -template -using pimpl_type = buffer::detail_::EigenTensor; -using shape_type = shape::Smooth; - -// Should be the same regardless of template parameters -using label_type = typename pimpl_type::label_type; -using hash_type = typename pimpl_type::hash_type; - -TEMPLATE_LIST_TEST_CASE("EigenTensor", "", types::floating_point_types) { - pimpl_type scalar(shape_type{}); - scalar.set_elem({}, 1.0); - - pimpl_type vector(shape_type{2}); - vector.set_elem({0}, 1.0); - vector.set_elem({1}, 2.0); - - pimpl_type matrix(shape_type{2, 2}); - matrix.set_elem({0, 0}, 1.0); - matrix.set_elem({0, 1}, 2.0); - matrix.set_elem({1, 0}, 3.0); - matrix.set_elem({1, 1}, 4.0); - - pimpl_type tensor(shape_type{2, 2, 2}); - tensor.set_elem({0, 0, 0}, 1.0); - tensor.set_elem({0, 0, 1}, 2.0); - tensor.set_elem({0, 1, 0}, 3.0); - tensor.set_elem({0, 1, 1}, 4.0); - tensor.set_elem({1, 0, 0}, 5.0); - tensor.set_elem({1, 0, 1}, 6.0); - tensor.set_elem({1, 1, 0}, 7.0); - tensor.set_elem({1, 1, 1}, 8.0); - - // ------------------------------------------------------------------------- - // -- Public methods - // ------------------------------------------------------------------------- - - SECTION("operator==") { - SECTION("Same State") { - pimpl_type scalar2(scalar); - REQUIRE(scalar2 == scalar); - } - - SECTION("Different Value") { - pimpl_type scalar2(scalar); - scalar2.set_elem({}, 42.0); - REQUIRE_FALSE(scalar2 == scalar); - // Ensure hash is recalculated after change - scalar2.set_elem({}, 1.0); - REQUIRE(scalar2 == scalar); - } - - SECTION("Different Extents") { - pimpl_type vector2(shape_type{1}); - vector.set_elem({0}, 1.0); - REQUIRE_FALSE(vector2 == vector); - } - - if constexpr(types::is_uncertain_v) { - SECTION("Check Error Sources Match") { - pimpl_type uscalar(shape_type{}); - uscalar.set_elem({}, TestType(1.0, 0.0)); - pimpl_type uscalar2(uscalar); - REQUIRE(uscalar2 == uscalar); - } - } - } - - SECTION("get_hash") { - SECTION("scalar") { - hash_type scalar_hash = scalar.get_hash(); - - hash_type corr{std::as_const(scalar).rank()}; - hash_input(corr, std::as_const(scalar).get_elem({})); - REQUIRE(scalar_hash == corr); - } - SECTION("vector") { - hash_type vector_hash = vector.get_hash(); - - using buffer::detail_::hash_utilities::hash_input; - hash_type corr{std::as_const(vector).rank()}; - hash_input(corr, std::as_const(vector).extent(0)); - hash_input(corr, std::as_const(vector).get_elem({0})); - hash_input(corr, std::as_const(vector).get_elem({1})); - REQUIRE(vector_hash == corr); - } - } - - // ------------------------------------------------------------------------- - // -- Protected methods - // ------------------------------------------------------------------------- - - SECTION("clone_") { - REQUIRE(scalar.clone()->are_equal(scalar)); - REQUIRE(vector.clone()->are_equal(vector)); - REQUIRE(matrix.clone()->are_equal(matrix)); - REQUIRE(tensor.clone()->are_equal(tensor)); - } - - SECTION("rank_") { - 
REQUIRE(scalar.rank() == 0); - REQUIRE(vector.rank() == 1); - REQUIRE(matrix.rank() == 2); - REQUIRE(tensor.rank() == 3); - - pimpl_type defaulted; - REQUIRE(defaulted.rank() == 6); - } - - SECTION("size_") { - REQUIRE(scalar.size() == 1); - REQUIRE(vector.size() == 2); - REQUIRE(matrix.size() == 4); - REQUIRE(tensor.size() == 8); - } - - SECTION("extent_") { - REQUIRE(vector.extent(0) == 2); - - REQUIRE(matrix.extent(0) == 2); - REQUIRE(matrix.extent(1) == 2); - - REQUIRE(tensor.extent(0) == 2); - REQUIRE(tensor.extent(1) == 2); - REQUIRE(tensor.extent(2) == 2); - } - - SECTION("get_mutable_data_()") { - SECTION("accessing") { - REQUIRE(*scalar.get_mutable_data() == TestType{1.0}); - - REQUIRE(*vector.get_mutable_data() == TestType{1.0}); - REQUIRE(*(vector.get_mutable_data() + 1) == TestType{2.0}); - - REQUIRE(*matrix.get_mutable_data() == TestType{1.0}); - REQUIRE(*(matrix.get_mutable_data() + 1) == TestType{2.0}); - REQUIRE(*(matrix.get_mutable_data() + 2) == TestType{3.0}); - REQUIRE(*(matrix.get_mutable_data() + 3) == TestType{4.0}); - - REQUIRE(*tensor.get_mutable_data() == TestType{1.0}); - REQUIRE(*(tensor.get_mutable_data() + 1) == TestType{2.0}); - REQUIRE(*(tensor.get_mutable_data() + 2) == TestType{3.0}); - REQUIRE(*(tensor.get_mutable_data() + 3) == TestType{4.0}); - REQUIRE(*(tensor.get_mutable_data() + 4) == TestType{5.0}); - REQUIRE(*(tensor.get_mutable_data() + 5) == TestType{6.0}); - REQUIRE(*(tensor.get_mutable_data() + 6) == TestType{7.0}); - REQUIRE(*(tensor.get_mutable_data() + 7) == TestType{8.0}); - } - - SECTION("comparison behavior") { - // Initial state is the same - pimpl_type s(shape_type{}); - s.set_elem({}, 1.0); - REQUIRE(s == scalar); - // Still the same, but the normal hash recalculation flag would be - // reset after this comparison. - auto* pdata = s.get_mutable_data(); - REQUIRE(s == scalar); - // Changes state. The internal hash will have to be recalculated for - // the comparison to be false, ensuring that the hash caching has - // been turned off. 
- pdata[0] = 2.0; - REQUIRE_FALSE(s == scalar); - } - } - - SECTION("get_immutable_data_() const") { - REQUIRE(*std::as_const(scalar).get_immutable_data() == TestType{1.0}); - - REQUIRE(*std::as_const(vector).get_immutable_data() == TestType{1.0}); - REQUIRE(*(std::as_const(vector).get_immutable_data() + 1) == - TestType{2.0}); - - REQUIRE(*std::as_const(matrix).get_immutable_data() == TestType{1.0}); - REQUIRE(*(std::as_const(matrix).get_immutable_data() + 1) == - TestType{2.0}); - REQUIRE(*(std::as_const(matrix).get_immutable_data() + 2) == - TestType{3.0}); - REQUIRE(*(std::as_const(matrix).get_immutable_data() + 3) == - TestType{4.0}); - - REQUIRE(*std::as_const(tensor).get_immutable_data() == TestType{1.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 1) == - TestType{2.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 2) == - TestType{3.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 3) == - TestType{4.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 4) == - TestType{5.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 5) == - TestType{6.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 6) == - TestType{7.0}); - REQUIRE(*(std::as_const(tensor).get_immutable_data() + 7) == - TestType{8.0}); - } - - SECTION("get_elem_() const") { - REQUIRE(std::as_const(scalar).get_elem({}) == TestType{1.0}); - - REQUIRE(std::as_const(vector).get_elem({0}) == TestType{1.0}); - REQUIRE(std::as_const(vector).get_elem({1}) == TestType{2.0}); - - REQUIRE(std::as_const(matrix).get_elem({0, 0}) == TestType{1.0}); - REQUIRE(std::as_const(matrix).get_elem({0, 1}) == TestType{2.0}); - REQUIRE(std::as_const(matrix).get_elem({1, 0}) == TestType{3.0}); - REQUIRE(std::as_const(matrix).get_elem({1, 1}) == TestType{4.0}); - - REQUIRE(std::as_const(tensor).get_elem({0, 0, 0}) == TestType{1.0}); - REQUIRE(std::as_const(tensor).get_elem({0, 0, 1}) == TestType{2.0}); - REQUIRE(std::as_const(tensor).get_elem({0, 1, 0}) == TestType{3.0}); - REQUIRE(std::as_const(tensor).get_elem({0, 1, 1}) == TestType{4.0}); - REQUIRE(std::as_const(tensor).get_elem({1, 0, 0}) == TestType{5.0}); - REQUIRE(std::as_const(tensor).get_elem({1, 0, 1}) == TestType{6.0}); - REQUIRE(std::as_const(tensor).get_elem({1, 1, 0}) == TestType{7.0}); - REQUIRE(std::as_const(tensor).get_elem({1, 1, 1}) == TestType{8.0}); - } - - SECTION("set_elem_()") { - scalar.set_elem({}, TestType{2.0}); - REQUIRE(scalar.get_elem({}) == TestType{2.0}); - - vector.set_elem({0}, TestType{2.0}); - REQUIRE(vector.get_elem({0}) == TestType{2.0}); - REQUIRE(vector.get_elem({1}) == TestType{2.0}); - } - - SECTION("get_data() const") { - REQUIRE(std::as_const(scalar).get_data(0) == TestType{1.0}); - - REQUIRE(std::as_const(vector).get_data(0) == TestType{1.0}); - REQUIRE(std::as_const(vector).get_data(1) == TestType{2.0}); - - REQUIRE(std::as_const(matrix).get_data(0) == TestType{1.0}); - REQUIRE(std::as_const(matrix).get_data(1) == TestType{2.0}); - REQUIRE(std::as_const(matrix).get_data(2) == TestType{3.0}); - REQUIRE(std::as_const(matrix).get_data(3) == TestType{4.0}); - - REQUIRE(std::as_const(tensor).get_data(0) == TestType{1.0}); - REQUIRE(std::as_const(tensor).get_data(1) == TestType{2.0}); - REQUIRE(std::as_const(tensor).get_data(2) == TestType{3.0}); - REQUIRE(std::as_const(tensor).get_data(3) == TestType{4.0}); - REQUIRE(std::as_const(tensor).get_data(4) == TestType{5.0}); - REQUIRE(std::as_const(tensor).get_data(5) == TestType{6.0}); - REQUIRE(std::as_const(tensor).get_data(6) == 
TestType{7.0}); - REQUIRE(std::as_const(tensor).get_data(7) == TestType{8.0}); - } - - SECTION("set_data_()") { - scalar.set_data(0, TestType{2.0}); - REQUIRE(scalar.get_data(0) == TestType{2.0}); - - vector.set_data(0, TestType{2.0}); - REQUIRE(vector.get_data(0) == TestType{2.0}); - REQUIRE(vector.get_data(1) == TestType{2.0}); - } - - SECTION("fill_()") { - vector.fill(TestType{42.0}); - REQUIRE(vector.get_data(0) == TestType(42.0)); - REQUIRE(vector.get_data(1) == TestType(42.0)); - } - - SECTION("copy_()") { - auto data = std::vector(2, TestType(42.0)); - vector.copy(data); - REQUIRE(vector.get_data(0) == TestType(42.0)); - REQUIRE(vector.get_data(1) == TestType(42.0)); - } - - SECTION("are_equal_") { - pimpl_type scalar2(scalar); - REQUIRE(scalar2.are_equal(scalar)); - - scalar2.set_elem({}, 42.0); - REQUIRE_FALSE(scalar2.are_equal(scalar)); - } - - SECTION("to_string_") { - std::stringstream sone; - sone << TestType{1.0}; - - std::stringstream stwo; - stwo << TestType{2.0}; - - REQUIRE(scalar.to_string() == sone.str()); - REQUIRE(vector.to_string() == sone.str() + " " + stwo.str()); - } - - SECTION("add_to_stream_") { - std::stringstream ss, ss_corr; - ss << std::fixed << std::setprecision(4); - scalar.add_to_stream(ss); - ss_corr << std::fixed << std::setprecision(4); - ss_corr << TestType{1.0}; - REQUIRE(ss.str() == ss_corr.str()); - REQUIRE_FALSE(ss.str() == scalar.to_string()); - } - - SECTION("addition_assignment_") { - SECTION("scalar") { - pimpl_type output; - label_type s(""); - output.addition_assignment(s, s, s, scalar, scalar); - - pimpl_type corr(shape_type{}); - corr.set_elem({}, 2.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute none") { - pimpl_type output; - label_type o("i,j,k"); - label_type l("i,j,k"); - label_type r("i,j,k"); - - output.addition_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 2.0); - corr.set_elem({0, 0, 1}, 4.0); - corr.set_elem({0, 1, 0}, 6.0); - corr.set_elem({0, 1, 1}, 8.0); - corr.set_elem({1, 0, 0}, 10.0); - corr.set_elem({1, 0, 1}, 12.0); - corr.set_elem({1, 1, 0}, 14.0); - corr.set_elem({1, 1, 1}, 16.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute LHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("k,j,i"); - - output.addition_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 2.0); - corr.set_elem({0, 0, 1}, 7.0); - corr.set_elem({0, 1, 0}, 6.0); - corr.set_elem({0, 1, 1}, 11.0); - corr.set_elem({1, 0, 0}, 7.0); - corr.set_elem({1, 0, 1}, 12.0); - corr.set_elem({1, 1, 0}, 11.0); - corr.set_elem({1, 1, 1}, 16.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute RHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("k,j,i"); - label_type r("i,j,k"); - - output.addition_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 2.0); - corr.set_elem({0, 0, 1}, 7.0); - corr.set_elem({0, 1, 0}, 6.0); - corr.set_elem({0, 1, 1}, 11.0); - corr.set_elem({1, 0, 0}, 7.0); - corr.set_elem({1, 0, 1}, 12.0); - corr.set_elem({1, 1, 0}, 11.0); - corr.set_elem({1, 1, 1}, 16.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute all") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("j,i,k"); - - output.addition_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 2.0); - corr.set_elem({0, 0, 1}, 
8.0); - corr.set_elem({0, 1, 0}, 8.0); - corr.set_elem({0, 1, 1}, 14.0); - corr.set_elem({1, 0, 0}, 4.0); - corr.set_elem({1, 0, 1}, 10.0); - corr.set_elem({1, 1, 0}, 10.0); - corr.set_elem({1, 1, 1}, 16.0); - REQUIRE(output == corr); - } - } - - SECTION("subtraction_assignment_") { - SECTION("scalar") { - pimpl_type output; - label_type s(""); - output.subtraction_assignment(s, s, s, scalar, scalar); - - pimpl_type corr(shape_type{}); - corr.set_elem({}, 0.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute none") { - pimpl_type output; - label_type o("i,j,k"); - label_type l("i,j,k"); - label_type r("i,j,k"); - - output.subtraction_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 0.0); - corr.set_elem({0, 0, 1}, 0.0); - corr.set_elem({0, 1, 0}, 0.0); - corr.set_elem({0, 1, 1}, 0.0); - corr.set_elem({1, 0, 0}, 0.0); - corr.set_elem({1, 0, 1}, 0.0); - corr.set_elem({1, 1, 0}, 0.0); - corr.set_elem({1, 1, 1}, 0.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute LHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("k,j,i"); - - output.subtraction_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 0.0); - corr.set_elem({0, 0, 1}, 3.0); - corr.set_elem({0, 1, 0}, 0.0); - corr.set_elem({0, 1, 1}, 3.0); - corr.set_elem({1, 0, 0}, -3.0); - corr.set_elem({1, 0, 1}, 0.0); - corr.set_elem({1, 1, 0}, -3.0); - corr.set_elem({1, 1, 1}, 0.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute RHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("k,j,i"); - label_type r("i,j,k"); - - output.subtraction_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 0.0); - corr.set_elem({0, 0, 1}, -3.0); - corr.set_elem({0, 1, 0}, 0.0); - corr.set_elem({0, 1, 1}, -3.0); - corr.set_elem({1, 0, 0}, 3.0); - corr.set_elem({1, 0, 1}, 0.0); - corr.set_elem({1, 1, 0}, 3.0); - corr.set_elem({1, 1, 1}, 0.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute all") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("j,i,k"); - - output.subtraction_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 0.0); - corr.set_elem({0, 0, 1}, 2.0); - corr.set_elem({0, 1, 0}, -2.0); - corr.set_elem({0, 1, 1}, 0.0); - corr.set_elem({1, 0, 0}, 0.0); - corr.set_elem({1, 0, 1}, 2.0); - corr.set_elem({1, 1, 0}, -2.0); - corr.set_elem({1, 1, 1}, 0.0); - REQUIRE(output == corr); - } - } - - SECTION("hadamard_assignment_") { - SECTION("scalar") { - pimpl_type output; - label_type s(""); - output.hadamard_assignment(s, s, s, scalar, scalar); - - pimpl_type corr(shape_type{}); - corr.set_elem({}, 1.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute none") { - pimpl_type output; - label_type o("i,j,k"); - label_type l("i,j,k"); - label_type r("i,j,k"); - - output.hadamard_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 1.0); - corr.set_elem({0, 0, 1}, 4.0); - corr.set_elem({0, 1, 0}, 9.0); - corr.set_elem({0, 1, 1}, 16.0); - corr.set_elem({1, 0, 0}, 25.0); - corr.set_elem({1, 0, 1}, 36.0); - corr.set_elem({1, 1, 0}, 49.0); - corr.set_elem({1, 1, 1}, 64.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute LHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("k,j,i"); - - 
output.hadamard_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 1.0); - corr.set_elem({0, 0, 1}, 10.0); - corr.set_elem({0, 1, 0}, 9.0); - corr.set_elem({0, 1, 1}, 28.0); - corr.set_elem({1, 0, 0}, 10.0); - corr.set_elem({1, 0, 1}, 36.0); - corr.set_elem({1, 1, 0}, 28.0); - corr.set_elem({1, 1, 1}, 64.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute RHS") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("k,j,i"); - label_type r("i,j,k"); - - output.hadamard_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 1.0); - corr.set_elem({0, 0, 1}, 10.0); - corr.set_elem({0, 1, 0}, 9.0); - corr.set_elem({0, 1, 1}, 28.0); - corr.set_elem({1, 0, 0}, 10.0); - corr.set_elem({1, 0, 1}, 36.0); - corr.set_elem({1, 1, 0}, 28.0); - corr.set_elem({1, 1, 1}, 64.0); - REQUIRE(output == corr); - } - - SECTION("tensor : permute all") { - pimpl_type output; - label_type o("k,j,i"); - label_type l("i,j,k"); - label_type r("j,i,k"); - - output.hadamard_assignment(o, l, r, tensor, tensor); - - pimpl_type corr(shape_type{2, 2, 2}); - corr.set_elem({0, 0, 0}, 1.0); - corr.set_elem({0, 0, 1}, 15.0); - corr.set_elem({0, 1, 0}, 15.0); - corr.set_elem({0, 1, 1}, 49.0); - corr.set_elem({1, 0, 0}, 4.0); - corr.set_elem({1, 0, 1}, 24.0); - corr.set_elem({1, 1, 0}, 24.0); - corr.set_elem({1, 1, 1}, 64.0); - REQUIRE(output == corr); - } - } - - SECTION("contraction_assignment") { - SECTION("ijk,ijk->") { - pimpl_type output; - - label_type o(""); - label_type l("i,j,k"); - label_type r("i,j,k"); - shape_type oshape{}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({}, 204.0); - REQUIRE(output == corr); - } - - SECTION("ijk,jik->") { - pimpl_type output; - - label_type o(""); - label_type l("i,j,k"); - label_type r("j,i,k"); - shape_type oshape{}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({}, 196.0); - REQUIRE(output == corr); - } - - SECTION("ijk,jkl->il") { - pimpl_type output; - - label_type o("i,l"); - label_type l("i,j,k"); - label_type r("j,k,l"); - shape_type oshape{2, 2}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({0, 0}, 50.0); - corr.set_elem({0, 1}, 60.0); - corr.set_elem({1, 0}, 114.0); - corr.set_elem({1, 1}, 140.0); - REQUIRE(output == corr); - } - - SECTION("ijk,jlk->il") { - pimpl_type output; - - label_type o("i,l"); - label_type l("i,j,k"); - label_type r("j,l,k"); - shape_type oshape{2, 2}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({0, 0}, 44.0); - corr.set_elem({0, 1}, 64.0); - corr.set_elem({1, 0}, 100.0); - corr.set_elem({1, 1}, 152.0); - REQUIRE(output == corr); - } - - SECTION("ijk,jlk->li") { - pimpl_type output; - - label_type o("l,i"); - label_type l("i,j,k"); - label_type r("j,l,k"); - shape_type oshape{2, 2}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - pimpl_type corr(oshape); - corr.set_elem({0, 0}, 44.0); - corr.set_elem({0, 1}, 100.0); - corr.set_elem({1, 0}, 64.0); - corr.set_elem({1, 1}, 152.0); - REQUIRE(output == corr); - } - - SECTION("ijk,ljm->iklm") { - pimpl_type output; - - label_type o("i,k,l,m"); - label_type l("i,j,k"); - label_type r("l,j,m"); - shape_type oshape{2, 2, 2, 2}; - output.contraction_assignment(o, l, r, oshape, tensor, tensor); - - 
pimpl_type corr(oshape);
- corr.set_elem({0, 0, 0, 0}, 10.0);
- corr.set_elem({0, 0, 0, 1}, 14.0);
- corr.set_elem({0, 0, 1, 0}, 26.0);
- corr.set_elem({0, 0, 1, 1}, 30.0);
- corr.set_elem({0, 1, 0, 0}, 14.0);
- corr.set_elem({0, 1, 0, 1}, 20.0);
- corr.set_elem({0, 1, 1, 0}, 38.0);
- corr.set_elem({0, 1, 1, 1}, 44.0);
- corr.set_elem({1, 0, 0, 0}, 26.0);
- corr.set_elem({1, 0, 0, 1}, 38.0);
- corr.set_elem({1, 0, 1, 0}, 74.0);
- corr.set_elem({1, 0, 1, 1}, 86.0);
- corr.set_elem({1, 1, 0, 0}, 30.0);
- corr.set_elem({1, 1, 0, 1}, 44.0);
- corr.set_elem({1, 1, 1, 0}, 86.0);
- corr.set_elem({1, 1, 1, 1}, 100.0);
-
- REQUIRE(output == corr);
- }
-
- SECTION("ij,jkl->ikl") {
- pimpl_type output;
-
- label_type o("i,k,l");
- label_type l("i,j");
- label_type r("j,k,l");
- shape_type oshape{2, 2, 2};
- output.contraction_assignment(o, l, r, oshape, matrix, tensor);
-
- pimpl_type corr(oshape);
- corr.set_elem({0, 0, 0}, 11.0);
- corr.set_elem({0, 0, 1}, 14.0);
- corr.set_elem({0, 1, 0}, 17.0);
- corr.set_elem({0, 1, 1}, 20.0);
- corr.set_elem({1, 0, 0}, 23.0);
- corr.set_elem({1, 0, 1}, 30.0);
- corr.set_elem({1, 1, 0}, 37.0);
- corr.set_elem({1, 1, 1}, 44.0);
-
- REQUIRE(corr == output);
- }
- }
-
- SECTION("permute_assignment") {
- pimpl_type output;
-
- SECTION("matrix : no permute") {
- label_type o("i,j");
- label_type i("i,j");
- output.permute_assignment(o, i, matrix);
-
- REQUIRE(output == matrix);
- }
-
- SECTION("matrix : permute") {
- label_type o("i,j");
- label_type i("j,i");
- output.permute_assignment(o, i, matrix);
-
- pimpl_type corr(shape_type{2, 2});
- corr.set_elem({0, 0}, 1.0);
- corr.set_elem({0, 1}, 3.0);
- corr.set_elem({1, 0}, 2.0);
- corr.set_elem({1, 1}, 4.0);
- REQUIRE(output == corr);
- }
- }
-
- SECTION("scalar_multiplication") {
- pimpl_type output;
-
- SECTION("matrix : no permute") {
- label_type o("i,j");
- label_type i("i,j");
- output.scalar_multiplication(o, i, 2.0, matrix);
-
- pimpl_type corr(shape_type{2, 2});
- corr.set_elem({0, 0}, 2.0);
- corr.set_elem({0, 1}, 4.0);
- corr.set_elem({1, 0}, 6.0);
- corr.set_elem({1, 1}, 8.0);
-
- REQUIRE(output == corr);
- }
-
- SECTION("matrix : permute") {
- label_type o("i,j");
- label_type i("j,i");
- output.scalar_multiplication(o, i, 2.0, matrix);
-
- pimpl_type corr(shape_type{2, 2});
- corr.set_elem({0, 0}, 2.0);
- corr.set_elem({0, 1}, 6.0);
- corr.set_elem({1, 0}, 4.0);
- corr.set_elem({1, 1}, 8.0);
- REQUIRE(output == corr);
- }
- }
-}
diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/unary_operation_visitor.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/unary_operation_visitor.cpp
index 83d9b675..eb4b8200 100644
--- a/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/unary_operation_visitor.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/buffer/detail_/unary_operation_visitor.cpp
@@ -149,3 +149,62 @@ TEMPLATE_LIST_TEST_CASE("ScalarMultiplicationVisitor", "[buffer][detail_]",
REQUIRE(this_buffer.at(5) == TestType(6.0) * scalar);
}
}
+
+TEMPLATE_LIST_TEST_CASE("ApproximatelyEqualVisitor", "[buffer][detail_]",
+ types::floating_point_types) {
+ using VisitorType = buffer::detail_::ApproximatelyEqualVisitor;
+ using vector_type = std::vector<TestType>;
+ using span_type = std::span<TestType>;
+ using cspan_type = std::span<const TestType>;
+ double default_tol = 1e-16;
+
+ vector_type scalar_diff{0.000001};
+ vector_type scalar_same{0.0};
+ vector_type vector_diff{0.000001, -0.000001};
+ vector_type vector_same{0.0, 0.0};
+
+ span_type scalar_diff_span(scalar_diff.data(), scalar_diff.size());
+ cspan_type
cscalar_diff_span(scalar_diff.data(), scalar_diff.size()); + span_type scalar_same_span(scalar_same.data(), scalar_same.size()); + cspan_type cscalar_same_span(scalar_same.data(), scalar_same.size()); + span_type vector_diff_span(vector_diff.data(), vector_diff.size()); + cspan_type cvector_diff_span(vector_diff.data(), vector_diff.size()); + span_type vector_same_span(vector_same.data(), vector_same.size()); + cspan_type cvector_same_span(vector_same.data(), vector_same.size()); + + SECTION("Differ by more than default tolerance") { + VisitorType v(default_tol); + REQUIRE_FALSE(v(scalar_diff_span)); + REQUIRE_FALSE(v(cscalar_diff_span)); + REQUIRE_FALSE(v(vector_diff_span)); + REQUIRE_FALSE(v(cvector_diff_span)); + } + + SECTION("Differ by less than default tolerance") { + VisitorType v(default_tol); + REQUIRE(v(scalar_same_span)); + REQUIRE(v(cscalar_same_span)); + REQUIRE(v(vector_same_span)); + REQUIRE(v(cvector_same_span)); + } + + SECTION("Differ by more than provided tolerance") { + VisitorType v(1e-8); + REQUIRE_FALSE(v(scalar_diff_span)); + REQUIRE_FALSE(v(cscalar_diff_span)); + REQUIRE_FALSE(v(vector_diff_span)); + REQUIRE_FALSE(v(cvector_diff_span)); + } + + SECTION("Differ by less than provided tolerance") { + VisitorType v(1e-1); + REQUIRE(v(scalar_diff_span)); + REQUIRE(v(cscalar_diff_span)); + REQUIRE(v(vector_diff_span)); + REQUIRE(v(cvector_diff_span)); + REQUIRE(v(scalar_same_span)); + REQUIRE(v(cscalar_same_span)); + REQUIRE(v(vector_same_span)); + REQUIRE(v(cvector_same_span)); + } +} diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp deleted file mode 100644 index f783eff9..00000000 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/eigen.cpp +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright 2024 NWChemEx-Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "../testing/testing.hpp" -#include -#include -#include - -using namespace tensorwrapper; -using namespace testing; - -TEMPLATE_LIST_TEST_CASE("Eigen", "", types::floating_point_types) { - // N.B. 
we actually get Contiguous objects back - using buffer_type = buffer::Eigen; - - auto pscalar = testing::eigen_scalar(); - auto& eigen_scalar = static_cast(*pscalar); - eigen_scalar.set_elem({}, 10.0); - - auto pvector = testing::eigen_vector(2); - auto& eigen_vector = static_cast(*pvector); - eigen_vector.set_elem({0}, 10.0); - eigen_vector.set_elem({1}, 20.0); - - auto pmatrix = testing::eigen_matrix(2, 3); - auto& eigen_matrix = static_cast(*pmatrix); - eigen_matrix.set_elem({0, 0}, 10.0); - eigen_matrix.set_elem({0, 1}, 20.0); - eigen_matrix.set_elem({0, 2}, 30.0); - eigen_matrix.set_elem({1, 0}, 40.0); - eigen_matrix.set_elem({1, 1}, 50.0); - eigen_matrix.set_elem({1, 2}, 60.0); - - auto ptensor = testing::eigen_tensor3(1, 2, 3); - auto& eigen_tensor = static_cast(*ptensor); - eigen_tensor.set_elem({0, 0, 0}, 10.0); - eigen_tensor.set_elem({0, 0, 1}, 20.0); - eigen_tensor.set_elem({0, 0, 2}, 30.0); - eigen_tensor.set_elem({0, 1, 0}, 40.0); - eigen_tensor.set_elem({0, 1, 1}, 50.0); - eigen_tensor.set_elem({0, 1, 2}, 60.0); - - auto scalar_layout = scalar_physical(); - auto vector_layout = vector_physical(2); - auto matrix_layout = matrix_physical(2, 3); - auto tensor_layout = tensor3_physical(1, 2, 3); - - buffer_type defaulted; - - SECTION("ctors, assignment") { - SECTION("default ctor") { - REQUIRE(defaulted.get_immutable_data() == nullptr); - } - - SECTION("value ctor") { - REQUIRE(eigen_scalar.layout().are_equal(scalar_layout)); - REQUIRE(eigen_vector.layout().are_equal(vector_layout)); - REQUIRE(eigen_matrix.layout().are_equal(matrix_layout)); - REQUIRE(eigen_tensor.layout().are_equal(tensor_layout)); - } - - test_copy_move_ctor_and_assignment(eigen_scalar, eigen_vector, - eigen_matrix, eigen_tensor); - } - - SECTION("swap") { - buffer_type copy(eigen_scalar); - eigen_scalar.swap(defaulted); - REQUIRE(defaulted == copy); - REQUIRE(eigen_scalar == buffer_type{}); - } - - SECTION("operator==") { - // Checking Layout/Allocator falls to base class tests - auto pscalar2 = testing::eigen_scalar(); - auto& eigen_scalar2 = static_cast(*pscalar2); - eigen_scalar2.set_elem({}, 10.0); - - // Defaulted != scalar - REQUIRE_FALSE(defaulted == eigen_scalar); - - // Everything the same - REQUIRE(eigen_scalar == eigen_scalar2); - - SECTION("Different buffer value") { - eigen_scalar2.set_elem({}, 2.0); - REQUIRE_FALSE(eigen_scalar == eigen_scalar2); - } - } - - SECTION("operator!=") { - auto pscalar2 = testing::eigen_scalar(); - auto& eigen_scalar2 = static_cast(*pscalar2); - eigen_scalar2.set_elem({}, 10.0); - - REQUIRE_FALSE(eigen_scalar != eigen_scalar2); - eigen_scalar2.set_elem({}, 2.0); - REQUIRE(eigen_scalar != eigen_scalar2); - } - - SECTION("virtual method overrides") { - SECTION("clone") { - REQUIRE(eigen_scalar.clone()->are_equal(eigen_scalar)); - REQUIRE(eigen_vector.clone()->are_equal(eigen_vector)); - REQUIRE(eigen_matrix.clone()->are_equal(eigen_matrix)); - } - - SECTION("are_equal") { - REQUIRE(eigen_scalar.are_equal(eigen_scalar)); - REQUIRE_FALSE(eigen_matrix.are_equal(eigen_scalar)); - } - - SECTION("addition_assignment") { - buffer_type output; - auto vi = eigen_vector("i"); - output.addition_assignment("i", vi, vi); - - auto corr = testing::eigen_vector(2); - corr->set_elem({0}, 20.0); - corr->set_elem({1}, 40.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("subtraction_assignment") { - buffer_type output; - auto vi = eigen_vector("i"); - output.subtraction_assignment("i", vi, vi); - - auto corr = testing::eigen_vector(2); - corr->set_elem({0}, 0.0); - 
corr->set_elem({1}, 0.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("multiplication_assignment") { - buffer_type output; - auto vi = eigen_vector("i"); - output.multiplication_assignment("i", vi, vi); - - auto corr = testing::eigen_vector(2); - corr->set_elem({0}, 100.0); - corr->set_elem({1}, 400.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("permute_assignment") { - buffer_type output; - auto mij = eigen_matrix("i,j"); - output.permute_assignment("j,i", mij); - - auto corr = testing::eigen_matrix(3, 2); - corr->set_elem({0, 0}, 10.0); - corr->set_elem({0, 1}, 40.0); - corr->set_elem({1, 0}, 20.0); - corr->set_elem({1, 1}, 50.0); - corr->set_elem({2, 0}, 30.0); - corr->set_elem({2, 1}, 60.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("scalar_multiplication") { - buffer_type output; - auto vi = eigen_vector("i"); - output.scalar_multiplication("i", 2.0, vi); - - auto corr = testing::eigen_vector(2); - corr->set_elem({0}, 20.0); - corr->set_elem({1}, 40.0); - - REQUIRE(output.are_equal(*corr)); - } - - SECTION("get_mutable_data_()") { - REQUIRE(defaulted.get_mutable_data() == nullptr); - REQUIRE(*eigen_scalar.get_mutable_data() == TestType{10.0}); - REQUIRE(*eigen_matrix.get_mutable_data() == TestType{10.0}); - } - - SECTION("get_immutable_data_() const") { - REQUIRE(std::as_const(defaulted).get_immutable_data() == nullptr); - REQUIRE(*std::as_const(eigen_scalar).get_immutable_data() == - TestType{10.0}); - REQUIRE(*std::as_const(eigen_matrix).get_immutable_data() == - TestType{10.0}); - } - - SECTION("get_elem_() const") { - TestType corr{10.0}; - REQUIRE(std::as_const(eigen_scalar).get_elem({}) == corr); - REQUIRE(std::as_const(eigen_vector).get_elem({0}) == corr); - REQUIRE(std::as_const(eigen_matrix).get_elem({0, 0}) == corr); - } - - SECTION("set_elem_()") { - eigen_vector.set_elem({0}, TestType{42.0}); - REQUIRE(eigen_vector.get_elem({0}) == TestType{42.0}); - } - - SECTION("get_data_() const") { - TestType corr{10.0}; - REQUIRE(std::as_const(eigen_scalar).get_data(0) == corr); - REQUIRE(std::as_const(eigen_vector).get_data(0) == corr); - REQUIRE(std::as_const(eigen_matrix).get_data(0) == corr); - } - - SECTION("set_data_()") { - eigen_vector.set_data(0, TestType{42.0}); - REQUIRE(eigen_vector.get_data(0) == TestType{42.0}); - } - - SECTION("fill_()") { - eigen_vector.fill(TestType{42.0}); - REQUIRE(eigen_vector.get_data(0) == TestType(42.0)); - REQUIRE(eigen_vector.get_data(1) == TestType(42.0)); - } - - SECTION("copy_()") { - auto data = std::vector(2, TestType(42.0)); - eigen_vector.copy(data); - REQUIRE(eigen_vector.get_data(0) == TestType(42.0)); - REQUIRE(eigen_vector.get_data(1) == TestType(42.0)); - } - } -} - -TEMPLATE_LIST_TEST_CASE("to_eigen_buffer", "", types::floating_point_types) { - using buffer_type = buffer::Eigen; - - auto pscalar = testing::eigen_scalar(); - auto& eigen_scalar = static_cast(*pscalar); - eigen_scalar.set_elem({}, 10.0); - - buffer::BufferBase& scalar_base = eigen_scalar; - REQUIRE(&buffer::to_eigen_buffer(scalar_base) == &eigen_scalar); - - const buffer::BufferBase& cscalar_base = eigen_scalar; - REQUIRE(&buffer::to_eigen_buffer(cscalar_base) == &eigen_scalar); -} diff --git a/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp b/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp deleted file mode 100644 index 33c13421..00000000 --- a/tests/cxx/unit_tests/tensorwrapper/buffer/mdbuffer.cpp +++ /dev/null @@ -1,455 +0,0 @@ -/* - * Copyright 2025 NWChemEx-Project - * - * Licensed under the Apache License, 
Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "../testing/testing.hpp" -#include -#include - -using namespace tensorwrapper; - -/* Testing notes: - * - * The various operations (addition_assignment, etc.) are not exhaustively - * tested here. These operations are implemented via visitors that dispatch to - * various backends. The visitors themselves are tested in their own unit tests. - * Here we assume the visitors work and spot check a couple of operations for - * to help catch any integration issues. - */ - -TEMPLATE_LIST_TEST_CASE("MDBuffer", "", types::floating_point_types) { - using buffer::MDBuffer; - using buffer_type = MDBuffer::buffer_type; - using shape_type = typename MDBuffer::shape_type; - using label_type = typename MDBuffer::label_type; - - TestType one(1.0), two(2.0), three(3.0), four(4.0); - std::vector data = {one, two, three, four}; - - shape_type scalar_shape({}); - shape_type vector_shape({4}); - shape_type matrix_shape({2, 2}); - - MDBuffer defaulted; - MDBuffer scalar(std::vector{one}, scalar_shape); - MDBuffer vector(data, vector_shape); - MDBuffer matrix(data, matrix_shape); - - SECTION("Ctors and assignment") { - SECTION("Default ctor") { - REQUIRE(defaulted.size() == 0); - REQUIRE(defaulted.shape() == shape_type()); - } - - SECTION("vector ctor") { - REQUIRE(scalar.size() == 1); - REQUIRE(scalar.shape() == scalar_shape); - REQUIRE(scalar.get_elem({}) == one); - - REQUIRE(vector.size() == 4); - REQUIRE(vector.shape() == vector_shape); - REQUIRE(vector.get_elem({0}) == one); - REQUIRE(vector.get_elem({1}) == two); - REQUIRE(vector.get_elem({2}) == three); - REQUIRE(vector.get_elem({3}) == four); - - REQUIRE(matrix.size() == 4); - REQUIRE(matrix.shape() == matrix_shape); - REQUIRE(matrix.get_elem({0, 0}) == one); - REQUIRE(matrix.get_elem({0, 1}) == two); - REQUIRE(matrix.get_elem({1, 0}) == three); - REQUIRE(matrix.get_elem({1, 1}) == four); - - REQUIRE_THROWS_AS(MDBuffer(data, scalar_shape), - std::invalid_argument); - } - - SECTION("FloatBuffer ctor") { - buffer_type buf(data); - - MDBuffer vector_buf(buf, vector_shape); - REQUIRE(vector_buf == vector); - - MDBuffer matrix_buf(buf, matrix_shape); - REQUIRE(matrix_buf == matrix); - - REQUIRE_THROWS_AS(MDBuffer(buf, scalar_shape), - std::invalid_argument); - } - - SECTION("Copy ctor") { - MDBuffer defaulted_copy(defaulted); - REQUIRE(defaulted_copy == defaulted); - - MDBuffer scalar_copy(scalar); - REQUIRE(scalar_copy == scalar); - - MDBuffer vector_copy(vector); - REQUIRE(vector_copy == vector); - - MDBuffer matrix_copy(matrix); - REQUIRE(matrix_copy == matrix); - } - - SECTION("Move ctor") { - MDBuffer defaulted_temp(defaulted); - MDBuffer defaulted_move(std::move(defaulted_temp)); - REQUIRE(defaulted_move == defaulted); - - MDBuffer scalar_temp(scalar); - MDBuffer scalar_move(std::move(scalar_temp)); - REQUIRE(scalar_move == scalar); - - MDBuffer vector_temp(vector); - MDBuffer vector_move(std::move(vector_temp)); - REQUIRE(vector_move == vector); - - MDBuffer matrix_temp(matrix); - MDBuffer 
matrix_move(std::move(matrix_temp)); - REQUIRE(matrix_move == matrix); - } - - SECTION("Copy assignment") { - MDBuffer defaulted_copy; - auto pdefaulted_copy = &(defaulted_copy = defaulted); - REQUIRE(defaulted_copy == defaulted); - REQUIRE(pdefaulted_copy == &defaulted_copy); - - MDBuffer scalar_copy; - auto pscalar_copy = &(scalar_copy = scalar); - REQUIRE(scalar_copy == scalar); - REQUIRE(pscalar_copy == &scalar_copy); - - MDBuffer vector_copy; - auto pvector_copy = &(vector_copy = vector); - REQUIRE(vector_copy == vector); - REQUIRE(pvector_copy == &vector_copy); - - MDBuffer matrix_copy; - auto pmatrix_copy = &(matrix_copy = matrix); - REQUIRE(matrix_copy == matrix); - REQUIRE(pmatrix_copy == &matrix_copy); - } - - SECTION("Move assignment") { - MDBuffer defaulted_temp(defaulted); - MDBuffer defaulted_move; - auto pdefaulted_move = - &(defaulted_move = std::move(defaulted_temp)); - REQUIRE(defaulted_move == defaulted); - REQUIRE(pdefaulted_move == &defaulted_move); - - MDBuffer scalar_temp(scalar); - MDBuffer scalar_move; - auto pscalar_move = &(scalar_move = std::move(scalar_temp)); - REQUIRE(scalar_move == scalar); - REQUIRE(pscalar_move == &scalar_move); - - MDBuffer vector_temp(vector); - MDBuffer vector_move; - auto pvector_move = &(vector_move = std::move(vector_temp)); - REQUIRE(vector_move == vector); - REQUIRE(pvector_move == &vector_move); - - MDBuffer matrix_temp(matrix); - MDBuffer matrix_move; - auto pmatrix_move = &(matrix_move = std::move(matrix_temp)); - REQUIRE(matrix_move == matrix); - REQUIRE(pmatrix_move == &matrix_move); - } - } - - SECTION("shape") { - REQUIRE(defaulted.shape() == shape_type()); - REQUIRE(scalar.shape() == scalar_shape); - REQUIRE(vector.shape() == vector_shape); - REQUIRE(matrix.shape() == matrix_shape); - } - - SECTION("size") { - REQUIRE(defaulted.size() == 0); - REQUIRE(scalar.size() == 1); - REQUIRE(vector.size() == 4); - REQUIRE(matrix.size() == 4); - } - - SECTION("get_elem") { - REQUIRE_THROWS_AS(defaulted.get_elem({}), std::out_of_range); - - REQUIRE(scalar.get_elem({}) == one); - REQUIRE_THROWS_AS(scalar.get_elem({0}), std::out_of_range); - - REQUIRE(vector.get_elem({0}) == one); - REQUIRE(vector.get_elem({1}) == two); - REQUIRE(vector.get_elem({2}) == three); - REQUIRE(vector.get_elem({3}) == four); - REQUIRE_THROWS_AS(vector.get_elem({4}), std::out_of_range); - - REQUIRE(matrix.get_elem({0, 0}) == one); - REQUIRE(matrix.get_elem({0, 1}) == two); - REQUIRE(matrix.get_elem({1, 0}) == three); - REQUIRE(matrix.get_elem({1, 1}) == four); - REQUIRE_THROWS_AS(matrix.get_elem({2, 0}), std::out_of_range); - } - - SECTION("set_elem") { - REQUIRE_THROWS_AS(defaulted.set_elem({}, one), std::out_of_range); - - REQUIRE(scalar.get_elem({}) != two); - scalar.set_elem({}, two); - REQUIRE(scalar.get_elem({}) == two); - - REQUIRE(vector.get_elem({2}) != four); - vector.set_elem({2}, four); - REQUIRE(vector.get_elem({2}) == four); - - REQUIRE(matrix.get_elem({1, 0}) != one); - matrix.set_elem({1, 0}, one); - REQUIRE(matrix.get_elem({1, 0}) == one); - } - - SECTION("operator==") { - // Same object - REQUIRE(defaulted == defaulted); - - MDBuffer scalar_copy(std::vector{one}, scalar_shape); - REQUIRE(scalar == scalar_copy); - - MDBuffer vector_copy(data, vector_shape); - REQUIRE(vector == vector_copy); - - MDBuffer matrix_copy(data, matrix_shape); - REQUIRE(matrix == matrix_copy); - - // Different ranks - REQUIRE_FALSE(scalar == vector); - REQUIRE_FALSE(vector == matrix); - REQUIRE_FALSE(scalar == matrix); - - // Different shapes - shape_type 
matrix_shape2({4, 1}); - REQUIRE_FALSE(scalar == MDBuffer(data, matrix_shape2)); - - // Different values - std::vector diff_data = {two, three, four, one}; - MDBuffer scalar_diff(std::vector{two}, scalar_shape); - REQUIRE_FALSE(scalar == scalar_diff); - REQUIRE_FALSE(vector == MDBuffer(diff_data, vector_shape)); - REQUIRE_FALSE(matrix == MDBuffer(diff_data, matrix_shape)); - } - - SECTION("addition_assignment_") { - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.addition_assignment(labels, scalar(labels), scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(2.0)); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.addition_assignment(labels, vector(labels), vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(2.0)); - REQUIRE(result.get_elem({1}) == TestType(4.0)); - REQUIRE(result.get_elem({2}) == TestType(6.0)); - REQUIRE(result.get_elem({3}) == TestType(8.0)); - } - - SECTION("matrix") { - label_type labels("i,j"); - MDBuffer result; - result.addition_assignment(labels, matrix(labels), matrix(labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(2.0)); - REQUIRE(result.get_elem({0, 1}) == TestType(4.0)); - REQUIRE(result.get_elem({1, 0}) == TestType(6.0)); - REQUIRE(result.get_elem({1, 1}) == TestType(8.0)); - } - } - - SECTION("subtraction_assignment_") { - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.subtraction_assignment(labels, scalar(labels), - scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(0.0)); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.subtraction_assignment(labels, vector(labels), - vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(0.0)); - REQUIRE(result.get_elem({1}) == TestType(0.0)); - REQUIRE(result.get_elem({2}) == TestType(0.0)); - REQUIRE(result.get_elem({3}) == TestType(0.0)); - } - - SECTION("matrix") { - label_type labels("i,j"); - MDBuffer result; - result.subtraction_assignment(labels, matrix(labels), - matrix(labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(0.0)); - REQUIRE(result.get_elem({0, 1}) == TestType(0.0)); - REQUIRE(result.get_elem({1, 0}) == TestType(0.0)); - REQUIRE(result.get_elem({1, 1}) == TestType(0.0)); - } - } - - SECTION("multiplication_assignment_") { - // N.b., dispatching among hadamard, contraction, etc. is the visitor's - // responsibility and happens there. Here we just test hadamard. 
- - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.multiplication_assignment(labels, scalar(labels), - scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(1.0)); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.multiplication_assignment(labels, vector(labels), - vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(1.0)); - REQUIRE(result.get_elem({1}) == TestType(4.0)); - REQUIRE(result.get_elem({2}) == TestType(9.0)); - REQUIRE(result.get_elem({3}) == TestType(16.0)); - } - - SECTION("matrix") { - label_type labels("i,j"); - MDBuffer result; - result.multiplication_assignment(labels, matrix(labels), - matrix(labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(1.0)); - REQUIRE(result.get_elem({0, 1}) == TestType(4.0)); - REQUIRE(result.get_elem({1, 0}) == TestType(9.0)); - REQUIRE(result.get_elem({1, 1}) == TestType(16.0)); - } - } - - SECTION("scalar_multiplication_") { - // TODO: Test with other scalar types when public API supports it - using scalar_type = double; - scalar_type scalar_value_{2.0}; - TestType scalar_value(scalar_value_); - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.scalar_multiplication(labels, scalar_value_, scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(1.0) * scalar_value); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.scalar_multiplication(labels, scalar_value_, vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(1.0) * scalar_value); - REQUIRE(result.get_elem({1}) == TestType(2.0) * scalar_value); - REQUIRE(result.get_elem({2}) == TestType(3.0) * scalar_value); - REQUIRE(result.get_elem({3}) == TestType(4.0) * scalar_value); - } - - SECTION("matrix") { - label_type rhs_labels("i,j"); - label_type lhs_labels("j,i"); - MDBuffer result; - result.scalar_multiplication(lhs_labels, scalar_value_, - matrix(rhs_labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(1.0) * scalar_value); - REQUIRE(result.get_elem({0, 1}) == TestType(3.0) * scalar_value); - REQUIRE(result.get_elem({1, 0}) == TestType(2.0) * scalar_value); - REQUIRE(result.get_elem({1, 1}) == TestType(4.0) * scalar_value); - } - } - - SECTION("permute_assignment_") { - SECTION("scalar") { - label_type labels(""); - MDBuffer result; - result.permute_assignment(labels, scalar(labels)); - REQUIRE(result.shape() == scalar_shape); - REQUIRE(result.get_elem({}) == TestType(1.0)); - } - - SECTION("vector") { - label_type labels("i"); - MDBuffer result; - result.permute_assignment(labels, vector(labels)); - REQUIRE(result.shape() == vector_shape); - REQUIRE(result.get_elem({0}) == TestType(1.0)); - REQUIRE(result.get_elem({1}) == TestType(2.0)); - REQUIRE(result.get_elem({2}) == TestType(3.0)); - REQUIRE(result.get_elem({3}) == TestType(4.0)); - } - - SECTION("matrix") { - label_type rhs_labels("i,j"); - label_type lhs_labels("j,i"); - MDBuffer result; - result.permute_assignment(lhs_labels, matrix(rhs_labels)); - REQUIRE(result.shape() == matrix_shape); - REQUIRE(result.get_elem({0, 0}) == TestType(1.0)); - REQUIRE(result.get_elem({0, 1}) == TestType(3.0)); - REQUIRE(result.get_elem({1, 0}) == TestType(2.0)); - REQUIRE(result.get_elem({1, 1}) == TestType(4.0)); - } - } - - SECTION("to_string") { - 
REQUIRE(defaulted.to_string().empty()); - REQUIRE_FALSE(scalar.to_string().empty()); - REQUIRE_FALSE(vector.to_string().empty()); - REQUIRE_FALSE(matrix.to_string().empty()); - } - - SECTION("add_to_stream") { - std::stringstream ss; - SECTION("defaulted") { - defaulted.add_to_stream(ss); - REQUIRE(ss.str().empty()); - } - SECTION("scalar") { - scalar.add_to_stream(ss); - REQUIRE_FALSE(ss.str().empty()); - } - SECTION("vector") { - vector.add_to_stream(ss); - REQUIRE_FALSE(ss.str().empty()); - } - SECTION("matrix") { - matrix.add_to_stream(ss); - REQUIRE_FALSE(ss.str().empty()); - } - } -} diff --git a/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp b/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp index adb171f9..672f2342 100644 --- a/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/diis/diis.cpp @@ -18,27 +18,28 @@ #include #include -using diis_type = tensorwrapper::diis::DIIS; -using tensor_type = tensorwrapper::Tensor; -using elements_type = std::vector; +using diis_type = tensorwrapper::diis::DIIS; +using tensor_type = tensorwrapper::Tensor; template -tensor_type make_tensor(elements_type elems) { - auto pbuffer = tensorwrapper::testing::eigen_matrix(2, 2); - pbuffer->set_elem({0, 0}, elems[0]); - pbuffer->set_elem({0, 1}, elems[1]); - pbuffer->set_elem({1, 0}, elems[2]); - pbuffer->set_elem({1, 1}, elems[3]); - auto pshape = pbuffer->layout().shape().clone(); +tensor_type make_tensor(std::vector elems) { + using namespace tensorwrapper; + shape::Smooth shape{2, 2}; + buffer::Contiguous buffer(std::move(elems), shape); + auto pbuffer = std::make_unique(std::move(buffer)); + auto pshape = pbuffer->layout().shape().clone(); return tensor_type(std::move(pshape), std::move(pbuffer)); } TEMPLATE_LIST_TEST_CASE("DIIS", "", tensorwrapper::types::floating_point_types) { // Inputs - tensor_type i1 = make_tensor({1.0, 2.0, 3.0, 4.0}); - tensor_type i2 = make_tensor({6.0, 5.0, 8.0, 7.0}); - tensor_type i3 = make_tensor({12.0, 11.0, 10.0, 9.0}); + std::vector i1_data{1.0, 2.0, 3.0, 4.0}; + std::vector i2_data{6.0, 5.0, 8.0, 7.0}; + std::vector i3_data{12.0, 11.0, 10.0, 9.0}; + tensor_type i1 = make_tensor(i1_data); + tensor_type i2 = make_tensor(i2_data); + tensor_type i3 = make_tensor(i3_data); SECTION("Typedefs") { SECTION("size_type") { @@ -72,10 +73,13 @@ TEMPLATE_LIST_TEST_CASE("DIIS", "", SECTION("extrapolate") { // Outputs - tensor_type corr1 = make_tensor({1.0, 2.0, 3.0, 4.0}); - tensor_type corr2 = make_tensor({12.0, 8.6, 14.0, 10.6}); - tensor_type corr3 = make_tensor( - {15.35294118, 14.35294118, 11.11764706, 10.11764706}); + std::vector v0{1.0, 2.0, 3.0, 4.0}; + std::vector v1{12.0, 8.6, 14.0, 10.6}; + std::vector v2{15.35294118, 14.35294118, 11.11764706, + 10.11764706}; + tensor_type corr1 = make_tensor(v0); + tensor_type corr2 = make_tensor(v1); + tensor_type corr3 = make_tensor(v2); // Call extrapolate enough to require removing an old value auto diis = diis_type(2); diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp index 6e2b560a..7f9b27b6 100644 --- a/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/dsl.cpp @@ -94,9 +94,9 @@ TEST_CASE("DSLr : buffer::Eigen") { auto& scalar2 = *pscalar2; auto& corr = *pcorr; - scalar0.set_data(0, 1.0); - scalar1.set_data(0, 2.0); - scalar2.set_data(0, 3.0); + scalar0.set_elem({}, float{1.0}); + scalar1.set_elem({}, float{2.0}); + scalar2.set_elem({}, float{3.0}); SECTION("assignment") { SECTION("scalar") 
{ diff --git a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp index 40a86519..887f64c1 100644 --- a/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/dsl/pairwise_parser.cpp @@ -134,9 +134,10 @@ TEST_CASE("PairwiseParser : buffer::Eigen") { auto& scalar2 = *pscalar2; auto& corr = *pcorr; - scalar0.set_data(0, 1.0); - scalar1.set_data(0, 2.0); - scalar2.set_data(0, 3.0); + float one{1.0f}, two{2.0f}, three{3.0f}; + scalar0.set_elem({}, one); + scalar1.set_elem({}, two); + scalar2.set_elem({}, three); dsl::PairwiseParser p; diff --git a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp index e9858dab..d874031b 100644 --- a/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/operations/approximately_equal.cpp @@ -32,16 +32,16 @@ using namespace operations; TEMPLATE_LIST_TEST_CASE("approximately_equal", "", types::floating_point_types) { auto pscalar = testing::eigen_scalar(); - pscalar->set_data(0, 42.0); + pscalar->set_elem({}, TestType{42.0}); auto pvector = testing::eigen_vector(2); - pvector->set_data(0, 1.23); - pvector->set_data(1, 2.34); + pvector->set_elem({0}, TestType{1.23}); + pvector->set_elem({1}, TestType{2.34}); auto pscalar2 = testing::eigen_scalar(); - pscalar2->set_data(0, 42.0); + pscalar2->set_elem({}, TestType{42.0}); auto pvector2 = testing::eigen_vector(2); - pvector2->set_data(0, 1.23); - pvector2->set_data(1, 2.34); + pvector2->set_elem({0}, TestType{1.23}); + pvector2->set_elem({1}, TestType{2.34}); shape::Smooth s0{}; shape::Smooth s1{2}; @@ -65,9 +65,9 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "", } SECTION("Differ by more than default tolerance") { - double value = 1e-1; - pscalar2->set_data(0, 42.0 + value); - pvector2->set_data(0, 1.23 + value); + TestType value = 1e-1; + pscalar2->set_elem({}, TestType{42.0} + value); + pvector2->set_elem({0}, TestType{1.23} + value); Tensor scalar2(s0, std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); REQUIRE_FALSE(approximately_equal(scalar, scalar2)); @@ -77,9 +77,9 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "", } SECTION("Differ by less than default tolerance") { - double value = 1e-17; - pscalar2->set_data(0, 42.0 + value); - pvector2->set_data(0, 1.23 + value); + TestType value = 1e-17; + pscalar2->set_elem({}, TestType{42.0} + value); + pvector2->set_elem({0}, TestType{1.23} + value); Tensor scalar2(s0, std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); REQUIRE(approximately_equal(scalar, scalar2)); @@ -89,9 +89,9 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "", } SECTION("Differ by more than provided tolerance") { - float value = 1e-1; - pscalar2->set_data(0, 43.0); - pvector2->set_data(0, 2.23); + double value = 1e-1; + pscalar2->set_elem({}, TestType{43.0}); + pvector2->set_elem({0}, TestType{2.23}); Tensor scalar2(s0, std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); REQUIRE_FALSE(approximately_equal(scalar, scalar2, value)); @@ -101,9 +101,9 @@ TEMPLATE_LIST_TEST_CASE("approximately_equal", "", } SECTION("Differ by less than provided tolerance") { - double value = 1e-10; - pscalar2->set_data(0, 42.0 + value); - pvector2->set_data(0, 1.23 + value); + TestType value = 1e-10; + pscalar2->set_elem({}, TestType{42.0} + value); + pvector2->set_elem({0}, TestType{1.23} + value); Tensor scalar2(s0, 
std::move(pscalar2)); Tensor vector2(s1, std::move(pvector2)); REQUIRE(approximately_equal(scalar, scalar2, 1e-1)); diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp index fd4e3f63..7eb59543 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_factory.cpp @@ -51,8 +51,8 @@ TEST_CASE("TensorFactory") { layout::Physical physical(shape, g, sparsity); auto pphysical = physical.clone_as(); - allocator::Eigen alloc(rv); - auto pbuffer = alloc.allocate(std::move(pphysical)); + std::vector data{0.0}; + auto pbuffer = std::make_unique(data, shape); auto buffer_address = pbuffer.get(); SECTION("default_logical_symmetry") { @@ -88,11 +88,6 @@ TEST_CASE("TensorFactory") { REQUIRE(result->are_equal(physical)); } - SECTION("default_allocator") { - auto result = TensorFactory::default_allocator(physical, rv); - REQUIRE(result->are_equal(alloc)); - } - SECTION("construct(input)") { SECTION("Can create default pimpl") { auto pdefaulted = TensorFactory::construct(TensorInput{}); diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp index d7657add..93390585 100644 --- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp +++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_input.cpp @@ -15,6 +15,7 @@ */ #include "../../testing/testing.hpp" +#include using namespace tensorwrapper; @@ -36,8 +37,9 @@ TEST_CASE("TensorInput") { sparsity::Pattern sparsity(2); layout::Logical logical(shape, g, sparsity); layout::Physical physical(shape, g, sparsity); - allocator::Eigen alloc(rv); - auto pbuffer = alloc.construct(42.0); + + std::vector data{42.0}; + auto pbuffer = std::make_unique(data, shape::Smooth{}); auto& buffer = *pbuffer; detail_::TensorInput defaulted; @@ -51,7 +53,6 @@ TEST_CASE("TensorInput") { REQUIRE(defaulted.m_psparsity == nullptr); REQUIRE(defaulted.m_plogical == nullptr); REQUIRE(defaulted.m_pphysical == nullptr); - REQUIRE(defaulted.m_palloc == nullptr); REQUIRE(defaulted.m_pbuffer == nullptr); REQUIRE(defaulted.m_rv == rv); } @@ -62,7 +63,6 @@ TEST_CASE("TensorInput") { REQUIRE(scalar.m_psparsity == nullptr); REQUIRE(scalar.m_plogical == nullptr); REQUIRE(scalar.m_pphysical == nullptr); - REQUIRE(scalar.m_palloc == nullptr); REQUIRE(scalar.m_pbuffer == nullptr); REQUIRE(scalar.m_rv == rv); @@ -80,7 +80,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_psparsity == nullptr); REQUIRE(i.m_plogical == nullptr); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -93,7 +92,6 @@ TEST_CASE("TensorInput") { REQUIRE(symm_matrix.m_psparsity == nullptr); REQUIRE(symm_matrix.m_plogical == nullptr); REQUIRE(symm_matrix.m_pphysical == nullptr); - REQUIRE(symm_matrix.m_palloc == nullptr); REQUIRE(symm_matrix.m_pbuffer == nullptr); REQUIRE(symm_matrix.m_rv == rv); @@ -110,7 +108,6 @@ TEST_CASE("TensorInput") { REQUIRE(i.m_psparsity == nullptr); REQUIRE(i.m_plogical == nullptr); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == rv); @@ -124,7 +121,6 @@ TEST_CASE("TensorInput") { REQUIRE(*i.m_psparsity == sparsity); REQUIRE(i.m_plogical == nullptr); REQUIRE(i.m_pphysical == nullptr); - REQUIRE(i.m_palloc == nullptr); REQUIRE(i.m_pbuffer == nullptr); REQUIRE(i.m_rv == 
 
@@ -141,7 +137,6 @@ TEST_CASE("TensorInput") {
         REQUIRE(i.m_psparsity.get() == psparsity_address);
         REQUIRE(i.m_plogical == nullptr);
         REQUIRE(i.m_pphysical == nullptr);
-        REQUIRE(i.m_palloc == nullptr);
         REQUIRE(i.m_pbuffer == nullptr);
         REQUIRE(i.m_rv == rv);
 
@@ -155,7 +150,6 @@ TEST_CASE("TensorInput") {
         REQUIRE(i.m_psparsity == nullptr);
         REQUIRE(i.m_plogical->are_equal(logical));
         REQUIRE(i.m_pphysical == nullptr);
-        REQUIRE(i.m_palloc == nullptr);
         REQUIRE(i.m_pbuffer == nullptr);
         REQUIRE(i.m_rv == rv);
 
@@ -172,7 +166,6 @@ TEST_CASE("TensorInput") {
         REQUIRE(i.m_plogical->are_equal(logical));
         REQUIRE(i.m_plogical.get() == plogical_address);
         REQUIRE(i.m_pphysical == nullptr);
-        REQUIRE(i.m_palloc == nullptr);
         REQUIRE(i.m_pbuffer == nullptr);
         REQUIRE(i.m_rv == rv);
 
@@ -186,7 +179,6 @@ TEST_CASE("TensorInput") {
         REQUIRE(i.m_psparsity == nullptr);
         REQUIRE(i.m_plogical->are_equal(logical));
         REQUIRE(i.m_pphysical->are_equal(physical));
-        REQUIRE(i.m_palloc == nullptr);
         REQUIRE(i.m_pbuffer == nullptr);
         REQUIRE(i.m_rv == rv);
 
@@ -203,73 +195,36 @@ TEST_CASE("TensorInput") {
         REQUIRE(i.m_plogical->are_equal(logical));
         REQUIRE(i.m_pphysical->are_equal(physical));
         REQUIRE(i.m_pphysical.get() == pphysical_address);
-        REQUIRE(i.m_palloc == nullptr);
         REQUIRE(i.m_pbuffer == nullptr);
         REQUIRE(i.m_rv == rv);
 
         REQUIRE(i.has_physical_layout());
     }
 
-    SECTION("Allocator (by value)") {
-        detail_::TensorInput i(physical, alloc, logical);
-        REQUIRE(i.m_pshape == nullptr);
-        REQUIRE(i.m_psymmetry == nullptr);
-        REQUIRE(i.m_psparsity == nullptr);
-        REQUIRE(i.m_plogical->are_equal(logical));
-        REQUIRE(i.m_pphysical->are_equal(physical));
-        REQUIRE(i.m_palloc->are_equal(alloc));
-        REQUIRE(i.m_pbuffer == nullptr);
-        REQUIRE(i.m_rv == rv);
-
-        REQUIRE(i.has_allocator());
-    }
-
-    SECTION("Allocator (by pointer)") {
-        auto palloc = alloc.clone();
-        auto alloc_address = palloc.get();
-        detail_::TensorInput i(physical, std::move(palloc), logical);
-        REQUIRE(i.m_pshape == nullptr);
-        REQUIRE(i.m_psymmetry == nullptr);
-        REQUIRE(i.m_psparsity == nullptr);
-        REQUIRE(i.m_plogical->are_equal(logical));
-        REQUIRE(i.m_pphysical->are_equal(physical));
-        REQUIRE(i.m_palloc->are_equal(alloc));
-        REQUIRE(i.m_palloc.get() == alloc_address);
-        REQUIRE(i.m_pbuffer == nullptr);
-        REQUIRE(i.m_rv == rv);
-
-        REQUIRE(i.has_allocator());
-    }
-
     SECTION("Buffer (by value)") {
-        detail_::TensorInput i(physical, alloc, logical, buffer);
+        detail_::TensorInput i(physical, logical, buffer);
         REQUIRE(i.m_pshape == nullptr);
         REQUIRE(i.m_psymmetry == nullptr);
         REQUIRE(i.m_psparsity == nullptr);
         REQUIRE(i.m_plogical->are_equal(logical));
         REQUIRE(i.m_pphysical->are_equal(physical));
-        REQUIRE(i.m_palloc->are_equal(alloc));
-        // REQUIRE(i.m_pbuffer->are_equal(buffer));
+        REQUIRE(i.m_pbuffer->are_equal(buffer));
         REQUIRE(i.m_rv == rv);
-
         REQUIRE(i.has_buffer());
     }
 
     SECTION("Buffer (by pointer)") {
         auto pbuffer = buffer.clone();
         auto buffer_address = pbuffer.get();
-        detail_::TensorInput i(physical, alloc, logical,
-                               std::move(pbuffer));
+        detail_::TensorInput i(physical, logical, std::move(pbuffer));
         REQUIRE(i.m_pshape == nullptr);
         REQUIRE(i.m_psymmetry == nullptr);
         REQUIRE(i.m_psparsity == nullptr);
         REQUIRE(i.m_plogical->are_equal(logical));
         REQUIRE(i.m_pphysical->are_equal(physical));
-        REQUIRE(i.m_palloc->are_equal(alloc));
-        // REQUIRE(i.m_pbuffer->are_equal(buffer));
+        REQUIRE(i.m_pbuffer->are_equal(buffer));
         REQUIRE(i.m_pbuffer.get() == buffer_address);
         REQUIRE(i.m_rv == rv);
-
         REQUIRE(i.has_buffer());
     }
 
@@ -280,7 +235,6 @@ TEST_CASE("TensorInput") {
         REQUIRE(i.m_psparsity == nullptr);
         REQUIRE(i.m_plogical == nullptr);
         REQUIRE(i.m_pphysical == nullptr);
-        REQUIRE(i.m_palloc == nullptr);
         REQUIRE(i.m_pbuffer == nullptr);
         REQUIRE(i.m_rv == rv);
     }
@@ -317,13 +271,6 @@ TEST_CASE("TensorInput") {
         REQUIRE(w_physical.has_physical_layout());
     }
 
-    SECTION("has_allocator") {
-        REQUIRE_FALSE(defaulted.has_allocator());
-
-        detail_::TensorInput w_allocator(alloc);
-        REQUIRE(w_allocator.has_allocator());
-    }
-
     SECTION("has_buffer") {
         REQUIRE_FALSE(defaulted.has_buffer());
 
diff --git a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp
index 0c92de48..efa562b6 100644
--- a/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/tensor/detail_/tensor_pimpl.cpp
@@ -15,7 +15,7 @@
  */
 
 #include "../../testing/testing.hpp"
-#include
+#include
 #include
 #include
 #include
diff --git a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
index 97cd8d75..9da73299 100644
--- a/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
+++ b/tests/cxx/unit_tests/tensorwrapper/testing/eigen_buffers.hpp
@@ -26,65 +26,62 @@
 
 namespace tensorwrapper::testing {
 
-template<typename FloatType>
-auto make_allocator() {
-    parallelzone::runtime::RuntimeView rv;
-    return allocator::Eigen<FloatType>(rv);
-}
-
 template<typename FloatType>
 auto eigen_scalar(FloatType value = 42.0) {
-    auto alloc = make_allocator<FloatType>();
-    return alloc.construct(value);
+    shape::Smooth shape{};
+    std::vector<FloatType> data{value};
+    return std::make_unique<buffer::Contiguous<FloatType>>(std::move(data),
+                                                           std::move(shape));
 }
 
 template<typename FloatType>
 auto eigen_vector(std::size_t n = 5) {
-    layout::Physical l(shape::Smooth{n});
-    auto alloc = make_allocator<FloatType>();
-    auto buffer = alloc.allocate(l);
-    for(std::size_t i = 0; i < n; ++i) buffer->set_elem({i}, i);
-    return buffer;
+    shape::Smooth shape{n};
+    std::vector<FloatType> data(n);
+    for(std::size_t i = 0; i < n; ++i) data[i] = static_cast<FloatType>(i);
+    return std::make_unique<buffer::Contiguous<FloatType>>(std::move(data),
+                                                           std::move(shape));
 }
 
 template<typename FloatType>
 auto eigen_matrix(std::size_t n = 2, std::size_t m = 2) {
-    layout::Physical l(shape::Smooth{n, m});
-    auto alloc = make_allocator<FloatType>();
-    auto buffer = alloc.allocate(l);
+    shape::Smooth shape{n, m};
+    std::vector<FloatType> data(n * m);
     double counter = 1.0;
     for(decltype(n) i = 0; i < n; ++i)
-        for(decltype(m) j = 0; j < m; ++j) buffer->set_elem({i, j}, counter++);
-    return buffer;
+        for(decltype(m) j = 0; j < m; ++j)
+            data[i * m + j] = static_cast<FloatType>(counter++);
+    return std::make_unique<buffer::Contiguous<FloatType>>(std::move(data),
+                                                           std::move(shape));
 }
 
 template<typename FloatType>
 auto eigen_tensor3(std::size_t n = 2, std::size_t m = 2, std::size_t l = 2) {
-    layout::Physical layout(shape::Smooth{n, m, l});
-    auto alloc = make_allocator<FloatType>();
-    auto buffer = alloc.allocate(layout);
+    shape::Smooth shape{n, m, l};
+    std::vector<FloatType> data(shape.size());
+    buffer::Contiguous<FloatType> buffer(std::move(data), std::move(shape));
     double counter = 1.0;
     for(decltype(n) i = 0; i < n; ++i)
         for(decltype(m) j = 0; j < m; ++j)
            for(decltype(l) k = 0; k < l; ++k)
-                buffer->set_elem({i, j, k}, counter++);
-    return buffer;
+                buffer.set_elem({i, j, k}, static_cast<FloatType>(counter++));
+    return std::make_unique<buffer::Contiguous<FloatType>>(std::move(buffer));
 }
 
 template<typename FloatType>
 auto eigen_tensor4(std::array<std::size_t, 4> extents = {2, 2, 2, 2}) {
-    shape::Smooth shape{extents[0], extents[1], extents[2], extents[3]};
-    layout::Physical layout(shape);
-    auto alloc = make_allocator<FloatType>();
-    auto buffer = alloc.allocate(layout);
+    shape::Smooth shape(extents.begin(), extents.end());
+    std::vector<FloatType> data(shape.size());
+    buffer::Contiguous<FloatType> buffer(std::move(data), std::move(shape));
     double counter = 1.0;
-    decltype(extents) i;
-    for(i[0] = 0; i[0] < extents[0]; ++i[0])
-        for(i[1] = 0; i[1] < extents[1]; ++i[1])
-            for(i[2] = 0; i[2] < extents[2]; ++i[2])
-                for(i[3] = 0; i[3] < extents[3]; ++i[3])
-                    buffer->set_elem({i[0], i[1], i[2], i[3]}, counter++);
-    return buffer;
+    for(std::size_t i = 0; i < extents[0]; ++i)
+        for(decltype(i) j = 0; j < extents[1]; ++j)
+            for(decltype(i) k = 0; k < extents[2]; ++k)
+                for(decltype(i) l = 0; l < extents[3]; ++l)
+                    buffer.set_elem({i, j, k, l},
+                                    static_cast<FloatType>(counter++));
+
+    return std::make_unique<buffer::Contiguous<FloatType>>(std::move(buffer));
 }
 
 } // namespace tensorwrapper::testing
diff --git a/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp b/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp
index 93e5c0ec..77673b1e 100644
--- a/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp
+++ b/tests/cxx/unit_tests/tensorwrapper/utilities/block_diagonal_matrix.cpp
@@ -52,17 +52,18 @@ TEMPLATE_LIST_TEST_CASE("block_diagonal_matrix", "",
     SECTION("All matrices are square") {
         shape::Smooth corr_shape{5, 5};
         layout::Physical corr_layout(corr_shape);
-        auto allocator = make_allocator<TestType>();
-        auto corr_buffer = allocator.allocate(corr_layout);
+        auto corr_buffer = buffer::make_contiguous<TestType>(corr_shape);
         double counter1 = 1.0, counter2 = 1.0;
         for(std::size_t i = 0; i < 5; ++i) {
             for(std::size_t j = 0; j < 5; ++j) {
                 if(i >= 2 and j >= 2)
-                    corr_buffer->set_elem({i, j}, counter1++);
+                    corr_buffer.set_elem({i, j},
+                                         static_cast<TestType>(counter1++));
                 else if(i < 2 and j < 2)
-                    corr_buffer->set_elem({i, j}, counter2++);
+                    corr_buffer.set_elem({i, j},
+                                         static_cast<TestType>(counter2++));
                 else
-                    corr_buffer->set_elem({i, j}, 0.0);
+                    corr_buffer.set_elem({i, j}, TestType{0.0});
             }
         }
         Tensor corr(corr_shape, std::move(corr_buffer));
diff --git a/tests/cxx/unit_tests/tensorwrapper/utilities/floating_point_dispatch.cpp b/tests/cxx/unit_tests/tensorwrapper/utilities/floating_point_dispatch.cpp
deleted file mode 100644
index f04922c9..00000000
--- a/tests/cxx/unit_tests/tensorwrapper/utilities/floating_point_dispatch.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright 2025 NWChemEx-Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "../testing/testing.hpp"
-
-using namespace tensorwrapper;
-using namespace tensorwrapper::utilities;
-
-struct Kernel {
-    template<typename FloatType>
-    void run(buffer::BufferBase& buffer) {
-        auto corr = testing::eigen_matrix<FloatType>();
-        REQUIRE(corr->are_equal(buffer));
-    }
-
-    template<typename FloatType>
-    bool run(buffer::BufferBase& buffer, buffer::BufferBase& corr) {
-        return corr.are_equal(buffer);
-    }
-};
-
-TEMPLATE_LIST_TEST_CASE("floating_point_dispatch", "",
-                        types::floating_point_types) {
-    Kernel kernel;
-    auto tensor = testing::eigen_matrix<TestType>();
-
-    SECTION("Single input, no return") {
-        floating_point_dispatch(kernel, *tensor);
-    }
-
-    SECTION("Two inputs and a return") {
-        REQUIRE(floating_point_dispatch(kernel, *tensor, *tensor));
-    }
-}