Commit 9be40a5

Merge pull request #121 from marty1885/master

Make tensor support iteration

marty1885 committed Dec 14, 2019
2 parents 9f18299 + 9281a71
Showing 5 changed files with 115 additions and 8 deletions.
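In short, this PR lets a Tensor be iterated along its first dimension, yielding a view per row. A minimal usage sketch of the API added below; the umbrella include path and main() scaffolding are assumptions, not part of this diff:

#include <Etaler/Etaler.hpp> // assumed umbrella header
using namespace et;

int main()
{
    Tensor t = ones({3, 4});
    for(auto row : t)          // each `row` is a Tensor view of shape {4}
        row.assign(constant({4}, 42));
}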
5 changes: 2 additions & 3 deletions Etaler/Backends/OpenCLBackend.cpp
@@ -519,7 +519,6 @@ std::shared_ptr<TensorImpl> OpenCLBackend::reverseBurst(const TensorImpl* x)
std::vector<uint32_t> seed1(global_size);
std::vector<uint32_t> seed2(global_size);

- for(auto& v : seed1) v = rng();
for(auto& v : seed1) v = rng();

auto s1 = createTensor({global_size}, DType::Int32, seed1.data());
@@ -670,7 +669,7 @@ int location_func$ID(int location)

replaceAll(func, "$STRIDE", to_string(x->stride()));
replaceAll(func, "$BIAS", std::to_string(x->offset()));
- return func;
+ return func;
}

static std::vector<std::string> jitCopyFromView(const TensorImpl* x)
@@ -730,7 +729,7 @@ kernel void copy(global Type* restrict x, global Type* restrict y)
std::shared_ptr<TensorImpl> OpenCLBackend::realize(const TensorImpl* x)
{
requireProperties(x, this);
- if(x->iscontiguous() == true)
+ if(x->isplain() == true)
return copy(x);

std::vector<std::string> conversion = jitCopyFromView(x);
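For context, realize() materializes a strided view into its own buffer; the hunk above now gates the fast path on isplain() rather than iscontiguous(). A small sketch of the behavior, inferred from the tests further down rather than taken from this diff:

Tensor t = ones({4, 4});
Tensor v = t.view({range(2), range(2)}); // a non-plain view into t
Tensor r = realize(v);                   // JIT-copies the view into a fresh plain tensor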
16 changes: 12 additions & 4 deletions Etaler/Core/Tensor.cpp
@@ -191,6 +191,7 @@ Tensor Tensor::view(svector<Range> ranges) const
Shape result_shape;
svector<intmax_t> offset;
Shape viewed_strides = pimpl_->stride();
+ Shape result_stride;
offset.reserve(dimentions());

assert(viewed_strides.size() == dimentions());
@@ -220,16 +221,21 @@ Tensor Tensor::view(svector<Range> ranges) const
viewed_strides[i] *= step;

offset.push_back(real_start);
- if(size != 1 || result_shape.empty() == false) //Ignore heading 1 dimentions
+ if(size != 1 || result_shape.empty() == false) { //Ignore heading 1 dimentions
result_shape.push_back(size);
+ result_stride.push_back(viewed_strides[i]);
+ }
}

//If all dims are 1, thus no shape. Give it a shape
- if(result_shape.empty() == true)
+ if(result_shape.empty() == true) {
+ et_assert(result_stride.size() == result_shape.size());
result_shape.push_back(1);
+ result_stride.push_back(1);
+ }

size_t initial_offset = unfold(offset, pimpl_->stride())+pimpl_->offset();
- return std::make_shared<TensorImpl>(pimpl_->buffer(), result_shape, viewed_strides, initial_offset);
+ return std::make_shared<TensorImpl>(pimpl_->buffer(), result_shape, result_stride, initial_offset);
}

Tensor et::zeros(const Shape& shape, DType dtype, Backend* backend)
@@ -346,7 +352,9 @@ Tensor et::cat(const svector<Tensor>& tensors, intmax_t dim)

Tensor Tensor::copy() const
{
- return backend()->copy(pimpl());
+ if(iscontiguous() == true)
+ return backend()->copy(pimpl());
+ return realize().copy();
}

inline bool brodcastable(Shape a, Shape b)
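The substantive fix here: view() squeezes leading size-1 dimensions out of result_shape, but previously returned the full-rank viewed_strides, so the resulting tensor's shape and stride disagreed in rank and taking a view of a view misbehaved. Tracking result_stride alongside result_shape keeps the two in sync. A sketch of the case this repairs, mirroring the new "View of views" test below:

Tensor t = ones({4, 4});
Tensor v1 = t[{3}];      // shape {4}; stride must now be {1}, not the old rank-2 {4, 1}
Tensor v2 = v1[{all()}]; // works once shape and stride have matching rank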
50 changes: 49 additions & 1 deletion Etaler/Core/Tensor.hpp
@@ -17,6 +17,36 @@ namespace et
{

struct Tensor;

+ template <typename T>
+ struct ETALER_EXPORT TensorIterator
+ {
+ // Iterator properties
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = T;
+ using raw_value_type = std::remove_const_t<value_type>; // extra
+ using difference_type = intmax_t;
+ using pointer = std::unique_ptr<raw_value_type>;
+ using reference = T&;

+ using ThisIterator = TensorIterator<T>;
+ TensorIterator() = default;
+ TensorIterator(reference t, intmax_t offset = 0) : t_(&t), offset_(offset)
+ {static_assert(std::is_same_v<raw_value_type, Tensor>); }
+ value_type operator*() { return t_->view({offset_}); }
+ // Unfortunately returning a pointer is not doable
+ pointer operator->() { return std::make_unique<raw_value_type>(*(*this)); }
+ bool operator==(ThisIterator rhs) const { return offset_ == rhs.offset_ && t_ == rhs.t_; }
+ bool operator!=(ThisIterator rhs) const { return !(*this == rhs); }
+ ThisIterator& operator++() {offset_ += 1; return *this;}
+ ThisIterator operator++(int) {ThisIterator retval = *this; ++(*this); return retval;}
+ ThisIterator& operator--() {offset_ -= 1; return *this;}
+ ThisIterator operator--(int) {ThisIterator retval = *this; --(*this); return retval;}
+ value_type* t_ = nullptr; // Using a pointer because Tensor is an incomplete type here
+ intmax_t offset_ = 0;
+ };
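Two design points worth noting. operator* returns a value_type (a freshly constructed view), not a reference, so this is a proxy iterator in the same spirit as std::vector<bool>'s; and operator-> hands back a std::unique_ptr so the temporary view outlives the member access. A hedged usage sketch, assuming <iterator> is included for std::prev:

Tensor t = ones({3, 4});
auto it = t.begin();
Shape s = it->shape();          // operator-> materializes a temporary view of shape {4}
auto last = std::prev(t.end()); // bidirectional, so stepping backwards works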


Tensor ETALER_EXPORT brodcast_to(const Tensor& t, Shape s);

ETALER_EXPORT std::ostream& operator<< (std::ostream& os, const Tensor& t);
@@ -204,6 +234,17 @@ struct ETALER_EXPORT Tensor
TensorImpl* operator () () {return pimpl();}
const TensorImpl* operator () () const {return pimpl();}

+ using iterator = TensorIterator<Tensor>;
+ using const_iterator = TensorIterator<const Tensor>;

+ iterator begin() { return iterator(*this, 0); }
+ iterator back() { return iterator(*this, shape()[0]-1); }
+ iterator end() { return iterator(*this, shape()[0]); }

+ const_iterator begin() const { return const_iterator(*this, 0); }
+ const_iterator back() const { return const_iterator(*this, shape()[0]-1); }
+ const_iterator end() const { return const_iterator(*this, shape()[0]); }

bool has_value() const {return (bool)pimpl_ && size() > 0;}

std::pair<Tensor, Tensor> brodcast(const Tensor& other) const;
@@ -251,7 +292,7 @@ inline Tensor realize(const Tensor& t)

inline Tensor ravel(const Tensor& t)
{
- if(t.iscontiguous() == false)
+ if(t.iscontiguous() == true)
return t;
return t.realize();
}
@@ -313,6 +354,13 @@ inline void assign(Tensor& x, const Tensor& y)
x.assign(y);
}

+ inline void swap(Tensor x, Tensor y)
+ {
+ Tensor tmp = ravel(x).copy();
+ x.assign(y);
+ y.assign(tmp);
+ }
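swap() takes its arguments by value on purpose: a Tensor is a shared handle to a TensorImpl, so the copies still refer to the same storage and assign() writes through to it. Combined with the iterators above, this makes it possible to exchange rows in place, as the new test below exercises. A short sketch:

Tensor t = ones({3, 4});
Tensor q = zeros({3, 4});
swap(*t.begin(), *q.begin()); // row 0 of t and row 0 of q trade contents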

Tensor ETALER_EXPORT sum(const Tensor& x, std::optional<intmax_t> dim=std::nullopt, DType dtype=DType::Unknown);
Tensor ETALER_EXPORT cat(const svector<Tensor>& tensors, intmax_t dim=0);
inline Tensor concat(const svector<Tensor>& tensors, intmax_t dim=0) { return cat(tensors, dim); }
5 changes: 5 additions & 0 deletions Etaler/Core/TensorImpl.hpp
@@ -56,6 +56,7 @@ struct ETALER_EXPORT TensorImpl : public std::enable_shared_from_this<TensorImpl>
};

struct IsContingous {};
+ struct IsPlain {};

template <typename Storage>
struct IsDType
@@ -78,6 +79,8 @@ bool checkProperty(const TensorImpl* x, const T& value)
return x->dtype() == value;
else if constexpr(std::is_same_v<T, IsContingous>)
return x->iscontiguous();
+ else if constexpr(std::is_same_v<T, IsPlain>)
+ return x->isplain();
else if constexpr(is_specialization<std::remove_pointer_t<std::decay_t<T>>, IsDType>::value)
return (std::find(value.types.begin(), value.types.end(), x->dtype()) != value.types.end());
else
@@ -99,6 +102,8 @@ void requireProperty(const TensorImpl* x, const T value, const std::string& line
throw EtError(msg + ".dtype() == " + to_ctype_string(value));
else if constexpr(std::is_same_v<T, IsContingous>)
throw EtError(msg + ".iscontiguous() == true");
+ else if constexpr(std::is_same_v<T, IsPlain>)
+ throw EtError(msg + ".isplain() == true");
else if constexpr(is_specialization<std::remove_pointer_t<std::decay_t<T>>, IsDType>::value) {
throw EtError(msg + ".dtype() is in {" + std::accumulate(value.types.begin(), value.types.end(), std::string()
, [](auto v, auto a){return v + to_ctype_string(a) + ", ";}));
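With IsPlain wired into checkProperty/requireProperty, backends can demand a plain tensor the same way they already demand contiguity or a dtype. A hedged sketch; combining several properties in one variadic requireProperties call is an assumption extrapolated from its uses elsewhere in this diff:

// Hypothetical backend precondition: x must live on this backend, be Int32, and be plain.
requireProperties(x, this, DType::Int32, IsPlain());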
47 changes: 47 additions & 0 deletions tests/common_tests.cpp
@@ -162,6 +162,10 @@ TEST_CASE("Testing Tensor", "[Tensor]")

CHECK_NOTHROW(requireProperties(ones(Shape{1}, DType::Int32).pimpl(), IsContingous()));
CHECK_THROWS(requireProperties(ones(Shape{4,4}, DType::Int32).view({range(2), range(2)}).pimpl(), IsContingous()));

+ CHECK_NOTHROW(requireProperties(ones(Shape{1}, DType::Int32).pimpl(), IsPlain()));
+ CHECK_NOTHROW(requireProperties(ones(Shape{1}, DType::Int32).view({0}).pimpl(), IsPlain()));
+ CHECK_THROWS(requireProperties(ones(Shape{4,4}, DType::Int32).view({range(2), range(2)}).pimpl(), IsPlain()));
}

SECTION("Views") {
@@ -208,6 +212,13 @@ TEST_CASE("Testing Tensor", "[Tensor]")
CHECK(realize(r).isSame(pred));
}

SECTION("View of views") {
Tensor t = ones({4, 4});
Tensor v1 = t[{3}];
Tensor v2 = v1[{all()}];
CHECK(v2.size() == 4);
}

SECTION("View write back") {
Tensor q = t.view({range(2),range(2)});
CHECK_THROWS(q.assign(ones({5,5})));
@@ -286,6 +297,42 @@ TEST_CASE("Testing Tensor", "[Tensor]")
// item() should fail because q is not a scalar
CHECK_THROWS(q.item<int>());
}

SECTION("iterator") {
Tensor t = ones({3, 4});
Tensor q = zeros({3, 4});
STATIC_REQUIRE(std::is_same_v<Tensor::iterator::value_type, Tensor>);

// Tensor::iterator should be bideractional
// Reference: http://www.cplusplus.com/reference/iterator/BidirectionalIterator/
STATIC_REQUIRE(std::is_default_constructible_v<Tensor::iterator>);
STATIC_REQUIRE(std::is_copy_constructible_v<Tensor::iterator>);
STATIC_REQUIRE(std::is_copy_assignable_v<Tensor::iterator>);
STATIC_REQUIRE(std::is_destructible_v<Tensor::iterator>);
CHECK(t.begin() != t.end());
CHECK(t.begin() == t.begin());
CHECK((*t.begin()).shape() == Shape{4});
CHECK(t.begin()->shape() == Shape{4});
auto it1 = t.begin(), it2 = t.begin();
it1++;
++it2;
CHECK(it1 == it2);
--it1;
it2--;
CHECK(it1 == it2);

swap(*t.begin(), *q.begin());
CHECK(t[{0}].isSame(zeros({4})));

int num_iteration = 0;
for(auto s : t) {
CHECK(s.shape() == Shape({4}));
s.assign(constant({4}, 42));
num_iteration += 1;
}
CHECK(num_iteration == t.shape()[0]);
CHECK(t.sum().item<int>() == 42*t.size());
}
}

TEST_CASE("Testing Encoders", "[Encoder]")
