Skip to content

Commit

Permalink
Merge pull request #113 from marty1885/master
Browse files Browse the repository at this point in the history
Improve step support in indexing
  • Loading branch information
marty1885 committed Dec 5, 2019
2 parents 08c5635 + 789713f commit cf18fd3
Show file tree
Hide file tree
Showing 7 changed files with 85 additions and 64 deletions.
2 changes: 1 addition & 1 deletion Etaler/3rdparty/half_precision
Submodule half_precision updated 1 files
+7 −24 half.hpp
2 changes: 1 addition & 1 deletion Etaler/3rdparty/pcg-cpp
63 changes: 40 additions & 23 deletions Etaler/Core/Tensor.cpp
Expand Up @@ -176,43 +176,60 @@ Tensor Tensor::view(svector<Range> ranges) const
while(ranges.size() != dimentions())
ranges.push_back(all());

auto resolve_index = [](intmax_t idx, bool from_back, intmax_t size) {
if(from_back == true)
auto resolve_index = [](intmax_t idx, intmax_t size) -> intmax_t {
if(idx < 0)
return size-idx;
else
return idx;
return idx;
};

auto resolve_range_size = [resolve_index](Range r, intmax_t size) {
return resolve_index(r.end(), r.endFromBack(), size) - resolve_index(r.start(), r.startFromBack(), size);
auto is_index_valid = [](intmax_t idx, intmax_t size) -> bool {
if(idx >= 0)
return idx < size;
return -idx <= size;
};

Shape result_shape;
svector<intmax_t> offset;
Shape viewed_strides = pimpl_->stride();
offset.reserve(dimentions());

for(size_t i=0;i<dimentions();i++) {
Range r = ranges[i];

intmax_t start = resolve_index(r.start(), r.startFromBack(), shape()[i]);
intmax_t size = resolve_range_size(r, shape()[i]);

if(size < 0)
throw EtError("Negative steps not supported now");
if(start < 0 || (start+size) > shape()[i])
throw EtError("Indexing from " + std::to_string(start+size-1) + " is out of the range of " + std::to_string(shape()[i]));
assert(viewed_strides.size() == dimentions());

offset.push_back(start);
if(size != 1 || result_shape.size() != 0) //Ignore heading 1 dimentions
for(size_t i=0;i<dimentions();i++) {
const Range& r = ranges[i];
intmax_t dim_size = shape()[i];

intmax_t start = r.start().value_or(0);
intmax_t stop = r.stop().value_or(dim_size);
intmax_t step = r.step().value_or(1);

// Indexing validations
if(step == 0)
throw EtError("Error: Step size is zero in dimension " + std::to_string(i));
if(is_index_valid(start, dim_size) == false)
throw EtError("Starting index " + std::to_string(start) + " is out of range in dimension " + std::to_string(i));
if(is_index_valid(stop, dim_size+1) == false)
throw EtError("Stopping index " + std::to_string(stop) + " is out of range in dimension " + std::to_string(i));

intmax_t real_start = resolve_index(start, dim_size);
intmax_t real_stop = resolve_index(stop, dim_size);
intmax_t size = (real_stop - real_start - 1) / step + 1;

if((real_stop - real_start) * step < 0)
throw EtError("Step is going in the wrong direction. Will cause infinate loop");
viewed_strides[i] *= step;

offset.push_back(real_start);
if(size != 1 || result_shape.empty() == false) //Ignore heading 1 dimentions
result_shape.push_back(size);
}

//If all dims are 1, thus no shape. Give it a shape
if(result_shape.size() == 0)
if(result_shape.empty() == true)
result_shape.push_back(1);

Shape view_meta_strides = pimpl_->stride();
size_t initial_offset = unfold(offset, pimpl_->stride())+pimpl_->offset();
return std::make_shared<TensorImpl>(pimpl_->buffer(), result_shape, view_meta_strides, initial_offset);
return std::make_shared<TensorImpl>(pimpl_->buffer(), result_shape, viewed_strides, initial_offset);
}

Tensor et::zeros(const Shape& shape, DType dtype, Backend* backend)
Expand Down Expand Up @@ -364,7 +381,7 @@ inline Shape brodcast_result_shape(Shape a, Shape b)
Tensor et::brodcast_to(const Tensor& t, Shape s)
{
et_assert(s.size() >= t.dimentions());
Shape stride = leftpad(shapeToStride(t.shape()), s.size(), 0);
Shape stride = leftpad(t.stride(), s.size(), 0);
Shape shape = leftpad(t.shape(), s.size(), 0);
for(size_t i=0;i<s.size();i++) {
if(shape[i] != s[i])
Expand All @@ -386,4 +403,4 @@ std::pair<Tensor, Tensor> et::brodcast_tensors(const Tensor& a, const Tensor& b)
// Broadcast this tensor together with `other` to a common shape.
// Convenience member wrapper around the free function et::brodcast_tensors
// (spelling follows the project's existing "brodcast" naming).
std::pair<Tensor, Tensor> Tensor::brodcast(const Tensor& other) const
{
return brodcast_tensors(*this, other);
}
}
3 changes: 2 additions & 1 deletion Etaler/Core/Tensor.hpp
Expand Up @@ -63,8 +63,9 @@ struct ETALER_EXPORT Tensor
// Thin accessors forwarding to the pimpl implementation object.
size_t dimentions() const {return pimpl_->dimentions();}
void resize(Shape s) {pimpl()->resize(s);}
bool iscontiguous() const {return pimpl()->iscontiguous();}
Shape stride() const {return pimpl()->stride();}

// (The diff paste left both the old `...};` line and the new one in place;
// only a single definition, without the stray trailing semicolon, is kept.)
Backend* backend() const {return pimpl()->backend();}


template <typename ImplType=TensorImpl>
Expand Down
58 changes: 21 additions & 37 deletions Etaler/Core/Views.hpp
Expand Up @@ -6,6 +6,7 @@

#include <variant>
#include <memory>
#include <optional>

namespace et
{
// Half-open, optionally-stepped index range (like Python's slice).
// Unset fields mean "use the default": start -> 0, stop -> dimension size,
// step -> 1; the consumer (Tensor::view) applies those defaults because
// the dimension size is not known here.
struct Range
{
	Range() = default;

	// A single index: selects exactly the element at `start`.
	Range(intmax_t start)
		: start_(start), stop_(start+1)
	{}

	// [start, stop) with unit step.
	Range(intmax_t start, intmax_t stop)
		: start_(start), stop_(stop)
	{}

	// [start, stop) advancing by `step` each time.
	Range(intmax_t start, intmax_t stop, intmax_t step)
		: start_(start), stop_(stop), step_(step)
	{}

	std::optional<intmax_t> start() const {return start_;}
	std::optional<intmax_t> stop() const {return stop_;}
	std::optional<intmax_t> step() const {return step_;}

protected:
	std::optional<intmax_t> start_;
	std::optional<intmax_t> stop_;
	std::optional<intmax_t> step_;
};

// Select every element of a dimension: all fields unset, so Tensor::view
// fills in start=0, stop=dim_size, step=1.
inline Range all()
{
	return Range();
}

// [start, end) with unit step.
// (Body was hidden behind the diff's "Expand All" fold; reconstructed as
// the evident forwarding call.)
inline Range range(intmax_t start, intmax_t end)
{
	return Range(start, end);
}

// [0, end) with unit step.
inline Range range(intmax_t end)
{
	return Range(0, end);
}

// [start, stop) advancing by `step` each time.
inline Range range(intmax_t start, intmax_t stop, intmax_t step)
{
	return Range(start, stop, step);
}


}
2 changes: 1 addition & 1 deletion examples/visualize/LayersVisualizer
19 changes: 19 additions & 0 deletions tests/common_tests.cpp
Expand Up @@ -239,6 +239,25 @@ TEST_CASE("Testing Tensor", "[Tensor]")
CHECK(u.isSame(zeros_like(u)));
}

// Regression tests for stepped slicing (the new 3-arg range()).
SECTION("Strided views") {
SECTION("read") {
// Viewing every 2nd element of [0, 3) should yield indices 0 and 2.
// NOTE(review): assumes row 0 of the fixture `t` is {0, 1, 2, 3} —
// confirm against the TEST_CASE setup above this chunk.
int a[] = {0, 2};
Tensor q = Tensor({2}, a);
Tensor res = t.view({0, range(0, 3, 2)});
CHECK(res.isSame(q));
}

SECTION("write") {
// Assigning through a strided view must update only the strided
// elements of the parent tensor, leaving the skipped ones intact.
int a[] = {-1, -1};
Tensor q = Tensor({2}, a);
t[{0, range(0, 3, 2)}] = q;

// Elements 0 and 2 overwritten with -1; elements 1 and 3 untouched.
int b[] = {-1, 1, -1, 3};
Tensor r = Tensor({4}, b);
CHECK(t[{0}].isSame(r));
}
}

SECTION("subscription operator") {
svector<Range> r = {range(2)};
//The [] operator should work exactly like the view() function
Expand Down

0 comments on commit cf18fd3

Please sign in to comment.