
Commit

publish 0.0.9
perry committed Apr 1, 2021
1 parent 94bd925 commit 261cf5b
Showing 7 changed files with 161 additions and 31 deletions.
2 changes: 1 addition & 1 deletion docs/source/contact.rst
@@ -1,4 +1,4 @@
Contact
============

-This project is mainly developed by Yu Pan etc.. Feel free to contact me at iperryuu@gmail.com.
+This project is mainly developed by Yu Pan (iperryuu@gmail.com), Maolin Wang (morin.w98@gmail.com), and others. Feel free to contact us.
107 changes: 107 additions & 0 deletions docs/source/quick_start.ipynb
@@ -0,0 +1,107 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "gqwrcHFAVwgs"
},
"source": [
"# Quick Start"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "oz-X6ZrGVwgu"
},
"source": [
"In this quick starting guide we show the basics of working with t3f library. The main concept of the library is a TensorTrain object – a compact (factorized) representation of a tensor (=multidimensional array). This is generalization of the matrix low-rank decomposition."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"colab_type": "code",
"id": "ri9QCNEAVwgw",
"outputId": "efd8c64b-836e-449b-a385-dd3a63a5b4a2"
},
"outputs": [],
"source": [
"\n",
"import tednet as tdt\n",
"import tednet.tnn.tensor_ring as tr"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"colab": {},
"colab_type": "code",
"id": "5Awp7wdwVwg3"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"compression_ration is: 0.3968253968253968\n",
"compression_ration is: 14.17233560090703\n",
"compression_ration is: 241.54589371980677\n",
"compression_ration is: 2.867383512544803\n"
]
}
],
"source": [
"# Define a TR-LeNet5\n",
"model = tr.TRLeNet5(10, [6, 6, 6, 6])"
]
},
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "i2QmXYqeVwhQ",
"pycharm": {
"name": "#%% md\n"
}
},
"source": [
"It is easy to define a tensor ring model.\n"
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [],
"name": "riemannian.ipynb",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.9"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
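For readers who prefer a plain script to the notebook above, here is a minimal sketch of the same quick-start steps. Only the imports and the tr.TRLeNet5(10, [6, 6, 6, 6]) call come from the notebook; the forward pass and the MNIST-style input shape (batch, 1, 28, 28) are assumptions based on the standard LeNet-5 setting.

# Minimal quick-start sketch; the forward-pass shape is an assumption.
import torch

import tednet as tdt
import tednet.tnn.tensor_ring as tr

# Define a TR-LeNet5 with 10 output classes and tensor-ring ranks [6, 6, 6, 6].
# The library itself prints the per-layer compression ratios on construction.
model = tr.TRLeNet5(10, [6, 6, 6, 6])

# Hypothetical forward pass on a random MNIST-sized batch.
x = torch.randn(16, 1, 28, 28)
logits = model(x)
print(logits.shape)  # expected: torch.Size([16, 10])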
7 changes: 0 additions & 7 deletions docs/source/quick_start.rst

This file was deleted.

13 changes: 13 additions & 0 deletions ext/tests/tt_opt.py
@@ -0,0 +1,13 @@
# -*- coding: UTF-8 -*-

import tednet

import torch

a = torch.Tensor(6, 8)   # uninitialized 6 x 8 CPU tensor
b = tednet.to_numpy(a)   # torch.Tensor -> numpy.ndarray
c = tednet.to_tensor(b)  # numpy.ndarray -> torch.Tensor
d = c.cuda()             # move to the GPU (requires a CUDA device)
e = tednet.to_numpy(d)   # GPU tensor -> numpy.ndarray (copied back to the CPU)
f = tednet.to_tensor(e)  # and back to a torch.Tensor

4 changes: 2 additions & 2 deletions tednet/__init__.py
@@ -2,8 +2,8 @@


__author__ = "Perry"
__version__ = "0.0.8"
__version__ = "0.0.9"

from ._ops import *

__all__ = ["hard_sigmoid", "eye"]
__all__ = ["hard_sigmoid", "eye", "to_numpy", "to_tensor"]
55 changes: 36 additions & 19 deletions tednet/_ops.py
@@ -1,13 +1,16 @@
# -*- coding: UTF-8 -*-

+from collections import namedtuple

+import numpy as np

import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
-from collections import namedtuple


-def hard_sigmoid(tensor: torch.Tensor):
+def hard_sigmoid(tensor: torch.Tensor) -> torch.Tensor:
"""Computes element-wise hard sigmoid of x.
See e.g. https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py#L279
@@ -27,7 +30,7 @@ def hard_sigmoid(tensor: torch.Tensor):
return tensor


-def eye(n: int, m: int, device: torch.device = "cpu", requires_grad: bool=False):
+def eye(n: int, m: int, device: torch.device = "cpu", requires_grad: bool=False) -> torch.Tensor:
"""Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
Parameters
@@ -44,31 +47,45 @@ def eye(n: int, m: int, device: torch.device = "cpu", requires_grad: bool=False)
Returns
-------
torch.Tensor
-2-D tensor :math:`\in \mathbb{R}^{{i_1} \\times {i_2}}`
+2-D tensor :math:`\in \mathbb{R}^{{n} \\times {m}}`
"""
return torch.eye(n=n, m=m, device=device, requires_grad=requires_grad)


-def ones(x):
-    # TODO:
-    pass
+def to_numpy(tensor: torch.Tensor) -> np.ndarray:
+    """Convert torch.Tensor to numpy variable.
+    Parameters
+    ----------
+    tensor : torch.Tensor
+            tensor :math:`\in \mathbb{R}^{{i_1} \\times \dots \\times {i_n}}`
-def prod_all(x):
-    # TODO:
-    pass
+    Returns
+    -------
+    numpy.ndarray
+            arr :math:`\in \mathbb{R}^{{i_1} \\times \dots \\times {i_n}}`
+    """
+    if tensor.device.type == "cpu":
+        arr = tensor.numpy()
+    else:
+        arr = tensor.cpu().numpy()

+    return arr

-def diag(x):
-    # TODO:
-    pass

+def to_tensor(arr: np.ndarray) -> torch.Tensor:
+    """Convert numpy variable to torch.Tensor.
-def tensordot(x):
-    # TODO:
-    pass
+    Parameters
+    ----------
+    arr : numpy.ndarray
+            arr :math:`\in \mathbb{R}^{{i_1} \\times \dots \\times {i_n}}`
+    Returns
+    -------
+    torch.Tensor
+            tensor :math:`\in \mathbb{R}^{{i_1} \\times \dots \\times {i_n}}`
+    """
+    tensor = torch.from_numpy(arr)

-def from_numpy(x):
-    # TODO:
-    pass
+    return tensor
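The two new helpers convert between torch.Tensor and numpy.ndarray in either direction, moving GPU tensors back to the CPU before conversion. A short usage sketch based on the signatures shown above (the memory-sharing note follows from torch.from_numpy, which the implementation uses):

import numpy as np
import torch

from tednet import to_numpy, to_tensor  # exported via __all__ as of 0.0.9

t = torch.ones(2, 3)    # CPU tensor
a = to_numpy(t)         # numpy.ndarray of shape (2, 3)
t2 = to_tensor(a)       # back to a torch.Tensor; shares memory with `a` (torch.from_numpy)

arr = np.zeros((2, 3), dtype=np.float32)
t3 = to_tensor(arr)     # works for any array torch.from_numpy accepts

if torch.cuda.is_available():
    g = t.cuda()
    a_gpu = to_numpy(g)  # copied back to the CPU first, then converted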
4 changes: 2 additions & 2 deletions tednet/tnn/tn_module.py
@@ -8,7 +8,7 @@

import numpy as np

__all__ = ["_TNBase"]
__all__ = ["_TNBase", "LambdaLayer"]


class _TNBase(nn.Module, metaclass=abc.ABCMeta):
@@ -139,7 +139,7 @@ def recover(self):

class LambdaLayer(nn.Module):
def __init__(self, lambd):
"""Tensor Ring Block.
"""A layer consists of Lambda function.
Parameters
----------
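LambdaLayer is now part of tn_module's public API. A hedged usage sketch, assuming its forward simply applies the wrapped callable; only the __init__(self, lambd) signature is visible in the diff above:

import torch

from tednet.tnn.tn_module import LambdaLayer

# Assumption: LambdaLayer.forward applies the stored callable to its input.
flatten = LambdaLayer(lambda x: x.view(x.size(0), -1))

x = torch.randn(4, 3, 8, 8)
y = flatten(x)
print(y.shape)  # expected: torch.Size([4, 192]) if the assumption holds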
