Release 0.20.0 (#1888)
* Update pyproject.toml

* update black

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update _trainingplans.py

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
adamgayoso and pre-commit-ci[bot] committed Feb 1, 2023
1 parent 3792568 commit 486e3e4
Showing 22 changed files with 29 additions and 27 deletions.
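Most of the hunks below are mechanical reformatting from the black 22.12.0 → 23.1.0 bump: the 2023 stable style strips blank lines at the start of a function or class body. A minimal before/after sketch (hypothetical code, not from this repo):

# before: black 22.12.0 tolerated a blank line right after the signature
def fit(x, y):

    return x + y

# after: black 23.1.0 deletes that blank line
def fit(x, y):
    return x + y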
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -7,7 +7,7 @@ default_stages:
 minimum_pre_commit_version: 2.16.0
 repos:
   - repo: https://github.com/psf/black
-    rev: 22.12.0
+    rev: 23.1.0
     hooks:
       - id: black
   - repo: https://github.com/pre-commit/mirrors-prettier
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -5,7 +5,7 @@ requires = ["hatchling"]

 [project]
 name = "scvi-tools"
-version = "0.20.0b2"
+version = "0.20.0"
 description = "Deep probabilistic analysis of single-cell omics data."
 readme = "README.md"
 requires-python = ">=3.8"
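With the beta suffix dropped, the installed release can be checked at runtime. A quick sketch, assuming scvi-tools 0.20.0 is installed:

import scvi

# prints "0.20.0" for this release
print(scvi.__version__)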
1 change: 0 additions & 1 deletion scvi/_settings.py
@@ -58,7 +58,6 @@ def __init__(
         dl_pin_memory_gpu_training: bool = False,
         jax_preallocate_gpu_memory: bool = False,
     ):
-
         self.seed = seed
         self.batch_size = batch_size
         if progress_bar_style not in ["rich", "tqdm"]:
1 change: 0 additions & 1 deletion scvi/autotune/_manager.py
@@ -190,7 +190,6 @@ def _validate_search_space(self, search_space: dict, use_defaults: bool) -> dict
         # add defaults if requested
         _search_space = {}
         if use_defaults:
-
             # parse defaults into tune sample functions
             for param, metadata in self._defaults.items():
                 sample_fn = getattr(tune, metadata["fn"])
1 change: 0 additions & 1 deletion scvi/data/_built_in_data/_seqfish.py
@@ -15,7 +15,6 @@ def _load_seqfishplus(
     save_path: str = "data/",
     tissue_region: str = "subventricular cortex",
 ) -> anndata.AnnData:
-
     if tissue_region == "subventricular cortex":
         file_prefix = "cortex_svz"
     elif tissue_region == "olfactory bulb":
1 change: 0 additions & 1 deletion scvi/data/_built_in_data/_synthetic.py
@@ -16,7 +16,6 @@ def _generate_synthetic(
     n_labels: int = 3,
     sparse: bool = False,
 ) -> AnnData:
-
     data = np.random.negative_binomial(5, 0.3, size=(batch_size * n_batches, n_genes))
     mask = np.random.binomial(n=1, p=0.7, size=(batch_size * n_batches, n_genes))
     data = data * mask  # We put the batch index first
1 change: 0 additions & 1 deletion scvi/data/_preprocessing.py
@@ -103,7 +103,6 @@ def poisson_gene_selection(
     obs_frac_zeross = []
     exp_frac_zeross = []
     for b in np.unique(batch_info):
-
         ad = adata[batch_info == b]
         data = ad.layers[layer] if layer is not None else ad.X

1 change: 0 additions & 1 deletion scvi/dataloaders/_ann_dataloader.py
@@ -121,7 +121,6 @@ def __init__(
         iter_ndarray: bool = False,
         **data_loader_kwargs,
     ):
-
         if adata_manager.adata is None:
             raise ValueError(
                 "Please run register_fields() on your AnnDataManager object first."
2 changes: 0 additions & 2 deletions scvi/distributions/_negative_binomial.py
@@ -430,7 +430,6 @@ def __init__(
         scale: Optional[torch.Tensor] = None,
         validate_args: bool = False,
     ):
-
         super().__init__(
             total_count=total_count,
             probs=probs,
@@ -527,7 +526,6 @@ def __init__(
         theta2: Optional[torch.Tensor] = None,
         validate_args: bool = False,
     ):
-
         (
             self.mu1,
             self.theta1,
4 changes: 2 additions & 2 deletions scvi/external/gimvi/_task.py
@@ -34,7 +34,7 @@ def training_step(self, batch, batch_idx, optimizer_idx=0):
         loss_output_objs = []
         n_obs = 0
         zs = []
-        for (i, tensors) in enumerate(batch):
+        for i, tensors in enumerate(batch):
             n_obs += tensors[REGISTRY_KEYS.X_KEY].shape[0]
             self.loss_kwargs.update(dict(kl_weight=self.kl_weight, mode=i))
             inference_kwargs = dict(mode=i)
@@ -76,7 +76,7 @@ def training_step(self, batch, batch_idx, optimizer_idx=0):
         # this condition will not be met unless self.adversarial_classifier is not False
         if optimizer_idx == 1:
             zs = []
-            for (i, tensors) in enumerate(batch):
+            for i, tensors in enumerate(batch):
                 inference_inputs = self.module._get_inference_input(tensors)
                 inference_inputs.update({"mode": i})
                 outputs = self.module.inference(**inference_inputs)
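The two gimVI hunks above show another 2023-style rule: black 23 drops redundant parentheses around a tuple target in a for loop. A hypothetical sketch:

pairs = {"a": 1, "b": 2}

# before: parenthesized loop target, accepted by black 22
for (key, value) in pairs.items():
    print(key, value)

# after: black 23.1.0 removes the parentheses
for key, value in pairs.items():
    print(key, value)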
5 changes: 4 additions & 1 deletion scvi/model/_totalvi.py
@@ -897,7 +897,10 @@ def _get_denoised_samples(
         generative_kwargs = dict(transform_batch=transform_batch)
         inference_kwargs = dict(n_samples=n_samples)
         with torch.inference_mode():
-            inference_outputs, generative_outputs, = self.module.forward(
+            (
+                inference_outputs,
+                generative_outputs,
+            ) = self.module.forward(
                 tensors,
                 inference_kwargs=inference_kwargs,
                 generative_kwargs=generative_kwargs,
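The multi-line rewrite above appears to be black's magic trailing comma at work: because the unpacking target ends with a comma, black 23 wraps it in parentheses and places one element per line. A hypothetical sketch (forward, tensors, and the output names are illustrative):

def forward(t):
    return t, t

tensors = [1, 2]

# before: assignment target with a trailing comma
inference_outputs, generative_outputs, = forward(tensors)

# after: black 23.1.0 rewrites the same statement as
(
    inference_outputs,
    generative_outputs,
) = forward(tensors)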
7 changes: 6 additions & 1 deletion scvi/model/base/_base_model.py
@@ -639,7 +639,12 @@ def load(
         load_adata = adata is None
         _, _, device = parse_use_gpu_arg(use_gpu)

-        (attr_dict, var_names, model_state_dict, new_adata,) = _load_saved_files(
+        (
+            attr_dict,
+            var_names,
+            model_state_dict,
+            new_adata,
+        ) = _load_saved_files(
             dir_path,
             load_adata,
             map_location=device,
1 change: 0 additions & 1 deletion scvi/model/base/_pyromixin.py
@@ -284,7 +284,6 @@ def _get_posterior_samples(
             description="Sampling global variables, sample: ",
             disable=not show_progress,
         ):
-
             # generate new sample
             samples_ = self._get_one_posterior_sample(
                 args, kwargs, return_sites=return_sites, return_observed=return_observed
1 change: 0 additions & 1 deletion scvi/model/base/_rnamixin.py
@@ -583,7 +583,6 @@ def get_latent_library_size(
         else:
             ql = outputs["ql"]
             if ql is None:
-
                 raise RuntimeError(
                     "The module for this model does not compute the posterior distribution "
                     "for the library size. Set `give_mean` to False to use the observed library size instead."
1 change: 0 additions & 1 deletion scvi/model/base/_utils.py
@@ -135,7 +135,6 @@ def _initialize_model(cls, adata, attr_dict):


 def _validate_var_names(adata, source_var_names):
-
     user_var_names = adata.var_names.astype(str)
     if not np.array_equal(source_var_names, user_var_names):
         warnings.warn(
1 change: 0 additions & 1 deletion scvi/module/_amortizedlda.py
@@ -101,7 +101,6 @@ def __init__(
     def _get_fn_args_from_batch(
         tensor_dict: Dict[str, torch.Tensor]
     ) -> Union[Iterable, dict]:
-
         x = tensor_dict[REGISTRY_KEYS.X_KEY]
         library = torch.sum(x, dim=1)
         return (x, library), {}
5 changes: 4 additions & 1 deletion scvi/module/_totalvae.py
@@ -660,7 +660,10 @@ def sample(self, tensors, n_samples=1):
         """Sample from the generative model."""
         inference_kwargs = dict(n_samples=n_samples)
         with torch.inference_mode():
-            inference_outputs, generative_outputs, = self.forward(
+            (
+                inference_outputs,
+                generative_outputs,
+            ) = self.forward(
                 tensors,
                 inference_kwargs=inference_kwargs,
                 compute_loss=False,
5 changes: 4 additions & 1 deletion scvi/module/_vae.py
@@ -494,7 +494,10 @@ def sample(
         tensor with shape (n_cells, n_genes, n_samples)
         """
         inference_kwargs = dict(n_samples=n_samples)
-        _, generative_outputs, = self.forward(
+        (
+            _,
+            generative_outputs,
+        ) = self.forward(
             tensors,
             inference_kwargs=inference_kwargs,
             compute_loss=False,
3 changes: 2 additions & 1 deletion scvi/train/_trainingplans.py
@@ -1168,8 +1168,9 @@ def jit_training_step(
         **kwargs,
     ):
         """Jit training step."""
-        # state can't be passed here
+
         def loss_fn(params):
+            # state can't be passed here
             vars_in = {"params": params, **state.state}
             outputs, new_model_state = state.apply_fn(
                 vars_in, batch, rngs=rngs, mutable=list(state.state.keys()), **kwargs
1 change: 0 additions & 1 deletion tests/autotune/test_types.py
@@ -14,7 +14,6 @@ def __init__(self, n_train: Tunable[int] = 1000, n_val: Tunable[int] = 100):


 class DummyModel(TunableMixin, DummyTrainingMixin):
-
     _data_splitter_cls = DummyDataSplitter

     def __init__(
7 changes: 5 additions & 2 deletions tests/core/test_differential.py
@@ -50,7 +50,6 @@ def test_features():


 def test_differential_computation(save_path):
-
     n_latent = 5
     adata = synthetic_iid()
     SCVI.setup_anndata(
@@ -111,7 +110,11 @@ def m1_domain_fn_test(samples):
     )

     # Test query features
-    obs_col, group1, _, = _prepare_obs(
+    (
+        obs_col,
+        group1,
+        _,
+    ) = _prepare_obs(
         idx1="(labels == 'label_1') & (batch == 'batch_1')", idx2=None, adata=adata
     )
     assert (obs_col == group1).sum() == adata.obs.loc[
3 changes: 0 additions & 3 deletions tests/models/test_pyro.py
@@ -93,7 +93,6 @@ def _get_fn_args_from_batch(tensor_dict):
         return (x, y, ind_x), {}

     def forward(self, x, y, ind_x):
-
         obs_plate = self.create_plates(x, y, ind_x)

         sigma = pyro.sample("sigma", dist.Exponential(self.one))
@@ -114,7 +113,6 @@ def forward(self, x, y, ind_x):

 class BayesianRegressionModule(PyroBaseModuleClass):
     def __init__(self, **kwargs):
-
         super().__init__()
         self._model = BayesianRegressionPyroModel(**kwargs)
         self._guide = AutoNormal(
@@ -465,7 +463,6 @@ def test_pyro_bayesian_train_sample_mixin_with_local_full_data():

 class FunctionBasedPyroModule(PyroBaseModuleClass):
     def __init__(self, n_input: int, n_latent: int, n_hidden: int, n_layers: int):
-
         super().__init__()
         self.n_input = n_input
         self.n_latent = n_latent
