Commit

more debugging
madhavajay committed Jul 16, 2022
1 parent e1508c1 commit 844dc7a
Showing 4 changed files with 645 additions and 441 deletions.
38 changes: 9 additions & 29 deletions fastai/_nbdev.py
@@ -288,9 +288,7 @@
"TensorPoint.dihedral": "09_vision.augment.ipynb",
"TensorBBox.dihedral": "09_vision.augment.ipynb",
"DihedralItem": "09_vision.augment.ipynb",
"TensorBBox.crop_pad": "09_vision.augment.ipynb",
"TensorPoint.crop_pad": "09_vision.augment.ipynb",
"Image.Image.crop_pad": "09_vision.augment.ipynb",
"TensorBBox|TensorPoint|Image.Image.crop_pad": "09_vision.augment.ipynb",
"CropPad": "09_vision.augment.ipynb",
"RandomCrop": "09_vision.augment.ipynb",
"OldRandomCrop": "09_vision.augment.ipynb",
@@ -307,38 +305,23 @@
"mask_tensor": "09_vision.augment.ipynb",
"affine_mat": "09_vision.augment.ipynb",
"flip_mat": "09_vision.augment.ipynb",
"TensorImage.flip_batch": "09_vision.augment.ipynb",
"TensorMask.flip_batch": "09_vision.augment.ipynb",
"TensorPoint.flip_batch": "09_vision.augment.ipynb",
"TensorBBox.flip_batch": "09_vision.augment.ipynb",
"TensorImage|TensorMask|TensorPoint|TensorBBox.flip_batch": "09_vision.augment.ipynb",
"Flip": "09_vision.augment.ipynb",
"DeterministicDraw": "09_vision.augment.ipynb",
"DeterministicFlip": "09_vision.augment.ipynb",
"dihedral_mat": "09_vision.augment.ipynb",
"TensorImage.dihedral_batch": "09_vision.augment.ipynb",
"TensorMask.dihedral_batch": "09_vision.augment.ipynb",
"TensorPoint.dihedral_batch": "09_vision.augment.ipynb",
"TensorBBox.dihedral_batch": "09_vision.augment.ipynb",
"TensorImage|TensorMask|TensorPoint|TensorBBox.dihedral_batch": "09_vision.augment.ipynb",
"Dihedral": "09_vision.augment.ipynb",
"DeterministicDihedral": "09_vision.augment.ipynb",
"rotate_mat": "09_vision.augment.ipynb",
"TensorImage.rotate": "09_vision.augment.ipynb",
"TensorMask.rotate": "09_vision.augment.ipynb",
"TensorPoint.rotate": "09_vision.augment.ipynb",
"TensorBBox.rotate": "09_vision.augment.ipynb",
"TensorImage|TensorMask|TensorPoint|TensorBBox.rotate": "09_vision.augment.ipynb",
"Rotate": "09_vision.augment.ipynb",
"zoom_mat": "09_vision.augment.ipynb",
"TensorImage.zoom": "09_vision.augment.ipynb",
"TensorMask.zoom": "09_vision.augment.ipynb",
"TensorPoint.zoom": "09_vision.augment.ipynb",
"TensorBBox.zoom": "09_vision.augment.ipynb",
"TensorImage|TensorMask|TensorPoint|TensorBBox.zoom": "09_vision.augment.ipynb",
"Zoom": "09_vision.augment.ipynb",
"find_coeffs": "09_vision.augment.ipynb",
"apply_perspective": "09_vision.augment.ipynb",
"TensorImage.warp": "09_vision.augment.ipynb",
"TensorMask.warp": "09_vision.augment.ipynb",
"TensorPoint.warp": "09_vision.augment.ipynb",
"TensorBBox.warp": "09_vision.augment.ipynb",
"TensorImage|TensorMask|TensorPoint|TensorBBox.warp": "09_vision.augment.ipynb",
"Warp": "09_vision.augment.ipynb",
"TensorImage.lighting": "09_vision.augment.ipynb",
"SpaceTfm": "09_vision.augment.ipynb",
@@ -799,12 +782,9 @@
"DcmDataset.to_nchan": "60_medical.imaging.ipynb",
"Tensor.to_3chan": "60_medical.imaging.ipynb",
"DcmDataset.to_3chan": "60_medical.imaging.ipynb",
"Tensor.save_jpg": "60_medical.imaging.ipynb",
"DcmDataset.save_jpg": "60_medical.imaging.ipynb",
"Tensor.to_uint16": "60_medical.imaging.ipynb",
"DcmDataset.to_uint16": "60_medical.imaging.ipynb",
"Tensor.save_tif16": "60_medical.imaging.ipynb",
"DcmDataset.save_tif16": "60_medical.imaging.ipynb",
"Tensor|DcmDataset.save_jpg": "60_medical.imaging.ipynb",
"Tensor|DcmDataset.to_uint16": "60_medical.imaging.ipynb",
"Tensor|DcmDataset.save_tif16": "60_medical.imaging.ipynb",
"DcmDataset.set_pixels": "60_medical.imaging.ipynb",
"DcmDataset.pixel_array": "60_medical.imaging.ipynb",
"DcmDataset.zoom": "60_medical.imaging.ipynb",
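The collapsed keys above replace one index entry per type (e.g. "TensorBBox.crop_pad" and "TensorPoint.crop_pad") with a single entry whose types are joined by "|", presumably reflecting one function that is registered on several types in the fastai source. Below is a minimal, self-contained Python sketch of that indexing idea; it does not use fastai, fastcore, or nbdev internals, and the patch_to_all and index_key helpers are hypothetical names used only for illustration.

# Hypothetical sketch: attach one function to several classes and build a
# combined, nbdev-style index key such as "TensorBBox|TensorPoint.crop_pad".
def patch_to_all(*classes):
    "Attach the decorated function to every class in `classes`."
    def _inner(fn):
        for cls in classes:
            setattr(cls, fn.__name__, fn)
        return fn
    return _inner

def index_key(fn, classes):
    "Join the class names with '|' and append the function name."
    return "|".join(c.__name__ for c in classes) + "." + fn.__name__

class TensorBBox: pass
class TensorPoint: pass

@patch_to_all(TensorBBox, TensorPoint)
def crop_pad(self, sz):
    return f"crop_pad({sz}) on {type(self).__name__}"

print(index_key(crop_pad, (TensorBBox, TensorPoint)))  # TensorBBox|TensorPoint.crop_pad
print(TensorPoint().crop_pad(8))                       # crop_pad(8) on TensorPoint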
12 changes: 12 additions & 0 deletions nbs/10_tutorial.pets.ipynb
@@ -1724,6 +1724,18 @@
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.13"
}
},
"nbformat": 4,
147 changes: 17 additions & 130 deletions nbs/15a_vision.models.unet.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -13,7 +13,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -25,7 +25,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -35,7 +35,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -53,7 +53,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -67,7 +67,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -80,7 +80,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -113,7 +113,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
@@ -129,7 +129,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
@@ -181,7 +181,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -190,26 +190,9 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"middle_conv Sequential(\n",
" (0): ConvLayer(\n",
" (0): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
" (1): ReLU()\n",
" )\n",
" (1): ConvLayer(\n",
" (0): Conv2d(1024, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
" (1): ReLU()\n",
" )\n",
")\n"
]
}
],
"outputs": [],
"source": [
"m = resnet34()\n",
"m = nn.Sequential(*list(m.children())[:-2])\n",
@@ -221,26 +204,9 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"middle_conv Sequential(\n",
" (0): ConvLayer(\n",
" (0): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
" (1): ReLU()\n",
" )\n",
" (1): ConvLayer(\n",
" (0): Conv2d(1024, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
" (1): ReLU()\n",
" )\n",
")\n"
]
}
],
"outputs": [],
"source": [
"tst = DynamicUnet(m, 5, (128,128), norm_type=None)\n",
"x = torch.randn(2, 3, 127, 128)\n",
@@ -256,100 +222,21 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Converted 00_torch_core.ipynb.\n",
"Converted 01_layers.ipynb.\n",
"Converted 01a_losses.ipynb.\n",
"Converted 02_data.load.ipynb.\n",
"Converted 03_data.core.ipynb.\n",
"Converted 04_data.external.ipynb.\n",
"Converted 05_data.transforms.ipynb.\n",
"Converted 06_data.block.ipynb.\n",
"Converted 07_vision.core.ipynb.\n",
"Converted 08_vision.data.ipynb.\n",
"Converted 09_vision.augment.ipynb.\n",
"Converted 09b_vision.utils.ipynb.\n",
"Converted 09c_vision.widgets.ipynb.\n",
"Converted 10_tutorial.pets.ipynb.\n",
"Converted 10b_tutorial.albumentations.ipynb.\n",
"Converted 11_vision.models.xresnet.ipynb.\n",
"Converted 12_optimizer.ipynb.\n",
"Converted 13_callback.core.ipynb.\n",
"Converted 13a_learner.ipynb.\n",
"Converted 13b_metrics.ipynb.\n",
"Converted 14_callback.schedule.ipynb.\n",
"Converted 14a_callback.data.ipynb.\n",
"Converted 15_callback.hook.ipynb.\n",
"Converted 15a_vision.models.unet.ipynb.\n",
"Converted 16_callback.progress.ipynb.\n",
"Converted 17_callback.tracker.ipynb.\n",
"Converted 18_callback.fp16.ipynb.\n",
"Converted 18a_callback.training.ipynb.\n",
"Converted 18b_callback.preds.ipynb.\n",
"Converted 19_callback.mixup.ipynb.\n",
"Converted 20_interpret.ipynb.\n",
"Converted 20a_distributed.ipynb.\n",
"Converted 20b_tutorial.distributed.ipynb.\n",
"Converted 21_vision.learner.ipynb.\n",
"Converted 22_tutorial.imagenette.ipynb.\n",
"Converted 23_tutorial.vision.ipynb.\n",
"Converted 24_tutorial.image_sequence.ipynb.\n",
"Converted 24_tutorial.siamese.ipynb.\n",
"Converted 24_vision.gan.ipynb.\n",
"Converted 30_text.core.ipynb.\n",
"Converted 31_text.data.ipynb.\n",
"Converted 32_text.models.awdlstm.ipynb.\n",
"Converted 33_text.models.core.ipynb.\n",
"Converted 34_callback.rnn.ipynb.\n",
"Converted 35_tutorial.wikitext.ipynb.\n",
"Converted 37_text.learner.ipynb.\n",
"Converted 38_tutorial.text.ipynb.\n",
"Converted 39_tutorial.transformers.ipynb.\n",
"Converted 40_tabular.core.ipynb.\n",
"Converted 41_tabular.data.ipynb.\n",
"Converted 42_tabular.model.ipynb.\n",
"Converted 43_tabular.learner.ipynb.\n",
"Converted 44_tutorial.tabular.ipynb.\n",
"Converted 45_collab.ipynb.\n",
"Converted 46_tutorial.collab.ipynb.\n",
"Converted 50_tutorial.datablock.ipynb.\n",
"Converted 60_medical.imaging.ipynb.\n",
"Converted 61_tutorial.medical_imaging.ipynb.\n",
"Converted 65_medical.text.ipynb.\n",
"Converted 70_callback.wandb.ipynb.\n",
"Converted 70a_callback.tensorboard.ipynb.\n",
"Converted 70b_callback.neptune.ipynb.\n",
"Converted 70c_callback.captum.ipynb.\n",
"Converted 70d_callback.comet.ipynb.\n",
"Converted 74_huggingface.ipynb.\n",
"Converted 97_test_utils.ipynb.\n",
"Converted 99_pytorch_doc.ipynb.\n",
"Converted dev-setup.ipynb.\n",
"Converted app_examples.ipynb.\n",
"Converted camvid.ipynb.\n",
"Converted distributed_app_examples.ipynb.\n",
"Converted migrating_catalyst.ipynb.\n",
"Converted migrating_ignite.ipynb.\n",
"Converted migrating_lightning.ipynb.\n",
"Converted migrating_pytorch.ipynb.\n",
"Converted migrating_pytorch_verbose.ipynb.\n",
"Converted ulmfit.ipynb.\n",
"Converted index.ipynb.\n",
"Converted quick_start.ipynb.\n",
"Converted tutorial.ipynb.\n"
"Converted 15a_vision.models.unet.ipynb.\n"
]
}
],
"source": [
"#|hide\n",
"from nbdev.export import *\n",
"notebook2script()"
"notebook2script(fname=\"15a*\")"
]
},
{
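The last change in this notebook narrows the export call from notebook2script() to notebook2script(fname="15a*"), and the captured cell output shrinks accordingly from listing every converted notebook to listing only 15a_vision.models.unet.ipynb. A short usage sketch of the nbdev export call, assuming only what the cell and its output above already show, is:

# nbdev export, as used in the cell above.
from nbdev.export import notebook2script

notebook2script(fname="15a*")  # converts only notebooks matching the glob (here 15a_vision.models.unet.ipynb)
# notebook2script()            # no fname: converts every notebook, as the removed output shows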
