-
Notifications
You must be signed in to change notification settings - Fork 0
/
modules_ibp.py
99 lines (83 loc) · 3.25 KB
/
modules_ibp.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import torch
import torch.nn as nn
import numpy as np
class Add_ParamI(nn.Module):
    """Adds a single learnable bias to its input.

    IBP: adding a constant shifts both interval endpoints equally,
    so the bounds translate without reordering.
    """

    def __init__(self):
        super().__init__()
        # One scalar offset shared across all elements of the input.
        self.bias = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        return x + self.bias

    def ibp_forward(self, l, u):
        # [l, u] + b  ->  [l + b, u + b]
        return l + self.bias, u + self.bias
class Scale_By_ParamI(nn.Module):
    """Multiplies its input by a learnable scalar.

    IBP: multiplying the interval [l, u] by a constant c gives
    [min(c*l, c*u), max(c*l, c*u)] — the endpoints swap when c < 0.

    Fix: the original branched in Python on ``self.scalar >= 0``, which
    only works while the parameter has exactly one element and is not
    trace/script friendly (data-dependent control flow).  The elementwise
    min/max form below is equivalent for a one-element scalar and stays
    correct if the parameter is ever made elementwise.
    """

    def __init__(self):
        super().__init__()
        # Single learnable multiplier, initialised to the identity.
        self.scalar = nn.Parameter(torch.ones(1))

    def forward(self, x):
        return x * self.scalar

    def ibp_forward(self, l, u):
        a = l * self.scalar
        b = u * self.scalar
        # Covers both signs of the scalar without a Python-level branch.
        return torch.min(a, b), torch.max(a, b)
class FlattenI(nn.Module):
    """Flattens each sample to a vector, keeping the batch dimension."""

    def forward(self, input):
        return input.view(input.size(0), -1)

    def ibp_forward(self, l, u):
        # Reshaping touches no values, so each bound flattens independently.
        return self.forward(l), self.forward(u)
class Conv2dI(nn.Conv2d):
    """Conv2d with interval bound propagation.

    The kernel is split into its positive and negative parts: positive
    weights preserve the interval order, negative weights swap the
    lower/upper roles of the inputs.
    """

    def ibp_forward(self, l, u):
        def _conv(x, w):
            # Bias is deliberately excluded here and added once at the end.
            return nn.functional.conv2d(
                x, w, bias=None,
                stride=self.stride, padding=self.padding,
                dilation=self.dilation, groups=self.groups,
            )

        w_pos = self.weight.clamp(min=0)
        w_neg = self.weight.clamp(max=0)
        l_ = _conv(l, w_pos) + _conv(u, w_neg)
        u_ = _conv(u, w_pos) + _conv(l, w_neg)
        if self.bias is not None:
            l_ = l_ + self.bias[None, :, None, None]
            u_ = u_ + self.bias[None, :, None, None]
        return l_, u_
class ReLUI(nn.ReLU):
    """ReLU with IBP: monotone, so each bound passes through independently."""

    def ibp_forward(self, l, u):
        return torch.relu(l), torch.relu(u)
class AvgPool2dI(nn.AvgPool2d):
    """Average pooling with IBP: all pooling coefficients are non-negative,
    so the bounds pool independently."""

    def ibp_forward(self, l, u):
        return super().forward(l), super().forward(u)
class AdaptiveAvgPool2dI(nn.AdaptiveAvgPool2d):
    """Adaptive average pooling with IBP: non-negative coefficients,
    so each bound is pooled on its own."""

    def ibp_forward(self, l, u):
        return super().forward(l), super().forward(u)
class LinearI(nn.Linear):
    """Linear layer with interval bound propagation.

    Positive weights preserve interval order; negative weights swap the
    lower/upper roles of the inputs.

    Fixes vs. the original:
    * it indexed ``self.bias[:, None]`` unconditionally and so raised
      ``TypeError`` for layers built with ``bias=False`` — ``F.linear``
      accepts ``bias=None``, matching how ``Conv2dI`` guards its bias;
    * the manual double-transpose is gone, and ``F.linear`` also accepts
      inputs with extra leading (batch-like) dimensions.
    """

    def ibp_forward(self, l, u):
        w_pos = self.weight.clamp(min=0)
        w_neg = self.weight.clamp(max=0)
        # Bias is added exactly once per bound (via the w_pos term).
        l_ = nn.functional.linear(l, w_pos, self.bias) + nn.functional.linear(u, w_neg)
        u_ = nn.functional.linear(u, w_pos, self.bias) + nn.functional.linear(l, w_neg)
        return l_, u_
def conv3x3I(in_planes, out_planes, stride=1):
    """Build a 3x3 IBP-aware convolution with padding 1 and no bias."""
    return Conv2dI(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class SequentialI(nn.Sequential):
    """Sequential container that threads IBP bounds through each child.

    Every child is expected to provide ``ibp_forward(l, u) -> (l, u)``.
    """

    def ibp_forward(self, l, u):
        bounds = (l, u)
        for layer in self:
            bounds = layer.ibp_forward(*bounds)
        return bounds