mhca.py
"""
2023, Multimodal Multi-Head Convolutional Attention with Various Kernel Sizes for Medical Image Super-Resolution
Mariana-Iuliana Georgescu, Radu Tudor Ionescu, Andreea-Iuliana Miron, Olivian Savencu, Nicolae-Catalin Ristea, Nicolae Verga and Fahad Shahbaz Khan, WACV
Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)
"""
import torch.nn as nn


class MHCA(nn.Module):
    def __init__(self, n_feats, ratio):
        """
        MHCA spatial-channel attention module.

        :param n_feats: The number of filters in the input.
        :param ratio: Channel reduction ratio.
        """
        super(MHCA, self).__init__()
        out_channels = int(n_feats // ratio)

        # Head 1: point-wise (1x1) convolutions with a channel bottleneck;
        # attends over channels without using spatial context.
        head_1 = [
            nn.Conv2d(in_channels=n_feats, out_channels=out_channels, kernel_size=1, padding=0, bias=True),
            nn.ReLU(True),
            nn.Conv2d(in_channels=out_channels, out_channels=n_feats, kernel_size=1, padding=0, bias=True)
        ]

        # Head 2: 3x3 kernel. The unpadded Conv2d shrinks each spatial
        # dimension by 2; the matching ConvTranspose2d restores the
        # original size.
        kernel_size_sam = 3
        head_2 = [
            nn.Conv2d(in_channels=n_feats, out_channels=out_channels, kernel_size=kernel_size_sam, padding=0, bias=True),
            nn.ReLU(True),
            nn.ConvTranspose2d(in_channels=out_channels, out_channels=n_feats, kernel_size=kernel_size_sam, padding=0, bias=True)
        ]

        # Head 3: 5x5 kernel for a wider receptive field; again the
        # transposed convolution undoes the spatial shrinkage.
        kernel_size_sam_2 = 5
        head_3 = [
            nn.Conv2d(in_channels=n_feats, out_channels=out_channels, kernel_size=kernel_size_sam_2, padding=0, bias=True),
            nn.ReLU(True),
            nn.ConvTranspose2d(in_channels=out_channels, out_channels=n_feats, kernel_size=kernel_size_sam_2, padding=0, bias=True)
        ]

        self.head_1 = nn.Sequential(*head_1)
        self.head_2 = nn.Sequential(*head_2)
        self.head_3 = nn.Sequential(*head_3)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Run the three heads in parallel on the same input.
        res_h1 = self.head_1(x)
        res_h2 = self.head_2(x)
        res_h3 = self.head_3(x)
        # Fuse the heads into a single attention map in [0, 1] ...
        m_c = self.sigmoid(res_h1 + res_h2 + res_h3)
        # ... and gate the input features with it.
        res = x * m_c
        return res
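

# A minimal usage sketch (not part of the original file): instantiate the
# module and check that the attention-gated output preserves the input shape.
# The feature count and reduction ratio below are arbitrary example values.
if __name__ == "__main__":
    import torch

    mhca = MHCA(n_feats=64, ratio=4)
    x = torch.randn(1, 64, 32, 32)  # (batch, channels, height, width)
    out = mhca(x)
    # All three heads preserve the spatial size, so the output matches x.
    assert out.shape == x.shape
    print(out.shape)  # torch.Size([1, 64, 32, 32])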