
Commit afa14ab

Author: xyFeng-guet
Commit message: code
1 parent 051dbf8

19 files changed: +1329 -0 lines

Overall Modal.png

955 KB

data/README.md

Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
Data description:

Au: Action Units
Bp: Body Posture
Em: Eye Movements
Hp: Head Posture

We use off-the-shelf tools to extract features from the videos we collected, to ease further processing: the action units, head postures, and eye movements are extracted with OpenFace 2.0, and the body postures with OpenPose.

Each modality includes 195 samples: we re-collated our dataset, finally choosing 24 participants' data (24 people * 3 times * 4 quadrants) and deleting the samples that fall on the quadrant axes.
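As a quick sanity check, the committed arrays can be loaded directly with NumPy. A minimal sketch, assuming the files sit under data/ as in this commit and that ems.rar has already been extracted to ems.npy (everything beyond the 195-sample count is an assumption):

import numpy as np

# Per-modality feature arrays committed under data/.
aus = np.load('data/aus.npy')   # action units
bps = np.load('data/bps.npy')   # body postures
hps = np.load('data/hps.npy')   # head postures
ems = np.load('data/ems.npy')   # eye movements (assumed extracted from ems.rar)

quality = np.load('data/quality.npy', allow_pickle=True)

# Each modality should hold 195 samples along the first axis.
for name, arr in (('aus', aus), ('bps', bps), ('hps', hps), ('ems', ems)):
    print(name, arr.shape)
assert all(len(a) == 195 for a in (aus, bps, hps, ems, quality))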

data/aus.npy

15.6 MB
Binary file not shown.

data/bps.npy

10.7 MB
Binary file not shown.

data/ems.rar

15.2 MB
Binary file not shown.

data/hps.npy

2.68 MB
Binary file not shown.

data/quality.npy

1.65 KB
Binary file not shown.

data/ra.npy

1.65 KB
Binary file not shown.

data/readiness.npy

1.65 KB
Binary file not shown.

src/core/dataset.py

Lines changed: 125 additions & 0 deletions
@@ -0,0 +1,125 @@
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader


class create_Dataset():
    def __init__(self, datasetName, labelType):
        DATA_MAP = {
            'emotake': self.__init_emotake,
            'other': None
        }
        data, multi_task, label = DATA_MAP[datasetName](labelType)
        self.d_l = {'data': data, 'multi_task': multi_task, 'label': label}

    def __init_emotake(self, labelType):
        # Per-modality feature arrays, one sample per row.
        # ems.npy is not committed directly; data/ems.rar presumably extracts to it.
        au_data = np.load('data/aus.npy')
        em_data = np.load('data/ems.npy')
        hp_data = np.load('data/hps.npy')

        # Body posture data has shape (batch, length, point1, point2):
        # e.g. in (300, 12, 2), 12 keypoints are extracted per frame
        # and 2 is the (x, y) coordinate of each keypoint.
        bp_data = np.load('data/bps.npy')
        # Flatten the keypoint dimensions: (batch, length, 12, 2) -> (batch, length, 24)
        bp_data = bp_data.reshape(bp_data.shape[0], bp_data.shape[1], -1)

        combine_data = [{'au': au_data[i], 'em': em_data[i], 'hp': hp_data[i], 'bp': bp_data[i]} for i in range(len(au_data))]

        quality_label = np.load('data/quality.npy', allow_pickle=True)
        quality_label = np.array(quality_label, dtype=int)
        ra_label = np.load('data/ra.npy', allow_pickle=True)
        ra_label = np.array(ra_label, dtype=int)
        readiness_label = np.load('data/readiness.npy', allow_pickle=True)
        readiness_label = np.array(readiness_label, dtype=int)

        # One (quality, ra, readiness) triple per sample for multi-task use.
        multi_task = []
        for i in range(len(quality_label)):
            multi_task.append([quality_label[i], ra_label[i], readiness_label[i]])

        if labelType == 'quality':
            combine_label = quality_label
        elif labelType == 'ra':
            combine_label = ra_label
        else:
            combine_label = readiness_label

        return combine_data, multi_task, combine_label

    def __len__(self):
        return len(self.d_l['label'])


def create_DataLoader(opt, dataset):
    # `dataset` is expected to be a (data, label) pair; see MDDataset below.
    dataset = MDDataset(dataset)
    dataLoader = DataLoader(
        dataset,
        batch_size=opt.batch_size,
        num_workers=opt.num_workers,
        shuffle=True
    )
    return dataLoader


class MDDataset(Dataset):
    def __init__(self, dataset):
        self.reconstruct_dataset(dataset)

    def reconstruct_dataset(self, dataset):
        data, label = dataset[0], dataset[1]
        # Regroup the per-sample dicts into one array per modality.
        au, em, hp, bp = [], [], [], []
        for i in range(len(data)):
            au.append(data[i]['au'])
            em.append(data[i]['em'])
            hp.append(data[i]['hp'])
            bp.append(data[i]['bp'])

        self.au = torch.tensor(np.array(au), dtype=torch.float32)
        self.em = torch.tensor(np.array(em), dtype=torch.float32)
        self.hp = torch.tensor(np.array(hp), dtype=torch.float32)
        self.bp = torch.tensor(np.array(bp), dtype=torch.float32)

        # All samples within a modality share one sequence length.
        self.au_lengths = len(self.au[0])
        self.em_lengths = len(self.em[0])
        self.hp_lengths = len(self.hp[0])
        self.bp_lengths = len(self.bp[0])

        self.__gen_mask()
        self.label = torch.tensor(label)

    def __gen_mask(self):
        # All-False masks: the sequences carry no padding, so no position is masked.
        mask_list = []
        for data in [self.au, self.em, self.hp, self.bp]:
            mask = torch.zeros(data.shape[0], data.shape[1], dtype=torch.bool)
            mask_list.append(mask)
        self.padding_mask_au = mask_list[0]
        self.padding_mask_em = mask_list[1]
        self.padding_mask_hp = mask_list[2]
        self.padding_mask_bp = mask_list[3]

    def __len__(self):
        return len(self.label)

    def __getitem__(self, index):
        sample = {
            'au': self.au[index],
            'em': self.em[index],
            'hp': self.hp[index],
            'bp': self.bp[index],
            'au_lengths': self.au_lengths,
            'em_lengths': self.em_lengths,
            'hp_lengths': self.hp_lengths,
            'bp_lengths': self.bp_lengths,
            'padding_mask_au': self.padding_mask_au[index],
            'padding_mask_em': self.padding_mask_em[index],
            'padding_mask_hp': self.padding_mask_hp[index],
            'padding_mask_bp': self.padding_mask_bp[index],
            'label': self.label[index],
            'index': index
        }
        return sample
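For context, a hypothetical end-to-end usage sketch; the opt namespace and the (data, label) pair passed to create_DataLoader are stand-ins for whatever the training script actually supplies:

from types import SimpleNamespace

# Assumes src/ is importable as a package from the repo root.
from src.core.dataset import create_Dataset, create_DataLoader

# Hypothetical options object; the real script presumably builds this from argparse.
opt = SimpleNamespace(batch_size=16, num_workers=0)

ds = create_Dataset('emotake', 'quality')
# create_DataLoader indexes its argument as dataset[0] (data) and dataset[1] (label),
# so a (data, label) tuple is passed here.
loader = create_DataLoader(opt, (ds.d_l['data'], ds.d_l['label']))

batch = next(iter(loader))
print(batch['au'].shape, batch['label'].shape)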
