The output point cloud is too concentrated #16

nuclear-missile opened this issue Oct 5, 2022 · 0 comments
We retrained the network using Chamfer loss. Although the loss converged to about 1, most of the points in the generated point cloud were concentrated at the center, so we could barely make out the shape. We trained with both the FC-Final and GraphX-up-Final network structures. Since neither split.py nor the preprocessed data can be downloaded, we wrote our own dataloader and training function based on the ShapeNet dataset. We would like to know what happened; any help would be sincerely appreciated.
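For reference on the loss scale, here is a minimal sketch of the symmetric Chamfer distance (brute force; the ~3 figure in the comment assumes unit-variance Gaussian targets). It shows that a prediction collapsed to a single point can still score a moderate loss against an unnormalized target cloud:

```python
# Minimal sanity check, assuming the symmetric Chamfer distance
# (mean nearest-neighbor squared distance in both directions).
import numpy as np

def chamfer(a, b):
    # a: (N, 3), b: (M, 3); brute-force pairwise squared distances
    d = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
    return d.min(axis=1).mean() + d.min(axis=0).mean()

gt = np.random.randn(2000, 3)      # stand-in for an unnormalized target cloud
collapsed = np.zeros((2000, 3))    # every predicted point at the origin
print(chamfer(collapsed, gt))      # ~3 for unit-variance Gaussian targets
```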

The dataset code is below. (We randomly selected 40,000 samples as the training set and 3,783 samples as the test set, using all 5 shooting angles of each sample. We simply packaged the coordinate and image information into PKL files without any preprocessing. The function `_init_pointcloud_loader` is copied from your code.)
```python
import pickle
import random
from copy import deepcopy

import numpy as np


def _init_pointcloud_loader(num_points):
    # Sample a random initial point cloud inside the camera frustum
    # (copied from the repository code).
    Z = np.random.rand(num_points) + 1.
    h = np.random.uniform(10., 214., size=(num_points,))
    w = np.random.uniform(10., 214., size=(num_points,))
    X = (w - 111.5) / 248. * -Z
    Y = (h - 111.5) / 248. * Z
    X = np.reshape(X, (-1, 1))
    Y = np.reshape(Y, (-1, 1))
    Z = np.reshape(Z, (-1, 1))
    XYZ = np.concatenate((X, Y, Z), 1)
    return XYZ.astype('float32')


class _iter_split:
    def __init__(self, dl):
        self.bs = dl.bs
        self.datas_files = deepcopy(dl.datas_files)
        self.npoints = dl.npoints
        random.shuffle(self.datas_files)
        self.iter_files = 0
        self.iter_elements = 0
        self.datas = []
        self._read_file()

    def _read_file(self):
        # Load the next PKL shard and shuffle its samples.
        del self.datas
        with open(self.datas_files[self.iter_files], 'rb') as f:
            self.datas = pickle.load(f)
        print(self.datas_files[self.iter_files])
        random.shuffle(self.datas)
        self.iter_files += 1
        self.iter_elements = 0

    def __next__(self):
        if self.iter_elements == len(self.datas):
            if self.iter_files == len(self.datas_files):
                del self.datas
                raise StopIteration
            else:
                self._read_file()
        ret = self.datas[self.iter_elements: min(self.iter_elements + self.bs, len(self.datas))]
        # Each sample becomes (initial point cloud, image, ground-truth point cloud).
        ret = [(_init_pointcloud_loader(self.npoints), i[0], i[1]) for i in ret]
        self.iter_elements = min(self.iter_elements + self.bs, len(self.datas))
        return ret

    def __iter__(self):
        return self


class dataloader:
    def __init__(self, batch_size, npoints=2000):
        self.bs = batch_size
        self.npoints = npoints
        self.datas_files = ['G:\\data\\' + i for i in
                            ['data0.pkl', 'data1.pkl', 'data2.pkl', 'data3.pkl',
                             'data4.pkl', 'data5.pkl', 'data6.pkl', 'data7.pkl']]

    def dataloader_split(self):
        return _iter_split(self)
```
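For completeness, a batch from this loader can be inspected as follows (a minimal smoke test; the image and ground-truth arrays come out exactly as packed in the PKL shards):

```python
# Minimal smoke test for the loader above; each batch entry is
# (initial point cloud, image, ground-truth point cloud).
dl = dataloader(batch_size=4, npoints=2000)
for batch in dl.dataloader_split():
    init_pc, img, gt_pc = batch[0]
    print(init_pc.shape)  # (2000, 3)
    break
```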

The train function we use is below (we trained for 2 epochs with GraphX-up-Final and 16 epochs with FC-Final).
```python
from functools import partial

import gin
import numpy as np
import torch

import dataloader  # the loader module shown above
# PointcloudDeformNet comes from the repository code.

dev = torch.device('cuda')


@gin.configurable('GraphX')
def train_valid(data_root, name, img_enc, pc_enc, pc_dec, optimizer, scheduler, adain=True, projection=True,
                decimation=None, color_img=False, n_points=250, bs=100, lr=2e-5, weight_decay=1e-5, gamma=.3,
                milestones=(5, 8), n_epochs=10, print_freq=1000, val_freq=10000, checkpoint_folder=None):
    if decimation is not None:
        pc_dec = partial(pc_dec, decimation=decimation)
    bs = 4
    net = PointcloudDeformNet((bs,) + (3 if color_img else 1, 137, 137), (bs, n_points, 3), img_enc, pc_enc, pc_dec,
                              adain=adain, projection=projection, weight_decay=None).to(device=dev)
    # print(net)
    solver = (torch.optim.Adam(net.trainable, lr=lr, weight_decay=weight_decay) if optimizer is None
              else optimizer(net.trainable, lr, weight_decay=weight_decay))
    scheduler = scheduler(solver, milestones=milestones, gamma=gamma) if scheduler is not None else None

    train_data = dataloader.dataloader(bs, n_points)
    epch = 1
    while True:
        print('epoch: ', epch)
        for i in train_data.dataloader_split():
            # Stack the batch into (initial clouds, images, ground-truth clouds).
            loss = net.get_loss((torch.tensor(np.array([i[j][0] for j in range(bs)])).to(device=dev),
                                 torch.tensor(np.array([[i[j][1]] for j in range(bs)])).to(device=dev),
                                 [torch.tensor(i[j][2]).to(device=dev) for j in range(bs)]))
            solver.zero_grad()
            loss.backward()
            solver.step()
            if scheduler is not None:
                scheduler.step()  # note: stepped per batch, not per epoch
            print(loss.item(), solver.param_groups[0]['lr'])
        torch.save({'model': net, 'optim': solver, 'scheduler': scheduler}, f'model{epch}.pkl')
        epch += 1
```
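Training is launched through gin roughly like this (a sketch; the config file name `graphx.gin` is a placeholder, and the actual bindings supply `img_enc`, `pc_enc`, `pc_dec`, and the other parameters):

```python
# Hypothetical entry point; 'graphx.gin' stands in for a config file
# that binds all required GraphX parameters.
import gin

gin.parse_config_file('graphx.gin')
train_valid()  # every argument is supplied through the gin bindings
```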
