# show_seg.py (forked from fxia22/pointnet.pytorch)
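# Visualises part-segmentation results: loads one sample from the ShapeNet part
# dataset, runs a trained PointNetDenseCls model on it, and displays the point
# cloud coloured by its ground-truth and predicted part labels.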
from __future__ import print_function
from show3d_balls import showpoints
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import PointNetDenseCls
import torch.nn.functional as F
import matplotlib.pyplot as plt
#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--idx', type=int, default=0, help='model index')
opt = parser.parse_args()
print(opt)
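# Example invocation (the checkpoint path is illustrative; point --model at
# whichever segmentation checkpoint you trained):
#   python show_seg.py --model seg_model_24.pth --idx 5

# Load the test split of the Chair category; the dataset root is expected to be
# the extracted shapenetcore_partanno_segmentation_benchmark_v0 folder.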
d = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0', class_choice=['Chair'], train=False)
idx = opt.idx
print("model %d/%d" % (idx, len(d)))
point, seg = d[idx]
print(point.size(), seg.size())
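# Build a 10-colour HSV palette (as an array of RGB rows) and map the 1-based
# ground-truth part labels to colours. Note: newer matplotlib releases may drop
# plt.cm.get_cmap; matplotlib.colormaps["hsv"].resampled(10) is a replacement.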
point_np = point.numpy()
cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:,:3]
gt = cmap[seg.numpy() - 1, :]
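# Build the segmentation network with k=4 output classes (Chair has four parts)
# and load the trained weights; pass map_location='cpu' to torch.load if the
# checkpoint was saved on GPU and no GPU is available.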
classifier = PointNetDenseCls(k=4)
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()
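# PointNet expects input of shape (batch, channels, num_points): transpose the
# (N, 3) points to (3, N) and add a batch dimension. Variable is kept from the
# original code; on PyTorch >= 0.4 a plain tensor behaves the same way.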
point = point.transpose(1, 0).contiguous()
point = Variable(point.view(1, point.size()[0], point.size()[1]))
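# pred holds per-point class scores; the argmax over the class dimension (dim 2)
# gives the predicted part label for each point.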
pred, _ = classifier(point)
pred_choice = pred.data.max(2)[1]
print(pred_choice)
#print(pred_choice.size())
pred_color = cmap[pred_choice.numpy()[0], :]
#print(pred_color.shape)
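# Display the point cloud in the show3d_balls viewer, with ground-truth colours
# (gt) and predicted colours (pred_color) for the same points.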
showpoints(point_np, gt, pred_color)