3D point cloud generation from 3D triangular mesh
David de la Iglesia
572

Hey, David! Sorry to interrupt you. I’m building a CNN model by using Keras with Theano as its backend. There is a problem bothering me with the Flattern layer. I’ve seen you help others with a similar problem on https://stackoverflow.com/questions/42699956/3d-convolutional-neural-network-input-shape

Here are the problem and the code.

ValueError: The shape of the input to “Flatten” is not fully defined (got (0, 6, 80). Make sure to pass a complete “input_shape” or “batch_input_shape” argument to the first layer in your model.

# Import all the things we need — -
# by setting env variables before Keras import you can set up which backend and which GPU it uses
# %matplotlib inline
import os,random
os.environ[“KERAS_BACKEND”] = “theano”
#os.environ[“KERAS_BACKEND”] = “tensorflow”
#os.environ[“THEANO_FLAGS”] = “device=gpu%d”%(1)
import numpy as np
import theano as th
import theano.tensor as T
from keras.utils import np_utils
import keras.models as models
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten
from keras.layers.noise import GaussianNoise
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.regularizers import *
from keras.optimizers import adam
import matplotlib.pyplot as plt
import seaborn as sns
import cPickle, random, sys, keras

# Load the dataset …
# You will need to seperately download or generate this file
Xd = cPickle.load(open(“RML2016.10a_dict.dat”,’rb’))
snrs,mods = map(lambda j: sorted(list(set(map(lambda x: x[j], Xd.keys())))), [1,0])
X = [] 
lbl = []
for mod in mods:
 for snr in snrs:
 X.append(Xd[(mod,snr)])
 for i in range(Xd[(mod,snr)].shape[0]): lbl.append((mod,snr))
X = np.vstack(X)

# Partition the data
# into training and test sets of the form we can train/test on 
# while keeping SNR and Mod labels handy for each
np.random.seed(2016)
n_examples = X.shape[0]
n_train = n_examples * 0.5
train_idx = np.random.choice(range(0,n_examples), size=int(n_train), replace=False)
test_idx = list(set(range(0,n_examples))-set(train_idx))
X_train = X[train_idx]
X_test = X[test_idx]
def to_onehot(yy):
 yy1 = np.zeros([len(yy), max(yy)+1])
 yy1[np.arange(len(yy)),yy] = 1
 return yy1
Y_train = to_onehot(map(lambda x: mods.index(lbl[x][0]), train_idx))
Y_test = to_onehot(map(lambda x: mods.index(lbl[x][0]), test_idx))

in_shp = list(X_train.shape[1:])
print X_train.shape
print in_shp
in_shp=[[],1,2,128]
print in_shp

classes = mods

# Build VT-CNN2 Neural Net model using Keras primitives — 
# — Reshape [N,2,128] to [N,1,2,128] on input
# — Pass through 2 2DConv/ReLu layers
# — Pass through 2 Dense layers (ReLu and Softmax)
# — Perform categorical cross entropy optimization

#input_shape=in_shp
dr = 0.5 # dropout rate (%)
model = models.Sequential()
input_shape=in_shp
#model.add(Reshape(in_shp,input_shape=in_shp))
#print input_shape
model.add(ZeroPadding2D((0, 2)))
model.add(Conv2D(256, (1, 3), padding=’valid’, activation=”relu”, name=”conv1", kernel_initializer=’glorot_uniform’))
model.add(Dropout(dr))
model.add(ZeroPadding2D((0, 2)))
model.add(Conv2D(80, (2, 3), padding=”valid”, activation=”relu”, name=”conv2", kernel_initializer=’glorot_uniform’))
model.add(Dropout(dr))
model.add(Flatten())
model.add(Dense(256, activation=’relu’, kernel_initializer=’he_normal’, name=”dense1"))
model.add(Dropout(dr))
model.add(Dense( len(classes), kernel_initializer=’he_normal’, name=”dense2" ))
model.add(Activation(‘softmax’))
model.add(Reshape([len(classes)]))
model.compile(loss=’categorical_crossentropy’, optimizer=’adam’)
model.summary()

# Set up some params 
epoch = 100 # number of epochs to train on
batch_size = 1024 # training batch size

# perform training …
# — call the main training loop in keras for our network+dataset
filepath = ‘convmodrecnets_CNN2_0.5.wts.h5’
history = model.fit(X_train,
 Y_train,
 batch_size=batch_size,
 epoch=epoch,
 show_accuracy=False,
 verbose=2,
 validation_data=(X_test, Y_test),
 callbacks = [
 keras.callbacks.ModelCheckpoint(filepath, monitor=’val_loss’, verbose=0, save_best_only=True, mode=’auto’),
 keras.callbacks.EarlyStopping(monitor=’val_loss’, patience=5, verbose=0, mode=’auto’)
 ])
# we re-load the best weights once training is finished
model.load_weights(filepath)

# Show simple version of performance
score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0, batch_size=batch_size)
print score

# Show loss curves 
plt.figure()
plt.title(‘Training performance’)
plt.plot(history.epoch, history.history[‘loss’], label=’train loss+error’)
plt.plot(history.epoch, history.history[‘val_loss’], label=’val_error’)
plt.legend()

def plot_confusion_matrix(cm, title=’Confusion matrix’, cmap=plt.cm.Blues, labels=[]):
 plt.imshow(cm, interpolation=’nearest’, cmap=cmap)
 plt.title(title)
 plt.colorbar()
 tick_marks = np.arange(len(labels))
 plt.xticks(tick_marks, labels, rotation=45)
 plt.yticks(tick_marks, labels)
 plt.tight_layout()
 plt.ylabel(‘True label’)
 plt.xlabel(‘Predicted label’)

# Plot confusion matrix
test_Y_hat = model.predict(X_test, batch_size=batch_size)
conf = np.zeros([len(classes),len(classes)])
confnorm = np.zeros([len(classes),len(classes)])
for i in range(0,X_test.shape[0]):
 j = list(Y_test[i,:]).index(1)
 k = int(np.argmax(test_Y_hat[i,:]))
 conf[j,k] = conf[j,k] + 1
for i in range(0,len(classes)):
 confnorm[i,:] = conf[i,:] / np.sum(conf[i,:])
plot_confusion_matrix(confnorm, labels=classes)

# Plot confusion matrix
acc = {}
for snr in snrs:

# extract classes @ SNR
 test_SNRs = map(lambda x: lbl[x][1], test_idx)
 test_X_i = X_test[np.where(np.array(test_SNRs)==snr)]
 test_Y_i = Y_test[np.where(np.array(test_SNRs)==snr)]

# estimate classes
 test_Y_i_hat = model.predict(test_X_i)
 conf = np.zeros([len(classes),len(classes)])
 confnorm = np.zeros([len(classes),len(classes)])
 for i in range(0,test_X_i.shape[0]):
 j = list(test_Y_i[i,:]).index(1)
 k = int(np.argmax(test_Y_i_hat[i,:]))
 conf[j,k] = conf[j,k] + 1
 for i in range(0,len(classes)):
 confnorm[i,:] = conf[i,:] / np.sum(conf[i,:])
 plt.figure()
 plot_confusion_matrix(confnorm, labels=classes, title=”ConvNet Confusion Matrix (SNR=%d)”%(snr))
 
 cor = np.sum(np.diag(conf))
 ncor = np.sum(conf) — cor
 print “Overall Accuracy: “, cor / (cor+ncor)
 acc[snr] = 1.0*cor/(cor+ncor)

# Save results to a pickle file for plotting later
print acc
fd = open(‘results_cnn2_d0.5.dat’,’wb’)
cPickle.dump( (“CNN2”, 0.5, acc) , fd )

# Plot accuracy curve
plt.plot(snrs, map(lambda x: acc[x], snrs))
plt.xlabel(“Signal to Noise Ratio”)
plt.ylabel(“Classification Accuracy”)
plt.title(“CNN2 Classification Accuracy on RadioML 2016.10 Alpha”)

Hope you give some advice, thanks!