Import libraries
import numpy as np
import scipy.io as sio
import sklearn
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from numpy.random import random
import random as rand
Load a dummy dataset of material and luminescence maps (I did not want to hardcode all the maps myself)
# Load a MATLAB file of image maps; the material/luminescence maps below are
# sampled from one of its variables rather than hand-coded.
dummy_dataset = sio.loadmat('../Dataset#1_2015.mat')  # this is a dataset with image maps
dummy_dataset_key = list(dummy_dataset)  # iterating a dict yields its keys
The general equation for a given pixel at location $(x,y)$ and wavelength $\lambda$ in a hyperspectral image $h$ is:
$$h(x,y,\lambda) = \left[\sum^R_{r=1} \pi_{r}(\lambda)\alpha_{r}(x,y)\right] \left[\sum^L_{l=1} \ell_{l}(\lambda)\beta_l(x,y)\right]$$
Pull random maps from the dummy dataset, and create randomized spectra.
def make_maps_and_spectra(dataset_size, num_materials, num_light_sources, spectra_length, source_cube=None):
    """Build randomized material/luminescence maps and spectra for a synthetic dataset.

    Parameters
    ----------
    dataset_size : int
        Number of samples to generate.
    num_materials : int
        Material maps/spectra per sample.
    num_light_sources : int
        Luminescence maps/spectra per sample.
    spectra_length : int
        Number of spectral bands per spectrum.
    source_cube : ndarray, optional
        (H, W, B) image cube whose bands are sampled as maps. Defaults to
        the module-level dummy dataset loaded above (preserving the
        original behavior).

    Returns
    -------
    tuple of four lists, each of length ``dataset_size``:
        material maps (num_materials, H, W), luminescence maps
        (num_light_sources, H, W), material spectra
        (num_materials, 1, 1, spectra_length), luminescence spectra
        (num_light_sources, 1, 1, spectra_length).
    """
    if source_cube is None:
        # Fall back to the globally loaded dummy dataset, as before.
        source_cube = dummy_dataset[dummy_dataset_key[3]]
    # rand.randint is inclusive on both ends; the original hard-coded 137,
    # which matches a 138-band cube — derive it from the cube instead.
    max_band = source_cube.shape[2] - 1

    def _random_maps(count):
        """Stack `count` randomly chosen bands of the cube into (count, H, W)."""
        picks = [np.expand_dims(source_cube[:, :, rand.randint(0, max_band)], axis=0)
                 for _ in range(count)]
        return np.concatenate(picks, axis=0)

    material_maps = []
    luminescence_maps = []
    material_spectra = []
    lum_spectra = []
    for _ in range(dataset_size):
        material_maps.append(_random_maps(num_materials))
        luminescence_maps.append(_random_maps(num_light_sources))
        # Spectra are uniform random in [0, 1), shaped to broadcast against
        # (num, H, W, 1) maps in make_hyperspectral.
        material_spectra.append(np.random.rand(num_materials, 1, 1, spectra_length))
        lum_spectra.append(np.random.rand(num_light_sources, 1, 1, spectra_length))
    return material_maps, luminescence_maps, material_spectra, lum_spectra
Use the maps and spectra to create a single hyperspectral image
def make_hyperspectral(mat_maps, lum_maps, mat_spectra, lum_spectra):
    """Combine maps and spectra into a single hyperspectral image.

    Parameters
    ----------
    mat_maps : ndarray (num_materials, H, W)
    lum_maps : ndarray (num_light_sources, H, W)
    mat_spectra : ndarray (num_materials, 1, 1, S)
    lum_spectra : ndarray (num_light_sources, 1, 1, S)

    Returns
    -------
    ndarray (1, H, W, S) — reflectance term plus luminescence term.
    NOTE(review): the markdown equation above multiplies the two bracketed
    sums, while this (and the autoencoder decoder below) adds them —
    confirm which model is intended.
    """
    # Add a trailing spectral dimension so each map broadcasts against its spectrum.
    reshaped_mat_maps = np.array([np.expand_dims(m, axis=-1) for m in mat_maps])
    # Multiply each material map by its spectrum, then sum over materials.
    reflectance_term = np.sum(np.multiply(reshaped_mat_maps, mat_spectra), axis=0)
    reshaped_lum_maps = np.array([np.expand_dims(m, axis=-1) for m in lum_maps])
    # Multiply each luminescence map by its spectrum, then sum over light sources.
    luminescence_term = np.sum(np.multiply(reshaped_lum_maps, lum_spectra), axis=0)
    # BUG FIX: the original returned reflectance_term + reflectance_term,
    # silently dropping the luminescence contribution.
    return np.expand_dims(reflectance_term + luminescence_term, axis=0)
Create a dataset of hyperspectral images of a specified size
def make_hyperspectral_dataset(mat_maps_list, lum_maps_list, mat_spectra, lum_spectra):
    """Stack per-sample hyperspectral images into one (N, H, W, S) array.

    Parameters are parallel lists/arrays of length N, one entry per sample,
    as produced by make_maps_and_spectra.

    Returns an ndarray of shape (N, H, W, S).
    """
    # Build all images first and concatenate once: the original concatenated
    # inside the loop, copying the growing dataset every iteration (O(n^2)).
    images = [make_hyperspectral(mat_maps_list[i], lum_maps_list[i],
                                 mat_spectra[i], lum_spectra[i])
              for i in range(len(mat_maps_list))]
    return np.concatenate(images)
Create 80 hyperspectral images for training, and 20 for testing. They each have 3 materials and 2 light sources
# Generate 80 training and 20 test samples; each sample mixes 3 materials
# and 2 light sources over 32 spectral bands.
mat_maps_train, lum_maps_train, mat_spectra_train, lum_spectra_train = make_maps_and_spectra(80, 3, 2, 32)
mat_maps_test, lum_maps_test, mat_spectra_test, lum_spectra_test = make_maps_and_spectra(20, 3, 2, 32)
# Combine maps and spectra into hyperspectral image stacks.
train_x = make_hyperspectral_dataset(mat_maps_train, lum_maps_train, mat_spectra_train, lum_spectra_train)
test_x = make_hyperspectral_dataset(mat_maps_test, lum_maps_test, mat_spectra_test, lum_spectra_test)
# Per the recorded output below: (80, 200, 200, 32) and (20, 200, 200, 32).
print(train_x.shape)
print(test_x.shape)
(80, 200, 200, 32) (20, 200, 200, 32)
# ----- Autoencoder definition -----
# Input: one 200x200 hyperspectral image with 32 spectral bands.
input_img = keras.Input(shape=(200,200, 32))
# Encoder branch 1: encode the maps, removing spectral information.
# Channels taper 32 -> 5, presumably one channel per component
# (3 material maps + 2 luminescence maps; see the splits below).
encoded_maps = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
encoded_maps = Conv2D(16, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(16, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(16, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(5, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(5, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(5, (3, 3), activation='relu', padding='same')(encoded_maps)
encoded_maps = Conv2D(5, (3, 3), activation='relu', padding='same')(encoded_maps)
# Encoder branch 2: encode the spectra, removing spatial information.
# First, shrink the maps (200x200 -> 50x50) so the pixels can be used as channels.
encoded_spectra = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(input_img)
encoded_spectra = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(encoded_spectra)  # (None, 50, 50, 32)
# Reshape the image so the spatial pixels become trailing channels.
remove_dim_1 = tf.stack(tf.split(encoded_spectra, 50, axis=1), -1)
remove_dim_2 = tf.split(remove_dim_1, 50, axis=2)
encoded_spectra = tf.concat(remove_dim_2, -1)  # (None, 1, 1, 32, 2500)
# Encode the spectra now that the spatial pixels sit in the channel axis.
# NOTE(review): the tensor here is 5-D while Conv2D conventionally expects
# 4-D input — confirm this builds on the Keras/TF version that produced the
# recorded output.
encoded_spectra = Conv2D(2500, (3, 3), activation='relu', padding='same')(encoded_spectra)
encoded_spectra = Conv2D(1250, (3, 3), activation='relu', padding='same')(encoded_spectra)
encoded_spectra = Conv2D(625, (3, 3), activation='relu', padding='same')(encoded_spectra)
encoded_spectra = Conv2D(312, (3, 3), activation='relu', padding='same')(encoded_spectra)
encoded_spectra = Conv2D(156, (3, 3), activation='relu', padding='same')(encoded_spectra)
encoded_spectra = Conv2D(78, (3, 3), activation='relu', padding='same')(encoded_spectra)
encoded_spectra = Conv2D(39, (3, 3), activation='relu', padding='same')(encoded_spectra)
encoded_spectra = Conv2D(19, (3, 3), activation='relu', padding='same')(encoded_spectra)
encoded_spectra = Conv2D(9, (3, 3), activation='relu', padding='same')(encoded_spectra)
encoded_spectra = Conv2D(5, (3, 3), activation='relu', padding='same')(encoded_spectra)
# Decoder: split the 5 encoded spectra (axis 4) and 5 encoded maps (axis 3),
# pair them up, and recombine into an image.
mat_spec1, mat_spec2, mat_spec3, lum_spec1, lum_spec2 = tf.split(encoded_spectra, num_or_size_splits=5, axis=4)
mat_map1, mat_map2, mat_map3, lum_map1, lum_map2 = tf.split(encoded_maps, num_or_size_splits=5, axis=3)
# Drop the singleton split axis so each spectrum broadcasts against its map.
mat_spec1 = tf.squeeze(mat_spec1, 4)
mat_spec2 = tf.squeeze(mat_spec2, 4)
mat_spec3 = tf.squeeze(mat_spec3, 4)
lum_spec1 = tf.squeeze(lum_spec1, 4)
lum_spec2 = tf.squeeze(lum_spec2, 4)
material_component = mat_map1*mat_spec1+mat_map2*mat_spec2+mat_map3*mat_spec3
lum_component = lum_map1*lum_spec1+lum_map2*lum_spec2
# NOTE(review): the components are ADDED here, whereas the markdown equation
# above multiplies the two bracketed sums — confirm which model is intended.
autoencoder = keras.Model(input_img, material_component+lum_component)
autoencoder.compile(optimizer='adam', loss='mean_squared_logarithmic_error')
# Train the autoencoder to reconstruct its own input (100 epochs, batch
# size 5, silenced progress output); test_x doubles as the validation set.
history = autoencoder.fit(train_x, train_x,
                          epochs=100,
                          batch_size=5,
                          shuffle=True,
                          validation_data=(test_x, test_x),
                          verbose = 0)
# Plot training and validation loss curves over the training run.
for curve in ('loss', 'val_loss'):
    plt.plot(history.history[curve])
plt.title('Loss of the Hyperspectral Image Simulator over Time')
plt.ylabel('Mean Squared Logarithmic Error')
plt.xlabel('Epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Display three randomly chosen bands of the dummy dataset side by side.
f, axarr = plt.subplots(1, 3, figsize=(30, 30))
sample_cube = dummy_dataset[dummy_dataset_key[3]]  # hoisted: same cube for all panels
for ax in axarr:
    ax.imshow(sample_cube[:, :, rand.randint(0, 137)])
    # FIX: the original turned the axis off for panels 0 and 2 but left the
    # middle panel's axis visible — apply it uniformly.
    ax.axis('off')
(-0.5, 199.5, 199.5, -0.5)