{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.13"},"kaggle":{"accelerator":"none","dataSources":[{"sourceId":7708506,"sourceType":"datasetVersion","datasetId":4500725},{"sourceId":12184,"sourceType":"modelInstanceVersion","modelInstanceId":9887},{"sourceId":12187,"sourceType":"modelInstanceVersion","modelInstanceId":9890}],"dockerImageVersionId":30646,"isInternetEnabled":false,"language":"python","sourceType":"notebook","isGpuEnabled":false}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"This version has all three outputs, calculates strain (ezz) and uses a mask input.","metadata":{}},{"cell_type":"markdown","source":"It should be noted that development and training was done on a local machine. Importing the dataset into Kaggle seems to have changed the way the images are ordered, hence the train/validation/test sets defined are different despite using the same random seed.\n\nThis has the knock on effect of meaning that the 'test' slices used on the paper will likely not be in the test set now if running the notebook below with the currently defined random seed. This will have an impact on all outputs and stats generated if loading one of the provided trained models (which were used for generating the paper results).\n","metadata":{}},{"cell_type":"code","source":"#importing the libraries used\nimport os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tifffile as tiff\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense\nfrom tensorflow.keras.models import Model\nfrom scipy.ndimage import zoom\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout, concatenate, BatchNormalization, Multiply, UpSampling2D\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import LearningRateScheduler\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom keras.regularizers import l2, l1\nfrom scipy.ndimage import binary_dilation, binary_erosion\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib.gridspec as gridspec\n\n%matplotlib inline","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:40:40.579644Z","iopub.execute_input":"2024-02-26T23:40:40.580429Z","iopub.status.idle":"2024-02-26T23:40:59.195292Z","shell.execute_reply.started":"2024-02-26T23:40:40.580383Z","shell.execute_reply":"2024-02-26T23:40:59.193858Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"

Importing the Image Data

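The cells below read each TIFF slice with tifffile, replace NaNs with zero, and resample every image onto a common grid with nearest-neighbour interpolation (order=0), which avoids inventing new grey values. A minimal sketch of that core step, assuming a hypothetical file name:

```python
import numpy as np
import tifffile as tiff
from scipy.ndimage import zoom

img = tiff.imread('slice_0001.tif')   # hypothetical example slice
img[np.isnan(img)] = 0                # zero NaNs so they cannot propagate
common_shape = (256, 256)
resized = zoom(img, (common_shape[0] / img.shape[0], common_shape[1] / img.shape[1]),
               mode='nearest', order=0)  # order=0: nearest-neighbour resampling
```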
","metadata":{}},{"cell_type":"code","source":"# Define directory paths\ntrainPath = '/kaggle/input/d2im-prototype-dataset-porcine-vertebra-slices/Final/Input/Scan'\nmaskPath = '/kaggle/input/d2im-prototype-dataset-porcine-vertebra-slices/Final/Input/Mask'\ntestPathU = '/kaggle/input/d2im-prototype-dataset-porcine-vertebra-slices/Final/Target/U'\ntestPathV = '/kaggle/input/d2im-prototype-dataset-porcine-vertebra-slices/Final/Target/V'\ntestPathW = '/kaggle/input/d2im-prototype-dataset-porcine-vertebra-slices/Final/Target/W'\n\n#Define function to import images\ndef import_images(folder_path, name):\n image_data_list = []\n file_path = os.path.join(folder_path, name)\n img = tiff.imread(file_path)\n img[np.isnan(img)] = 0\n image_data_list.append(img)\n return image_data_list\n\n# Get the list of files in testPathU (this was chosen generically as files in all directories have the same name)\ntest_u_files = set([f for f in os.listdir(testPathU) if os.path.isfile(os.path.join(testPathU, f))])\n\n#Import all images into the bone_data array\n# Note, the train images have had some poor images removed manually where there was not enough bone.\n# This function will only import images (from all folders) if they exist in the trainPath\n\nbone_data = [[], [], [], [], []]\nfor file in os.listdir(trainPath): \n if file in test_u_files: \n bone_data[0].extend(import_images(trainPath, file))\n bone_data[1].extend(import_images(maskPath, file))\n bone_data[2].extend(import_images(testPathU, file))\n bone_data[3].extend(import_images(testPathV, file))\n bone_data[4].extend(import_images(testPathW, file))","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:41:49.406442Z","iopub.execute_input":"2024-02-26T23:41:49.406831Z","iopub.status.idle":"2024-02-26T23:41:58.763528Z","shell.execute_reply.started":"2024-02-26T23:41:49.406803Z","shell.execute_reply":"2024-02-26T23:41:58.762200Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Import the Scan slices\ncommon_shape = (256, 256)\n\nresized_image_data = []\nfor img in bone_data[0]:\n resized_img = zoom(img, (common_shape[0] / img.shape[0], common_shape[1] / img.shape[1]), mode='nearest', order=0)\n resized_image_data.append(resized_img)\n\nscan = np.array(resized_image_data)\nscan = scan/255\n\n# Plot an example of the output image\nexample_index = 170\n# You can change this to plot different examples\nplt.imshow(scan[example_index], cmap='gray')\nplt.title(f\"Example {example_index+1} of Input Image\")\nplt.colorbar()\nplt.show()","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:42:03.657065Z","iopub.execute_input":"2024-02-26T23:42:03.657867Z","iopub.status.idle":"2024-02-26T23:42:04.480292Z","shell.execute_reply.started":"2024-02-26T23:42:03.657832Z","shell.execute_reply":"2024-02-26T23:42:04.479329Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Import the mask slices\ncommon_shape = (20, 20)\nresized_image_data = []\n\nfor img in bone_data[1]:\n resized_img = zoom(img, (common_shape[0] / img.shape[0], common_shape[1] / img.shape[1]), order=0, grid_mode=False)\n resized_image_data.append(resized_img)\n\nmask = np.array(resized_image_data)\n\n# Perform binary dilation on the mask\nmask = binary_dilation(mask)\n\n# Plot an example of the output image\nexample_index = 111 # You can change this to plot different examples\nplt.imshow(mask[example_index], cmap='gray')\nplt.title(f\"Example {example_index+1} of Mask 
Image\")\nplt.colorbar()\nplt.show()","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:42:08.323401Z","iopub.execute_input":"2024-02-26T23:42:08.324207Z","iopub.status.idle":"2024-02-26T23:42:08.711729Z","shell.execute_reply.started":"2024-02-26T23:42:08.324155Z","shell.execute_reply":"2024-02-26T23:42:08.710384Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Import the u target\ncommon_shape = (20, 20)\n\nresized_image_data = []\nfor img in bone_data[2]:\n resized_img = zoom(img, (common_shape[0] / img.shape[0], common_shape[1] / img.shape[1]), mode='nearest', order=0)\n resized_image_data.append(resized_img)\n# Convert the list of resized images into a NumPy array\noutput_ims_u = np.array(resized_image_data)\n\n\n# Plot an example of the output image\nexample_index = 111 # You can change this to plot different examples\nplt.imshow(output_ims_u[example_index], cmap='coolwarm')\nplt.title(f\"Example {example_index+1} of u Image\")\nplt.colorbar()\nplt.show()","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:42:18.689584Z","iopub.execute_input":"2024-02-26T23:42:18.689979Z","iopub.status.idle":"2024-02-26T23:42:19.048816Z","shell.execute_reply.started":"2024-02-26T23:42:18.689949Z","shell.execute_reply":"2024-02-26T23:42:19.047764Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Import the v target\ncommon_shape = (20, 20)\n\nresized_image_data = []\nfor img in bone_data[3]:\n resized_img = zoom(img, (common_shape[0] / img.shape[0], common_shape[1] / img.shape[1]), mode='nearest', order=0)\n resized_image_data.append(resized_img)\n\n# Convert the list of resized images into a NumPy array\noutput_ims_v = np.array(resized_image_data)\n\n# Plot an example of the output image\nexample_index = 56 # You can change this to plot different examples\nplt.imshow(output_ims_v[example_index], cmap='coolwarm')\nplt.title(f\"Example {example_index+1} of v Image\")\nplt.colorbar()\nplt.show()","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:42:23.229742Z","iopub.execute_input":"2024-02-26T23:42:23.230138Z","iopub.status.idle":"2024-02-26T23:42:23.598407Z","shell.execute_reply.started":"2024-02-26T23:42:23.230109Z","shell.execute_reply":"2024-02-26T23:42:23.596979Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Import the w target\ncommon_shape = (20, 20)\n\nresized_image_data = []\nfor img in bone_data[4]:\n resized_img = zoom(img, (common_shape[0] / img.shape[0], common_shape[1] / img.shape[1]), mode='nearest', order=0)\n resized_image_data.append(resized_img)\n\n# Convert the list of resized images into a NumPy array\noutput_ims_w = np.array(resized_image_data)\n\n# Plot an example of the output image\nexample_index = 69 # You can change this to plot different examples\nplt.imshow(output_ims_w[example_index], cmap='coolwarm')\nplt.title(f\"Example {example_index+1} of w Image\")\nplt.colorbar()\nplt.show()","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:42:27.849713Z","iopub.execute_input":"2024-02-26T23:42:27.850326Z","iopub.status.idle":"2024-02-26T23:42:28.215713Z","shell.execute_reply.started":"2024-02-26T23:42:27.850267Z","shell.execute_reply":"2024-02-26T23:42:28.214403Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"

Split the Data

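Each array below is split with the same random seed so slices stay aligned across inputs and targets: 10% is held out for testing, then 1/9 of the remaining 90% (10% overall) for validation, giving an 80/10/10 split. An equivalent, easier-to-audit sketch that splits indices once and reuses them (assuming the `scan` and `output_ims_u` arrays built above):

```python
import numpy as np
from sklearn.model_selection import train_test_split

RS = 3623
idx = np.arange(len(scan))                 # one index per slice
idx_nt, idx_test = train_test_split(idx, test_size=0.1, random_state=RS, shuffle=True)
idx_train, idx_val = train_test_split(idx_nt, test_size=1/9, random_state=RS, shuffle=True)
# e.g. input_train_1, target_train_u = scan[idx_train], output_ims_u[idx_train]
```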
","metadata":{}},{"cell_type":"code","source":"# Random Seed\nRS = 3623\n\n# displacements\ntarget_NT_u, target_test_u = train_test_split(output_ims_u, test_size=0.1, random_state=RS, shuffle=True)\ntarget_train_u, target_val_u = train_test_split(target_NT_u, test_size=1/9, random_state=RS, shuffle=True)\ntarget_NT_v, target_test_v = train_test_split(output_ims_v, test_size=0.1, random_state=RS, shuffle=True)\ntarget_train_v, target_val_v = train_test_split(target_NT_v, test_size=1/9, random_state=RS, shuffle=True)\ntarget_NT_w, target_test_w = train_test_split(output_ims_w, test_size=0.1, random_state=RS, shuffle=True)\ntarget_train_w, target_val_w = train_test_split(target_NT_w, test_size=1/9, random_state=RS, shuffle=True)\n\n# scans\ninput_NT, input_test_1 = train_test_split(scan, test_size=0.1, random_state=RS, shuffle=True)\ninput_train_1, input_val_1 = train_test_split(input_NT, test_size=1/9, random_state=RS, shuffle=True)\n\n# Masks\ninput_NT, input_test_2 = train_test_split(mask, test_size=0.1, random_state=RS, shuffle=True)\ninput_train_2, input_val_2 = train_test_split(input_NT, test_size=1/9, random_state=RS, shuffle=True)","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:42:32.941061Z","iopub.execute_input":"2024-02-26T23:42:32.941603Z","iopub.status.idle":"2024-02-26T23:42:33.078954Z","shell.execute_reply.started":"2024-02-26T23:42:32.941555Z","shell.execute_reply":"2024-02-26T23:42:33.077788Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"

Create & Train CNN

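The model takes two inputs: the 256x256 scan feeds a VGG-style convolutional stack, while the 20x20 mask is used only at the output, where each dense head's flattened 20x20 displacement field is multiplied element-wise by the flattened mask so predictions outside the bone are forced to zero. A minimal sketch of just that masking trick (not the full network):

```python
from tensorflow.keras.layers import Input, Flatten, Dense, Multiply
from tensorflow.keras.models import Model

feat = Input(shape=(512,))          # stand-in for the CNN feature vector
msk_in = Input(shape=(20, 20, 1))   # binary bone mask
u = Dense(20 * 20, activation=None)(feat)                   # unmasked prediction
u_masked = Multiply(name='out_u')([u, Flatten()(msk_in)])   # zeroed outside the bone
demo = Model([feat, msk_in], u_masked)
```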
","metadata":{}},{"cell_type":"code","source":"from tensorflow.keras.layers import Layer, Input, Conv2D, BatchNormalization, MaxPooling2D, Dropout, Flatten, Dense, Multiply\n\ndef create_cnn(input_shape1, input_shape2, output_shape, dropout_rate, l2_lambda):\n input_layer1 = Input(shape=input_shape1)\n input_layer2 = Input(shape=input_shape2)\n\n x = input_layer1\n # Convolutional layers for the first image\n x = BatchNormalization()(x)\n x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2))(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2))(x)\n x = BatchNormalization()(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2))(x)\n x = BatchNormalization()(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2))(x)\n x = Dropout(dropout_rate)(x) \n \n x = Flatten()(x)\n x = Dropout(dropout_rate)(x) \n x = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x = Dropout(dropout_rate)(x) \n x = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x = Dropout(dropout_rate)(x) \n \n x = BatchNormalization()(x)\n x_u = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x_u = Dropout(dropout_rate)(x_u) \n \n x_v = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x_v = Dropout(dropout_rate)(x_v) \n \n x_w = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x_w = Dropout(dropout_rate)(x_w) \n\n x_u = Dense(output_shape[0] * output_shape[1], activation=None)(x_u)\n x_v = Dense(output_shape[0] * output_shape[1], activation=None)(x_v)\n x_w = Dense(output_shape[0] * output_shape[1], activation=None)(x_w)\n \n xm2 = Flatten()(input_layer2)\n output_layer_u = Multiply(name=\"out_u\")([x_u, xm2])\n output_layer_v = Multiply(name=\"out_v\")([x_v, xm2])\n output_layer_w = Multiply(name=\"out_w\")([x_w, xm2])\n \n model = Model(inputs=[input_layer1, input_layer2], outputs=[output_layer_u, output_layer_v, output_layer_w])\n return model\n\n\n\n\n# Define input shapes and output shape\ninput_shape1 = (input_train_1.shape[1], input_train_1.shape[2], 1)\ninput_shape2 = (input_train_2.shape[1], input_train_2.shape[2], 1)\noutput_shape = (target_train_u.shape[1], target_train_u.shape[2])\n\n# Define regularisation and create the model\ndropout_rate = 0.5\nl2_lambda = 0.001\nmodel = create_cnn(input_shape1, input_shape2, output_shape, dropout_rate, l2_lambda)\nmodel.summary()\n\n# Compile the model\nmodel.compile(optimizer='adam', loss={'out_u': 'mean_squared_error', 'out_v': 'mean_squared_error', 'out_w': 'mean_squared_error'})\n\n# Reshape input and target data\nnum_channels = 1\ninput_data1 = input_train_1.reshape(input_train_1.shape[0], input_train_1.shape[1], input_train_1.shape[2], num_channels)\ninput_data_val1 = input_val_1.reshape(input_val_1.shape[0], input_val_1.shape[1], input_val_1.shape[2], num_channels)\ninput_data2 = input_train_2.reshape(input_train_2.shape[0], input_train_2.shape[1], input_train_2.shape[2], num_channels)\ninput_data_val2 = 
input_val_2.reshape(input_val_2.shape[0], input_val_2.shape[1], input_val_2.shape[2], num_channels)\ntarget_data_u = target_train_u.reshape(target_train_u.shape[0], -1)\ntarget_data_val_u = target_val_u.reshape(target_val_u.shape[0], -1)\ntarget_data_v = target_train_v.reshape(target_train_v.shape[0], -1)\ntarget_data_val_v = target_val_v.reshape(target_val_v.shape[0], -1)\ntarget_data_w = target_train_w.reshape(target_train_w.shape[0], -1)\ntarget_data_val_w = target_val_w.reshape(target_val_w.shape[0], -1)\n\n# Create data generators\ndef data_generator(input_data1, input_data2, target_data_1, target_data_2, target_data_3, batch_size):\n num_samples = input_data1.shape[0]\n while True:\n for start_idx in range(0, num_samples, batch_size):\n end_idx = start_idx + batch_size\n yield ([input_data1[start_idx:end_idx], input_data2[start_idx:end_idx]], [target_data_1[start_idx:end_idx], target_data_2[start_idx:end_idx], target_data_3[start_idx:end_idx]])\n\n# Train the model using the data generator\nbatch_size = 50\ntrain_gen = data_generator(input_data1, input_data2, target_data_u, target_data_v, target_data_w, batch_size)\nval_data = ([input_data_val1, input_data_val2], [target_data_val_u, target_data_val_v, target_data_val_w])","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:42:40.998456Z","iopub.execute_input":"2024-02-26T23:42:40.998845Z","iopub.status.idle":"2024-02-26T23:42:42.077078Z","shell.execute_reply.started":"2024-02-26T23:42:40.998817Z","shell.execute_reply":"2024-02-26T23:42:42.075889Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Create learning rate schedule and fit model\n\ndef lr_schedule(epoch, lr):\n if epoch == 0:\n lr=0.001\n if epoch == 300:\n lr=0.0001\n\n return lr\n\nlr_scheduler = LearningRateScheduler(lr_schedule)\n\nhistory = model.fit(\n train_gen,\n epochs=500,\n #steps_per_epoch=len(input_data1) // batch_size,\n steps_per_epoch=4,\n validation_data=val_data,\n callbacks=[lr_scheduler]\n)","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Plot training and validation loss\nplt.plot(history.history['loss'][1:], label='Training Loss')\nplt.plot(history.history['val_loss'][1:], label='Validation Loss')\nplt.title('Training and Validation Loss')\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# save or load the model\n#model.save('...')\nmodel = tf.keras.models.load_model('/kaggle/input/d2im-prototype/tensorflow2/micro_only/1/D2IM_trained.h5')","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:43:54.806531Z","iopub.execute_input":"2024-02-26T23:43:54.806992Z","iopub.status.idle":"2024-02-26T23:44:01.930947Z","shell.execute_reply.started":"2024-02-26T23:43:54.806956Z","shell.execute_reply":"2024-02-26T23:44:01.929724Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"results = model.evaluate([input_data1, input_data2], [target_data_u, target_data_v, target_data_w])\nprint(\"train losses (total, u, v, w):\", results)","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:44:05.320687Z","iopub.execute_input":"2024-02-26T23:44:05.321406Z","iopub.status.idle":"2024-02-26T23:44:25.258643Z","shell.execute_reply.started":"2024-02-26T23:44:05.321358Z","shell.execute_reply":"2024-02-26T23:44:25.257417Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"results = model.evaluate([input_data_val1, 
input_data_val2], [target_data_val_u, target_data_val_v, target_data_val_w])\nprint(\"val losses (total, u, v, w):\", results)","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:44:28.725767Z","iopub.execute_input":"2024-02-26T23:44:28.726226Z","iopub.status.idle":"2024-02-26T23:44:31.444112Z","shell.execute_reply.started":"2024-02-26T23:44:28.726194Z","shell.execute_reply":"2024-02-26T23:44:31.443294Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"

View Results

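The strain shown later in this section is recovered from the w field by finite differences, ε_zz = ∂w/∂z, scaled by 1e6 into microstrain; the grid spacing is the node spacing times the voxel size (50 × 39 μm). A small self-contained check with a uniform ramp (illustrative values only):

```python
import numpy as np

def calculate_strain(displacement_w, dx):
    dw_dz = np.gradient(displacement_w, dx, axis=0)  # finite-difference dw/dz
    return dw_dz * 1e6                               # strain -> microstrain

w = np.tile(np.arange(20, dtype=float)[:, None], (1, 20)) * 39.0  # 39 um rise per node
ezz = calculate_strain(w, dx=50 * 39)
print(ezz.mean())  # 20000.0 microstrain for this uniform ramp
```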
","metadata":{}},{"cell_type":"code","source":"# Get test data into correct shape\ninput_data_test1 = input_test_1.reshape(input_test_1.shape[0], input_test_1.shape[1], input_test_1.shape[2], num_channels)\ninput_data_test2 = input_test_2.reshape(input_test_2.shape[0], input_test_2.shape[1], input_test_2.shape[2], num_channels)\n\ntarget_data_test_u = target_test_u.reshape(target_test_u.shape[0], -1)\ntarget_data_test_v = target_test_v.reshape(target_test_v.shape[0], -1)\ntarget_data_test_w = target_test_w.reshape(target_test_w.shape[0], -1)\n\n# Evaluate on test data\nresults = model.evaluate([input_data_test1,input_data_test2], [target_data_test_u,target_data_test_v,target_data_test_w])\nprint(\"test loss, test acc:\", results)\n\n# Make predictions using test data\npredictions = model.predict([input_data_test1,input_data_test2])","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:44:35.760487Z","iopub.execute_input":"2024-02-26T23:44:35.760892Z","iopub.status.idle":"2024-02-26T23:44:43.873137Z","shell.execute_reply.started":"2024-02-26T23:44:35.760862Z","shell.execute_reply":"2024-02-26T23:44:43.872233Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# View u test displacements\nplot_num=target_test_u.shape[0]\n\nfor i in range(plot_num): # Loop through each sample\n print(\"plot \", i)\n plt.figure(figsize=(20, 5)) # Adjust the figure size to accommodate three plots\n \n predicted_image = np.flipud(predictions[0][i].reshape(output_shape))\n target_image = np.flipud(target_test_u[i])\n min_d=np.array([target_image.min(),predicted_image.min()]).min()\n max_d=np.array([target_image.max(),predicted_image.max()]).max()\n\n # Load or generate the mask\n msk = binary_erosion(input_data_test2[i].reshape(input_shape2[:2]), iterations = 2)\n\n # Create a grid of subplots\n gs = gridspec.GridSpec(1, 6, width_ratios=[1, 1, 1, 0.05, 1, 0.05])\n \n # Plot the input image\n ax3 = plt.subplot(gs[0,0]) # Fourth subplot\n input_image = np.flipud(input_data_test1[i].reshape(input_shape1[:2]))\n im3 = ax3.imshow(input_image, cmap='gray', vmin=0, vmax=1)\n ax3.set_title(\"Input Image\")\n ax3.axis('off')\n \n # Plot the ground truth target value\n ax1 = plt.subplot(gs[0,1]) # First subplot\n im1 = ax1.imshow(target_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\n plt.colorbar(im1, cax=plt.subplot(gs[0,3])) # Add colorbar to the third subplot\n ax1.set_title(\"Ground Truth U (Voxels)\")\n ax1.axis('off')\n \n # Plot the predicted output\n ax2 = plt.subplot(gs[0,2], sharey=ax1) # Second subplot, sharing y-axis with the first subplot\n im2 = ax2.imshow(predicted_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\n ax2.set_title(\"Predicted U (Voxels)\")\n ax2.axis('off')\n \n # Plot the displacement error\n ax1 = plt.subplot(gs[0,4]) # First subplot\n im1 = ax1.imshow(np.abs(target_image-predicted_image)*msk, cmap='jet', vmin=np.min(np.abs(target_image-predicted_image)*msk), vmax=np.max(np.abs(target_image-predicted_image)*msk))\n plt.colorbar(im1, cax=plt.subplot(gs[0,5])) # Add colorbar to the third subplot\n ax1.set_title(\"U Error: |W_0-W_p|\")\n ax1.axis('off')\n\n plt.tight_layout() # Ensure plots don't overlap\n plt.show()","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# view v test displacements\nplot_num=target_test_v.shape[0]\n\nfor i in range(plot_num): # Loop through each sample\n print(\"plot \", i)\n plt.figure(figsize=(20, 5)) # Adjust the figure size to accommodate three plots\n \n predicted_image = 
np.flipud(predictions[1][i].reshape(output_shape))\n target_image = np.flipud(target_test_v[i])\n min_d=np.array([target_image.min(),predicted_image.min()]).min()\n max_d=np.array([target_image.max(),predicted_image.max()]).max()\n\n # Load or generate the mask\n msk = binary_erosion(input_data_test2[i].reshape(input_shape2[:2]), iterations = 2)\n\n \n # Create a grid of subplots\n gs = gridspec.GridSpec(1, 6, width_ratios=[1, 1, 1, 0.05, 1, 0.05])\n \n # Plot the input image\n ax3 = plt.subplot(gs[0,0]) # Fourth subplot\n input_image = np.flipud(input_data_test1[i].reshape(input_shape1[:2]))\n im3 = ax3.imshow(input_image, cmap='gray', vmin=0, vmax=1)\n ax3.set_title(\"Input Image\")\n ax3.axis('off')\n \n # Plot the ground truth target value\n ax1 = plt.subplot(gs[0,1]) # First subplot\n im1 = ax1.imshow(target_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\n plt.colorbar(im1, cax=plt.subplot(gs[0,3])) # Add colorbar to the third subplot\n ax1.set_title(\"Ground Truth V (Voxels)\")\n ax1.axis('off')\n \n # Plot the predicted output\n ax2 = plt.subplot(gs[0,2], sharey=ax1) # Second subplot, sharing y-axis with the first subplot\n im2 = ax2.imshow(predicted_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\n ax2.set_title(\"Predicted V (Voxels)\")\n ax2.axis('off')\n \n # Plot the displacement error\n ax1 = plt.subplot(gs[0,4]) # First subplot\n im1 = ax1.imshow(np.abs(target_image-predicted_image)*msk, cmap='jet', vmin=np.min(np.abs(target_image-predicted_image)*msk), vmax=np.max(np.abs(target_image-predicted_image)*msk))\n plt.colorbar(im1, cax=plt.subplot(gs[0,5])) # Add colorbar to the third subplot\n ax1.set_title(\"V Error: |V_0-V_p|\")\n ax1.axis('off')\n \n plt.tight_layout() # Ensure plots don't overlap\n plt.show()","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# view w test displacements\nplot_num=target_test_w.shape[0]\n\nfor i in range(plot_num): # Loop through each sample\n print(\"plot \", i)\n plt.figure(figsize=(20, 5)) # Adjust the figure size to accommodate three plots\n \n predicted_image = np.flipud(predictions[2][i].reshape(output_shape))\n target_image = np.flipud(target_test_w[i])\n min_d=np.array([target_image.min(),predicted_image.min()]).min()\n max_d=np.array([target_image.max(),predicted_image.max()]).max()\n\n # Load or generate the mask\n msk = binary_erosion(input_data_test2[i].reshape(input_shape2[:2]), iterations = 2)\n\n \n # Create a grid of subplots\n gs = gridspec.GridSpec(1, 6, width_ratios=[1, 1, 1, 0.05, 1, 0.05])\n \n # Plot the input image\n ax3 = plt.subplot(gs[0,0]) # Fourth subplot\n input_image = np.flipud(input_data_test1[i].reshape(input_shape1[:2]))\n im3 = ax3.imshow(input_image, cmap='gray', vmin=0, vmax=1)\n ax3.set_title(\"Input Image\")\n ax3.axis('off')\n \n # Plot the ground truth target value\n ax1 = plt.subplot(gs[0,1]) # First subplot\n im1 = ax1.imshow(target_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\n plt.colorbar(im1, cax=plt.subplot(gs[0,3])) # Add colorbar to the third subplot\n ax1.set_title(\"Ground Truth W (Voxels)\")\n ax1.axis('off')\n \n # Plot the predicted output\n ax2 = plt.subplot(gs[0,2], sharey=ax1) # Second subplot, sharing y-axis with the first subplot\n im2 = ax2.imshow(predicted_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\n ax2.set_title(\"Predicted W (Voxels)\")\n ax2.axis('off')\n \n # Plot the displacement error\n ax1 = plt.subplot(gs[0,4]) # First subplot\n im1 = ax1.imshow(np.abs(target_image-predicted_image)*msk, cmap='jet', 
vmin=np.min(np.abs(target_image-predicted_image)*msk), vmax=np.max(np.abs(target_image-predicted_image)*msk))\n plt.colorbar(im1, cax=plt.subplot(gs[0,5])) # Add colorbar to the third subplot\n ax1.set_title(\"W Error: |W_0-W_p|\")\n ax1.axis('off')\n \n plt.tight_layout() # Ensure plots don't overlap\n plt.show()","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:45:31.555542Z","iopub.execute_input":"2024-02-26T23:45:31.555971Z","iopub.status.idle":"2024-02-26T23:45:53.397463Z","shell.execute_reply.started":"2024-02-26T23:45:31.555937Z","shell.execute_reply":"2024-02-26T23:45:53.396153Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"plot_num = [3, 4, 9, 24]\n\nvs = 39 # real voxel size (um)\nns = 50 # Node spacing\n\ndef calculate_strain(displacement_w, dx):\n dw_dz = np.gradient(displacement_w, dx, axis=(0))\n ezz = dw_dz*1e6\n return ezz\n\n\n# Obtain strain for samples. Includes masking and value capping for better visualisations\nfor i in plot_num: # Loop through each sample\n predicted_image = (np.flipud(predictions[2][i].reshape(output_shape)))*vs\n target_image = np.flipud(target_test_w[i])*vs\n msk = binary_erosion(input_data_test2[i].reshape(input_shape2[:2]), iterations = 2)\n ezz_t = np.flipud(calculate_strain(np.flipud(target_image), ns*vs))\n ezz_p = np.flipud(calculate_strain(np.flipud(predicted_image), ns*vs))\n\n ezz_t_i = np.where(msk, ezz_t.reshape(output_shape), 0.0)\n ezz_p_i = np.where(msk, ezz_p.reshape(output_shape), 0.0)\n if i == 25:\n min_s = -40000\n threshold = -40000\n stress_mask = ezz_t_i < threshold\n ezz_t_i[stress_mask] = 0\n ezz_p_i[stress_mask] = 0\n min_err_bar = 0\n max_err_bar = np.max(np.abs(target_image-predicted_image)*msk)\n min_strerr_bar = 0\n max_strerr_bar = np.max(np.abs(ezz_t_i-ezz_p_i))\n max_strerr_bar = 30000\n \n\nfor i in plot_num: # Loop through each sample\n print(\"plot \", i)\n plt.figure(figsize=(20, 10)) # Adjust the figure size to accommodate three plots\n \n predicted_image = (np.flipud(predictions[2][i].reshape(output_shape)))*vs\n target_image = np.flipud(target_test_w[i])*vs\n min_d=np.array([target_image.min(),predicted_image.min()]).min()\n max_d=np.array([target_image.max(),predicted_image.max()]).max()\n\n # Load or generate the mask\n msk = binary_erosion(input_data_test2[i].reshape(input_shape2[:2]), iterations = 2)\n\n ezz_t = np.flipud(calculate_strain(np.flipud(target_image), ns*vs))\n ezz_p = np.flipud(calculate_strain(np.flipud(predicted_image), ns*vs))\n # Apply the mask to the strain field\n ezz_t_i = np.where(msk, ezz_t.reshape(output_shape), 0.0)\n ezz_p_i = np.where(msk, ezz_p.reshape(output_shape), 0.0)\n\n min_s=np.array([ezz_t_i.min(),ezz_p_i.min()]).min()\n max_s=np.array([ezz_t_i.max(),ezz_p_i.max()]).max()\n\n # Masking and value capping for clearer visualisations\n if i == 4:\n threshold = -40000\n stress_mask = ezz_t_i < threshold\n ezz_t_i[stress_mask] = 0\n ezz_p_i[stress_mask] = 0\n min_s = -30000\n\n \n if i == 24:\n threshold = -60000\n stress_mask = ezz_t_i < threshold\n ezz_t_i[stress_mask] = 0\n ezz_p_i[stress_mask] = 0\n\n \n if i == 9:\n threshold1 = -129000\n threshold2 = -129500\n stress_mask = (ezz_t_i <= threshold1) & (ezz_t_i >= threshold2)\n ezz_t_i[stress_mask] = 0\n ezz_p_i[stress_mask] = 0\n\n min_s=np.array([ezz_t_i.min(),ezz_p_i.min()]).min()\n max_s=np.array([ezz_t_i.max(),ezz_p_i.max()]).max()\n \n # Create a grid of subplots\n gs = gridspec.GridSpec(2, 4, width_ratios=[1, 1, 1.065, 1.065])\n \n # Plot the input image\n ax3 = 
plt.subplot(gs[0,0]) # Fourth subplot\n input_image = np.flipud(input_data_test1[i].reshape(input_shape1[:2]))\n im3 = ax3.imshow(input_image, cmap='gray', vmin=0, vmax=1)\n ax3.set_title(\"Input Image\")\n ax3.axis('off')\n \n # Plot the ground truth target value\n ax1 = plt.subplot(gs[0, 1]) # First subplot\n im1 = ax1.imshow(target_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\n ax1.set_title(\"Ground Truth w (μm)\")\n ax1.axis('off')\n\n # Plot the predicted output\n ax2 = plt.subplot(gs[0,2], sharey=ax1) # Second subplot, sharing y-axis with the first subplot\n im2 = ax2.imshow(predicted_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\n ax2.set_title(\"Predicted $\\hat{w}$ (μm)\")\n\n #shared axes\n divider = make_axes_locatable(ax2)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05) # Adjust the size and padding\n plt.colorbar(im2, cax=cax) # Add colorbar to the right side\n cax.yaxis.set_ticks_position('right') # Move the colorbar ticks to the left side\n ax2.axis('off')\n \n error = np.abs(target_image-predicted_image)*msk\n \n ax3 = plt.subplot(gs[0,3]) # First subplot\n #im3 = ax3.imshow(error, cmap='jet')\n im3 = ax3.imshow(error, cmap='jet', vmin=min_err_bar, vmax=max_err_bar)\n ax3.set_title(\"w Error (μm): |w-$\\hat{w}$|\")\n ax3.axis('off')\n divider = make_axes_locatable(ax3)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05) # Adjust the size and padding\n plt.colorbar(im3, cax=cax) # Add colorbar to the right side\n cax.yaxis.set_ticks_position('right') # Move the colorbar ticks to the left side\n ax3.axis('off')\n\n # Plot the input image\n ax3 = plt.subplot(gs[1,0]) # Fourth subplot\n im3 = ax3.imshow(input_data_test2[i].reshape(input_shape2[:2]), cmap='gray', vmin=0, vmax=1)\n ax3.set_title(\"Input Mask\")\n ax3.axis('off')\n \n # Plot the ground truth target value\n ax1 = plt.subplot(gs[1,1]) # First subplot\n im1 = ax1.imshow(ezz_t_i, cmap='plasma_r', vmin=min_s, vmax=max_s)\n ax1.set_title(\"Measured Strain ε$_{zz}$ (με)\")\n ax1.axis('off')\n \n # Plot the predicted output\n ax2 = plt.subplot(gs[1,2], sharey=ax1) # Second subplot, sharing y-axis with the first subplot\n im2 = ax2.imshow(ezz_p_i, cmap='plasma_r', vmin=min_s, vmax=max_s)\n ax2.set_title(\"Predicted Strain $\\hat{ε}_{zz}$ (με)\")\n ax2.axis('off')\n\n #shared axes\n divider = make_axes_locatable(ax2)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05) # Adjust the size and padding\n plt.colorbar(im2, cax=cax) # Add colorbar to the right side\n cax.yaxis.set_ticks_position('right') # Move the colorbar ticks to the left side\n ax2.axis('off')\n\n # Plot the displacement error\n ax3 = plt.subplot(gs[1,3]) # First subplot\n im3 = ax3.imshow(np.abs(ezz_t_i-ezz_p_i), cmap='jet', vmin=0, vmax=max_strerr_bar)\n ax3.set_title(\"Strain Error (με): |ε$_{zz}$-$\\hat{ε}_{zz}$|\")\n \n divider = make_axes_locatable(ax3)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05) # Adjust the size and padding\n plt.colorbar(im3, cax=cax) # Add colorbar to the right side\n cax.yaxis.set_ticks_position('right') # Move the colorbar ticks to the left side\n ax3.axis('off')\n \n plt.tight_layout() # Ensure plots don't overlap\n # output_file = f\"Outputs/2024/Plot_output_{i}.jpg\" # Specify the output file name\n # plt.savefig(output_file, dpi=500) # dpi controls the resolution (dots per inch)\n plt.show()\n","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# plot the outputs highlighting where bone yielding is predicted but not seen in ground truth\nplt.figure(figsize=(20, 5)) # 
Adjust the figure size to accommodate three plots\n\ndef strainOutput(predictions, target_test_w, i):\n predicted_image = (np.flipud(predictions.reshape(output_shape)))*vs\n target_image = np.flipud(target_test_w)*vs\n min_d=np.array([target_image.min(),predicted_image.min()]).min()\n max_d=np.array([target_image.max(),predicted_image.max()]).max()\n \n # Load or generate the mask\n msk = binary_erosion(input_data_test2[i].reshape(input_shape2[:2]), iterations = 2)\n \n \n ezz_t = np.flipud(calculate_strain(np.flipud(target_image), ns*vs))\n ezz_p = np.flipud(calculate_strain(np.flipud(predicted_image), ns*vs))\n ezz_t_i = np.where(msk, ezz_t.reshape(output_shape), 0.0)\n ezz_p_i = np.where(msk, ezz_p.reshape(output_shape), 0.0)\n \n min_s=np.array([ezz_t_i.min(),ezz_p_i.min()]).min()\n max_s=np.array([ezz_t_i.max(),ezz_p_i.max()]).max()\n \n if i == 4:\n threshold = -40000\n stress_mask = ezz_t_i < threshold\n ezz_t_i[stress_mask] = 0\n ezz_p_i[stress_mask] = 0\n min_s = -30000\n\n \n if i == 24:\n threshold = -60000\n stress_mask = ezz_t_i < threshold\n ezz_t_i[stress_mask] = 0\n ezz_p_i[stress_mask] = 0\n\n \n if i == 9:\n threshold1 = -129000\n threshold2 = -129500\n stress_mask = (ezz_t_i <= threshold1) & (ezz_t_i >= threshold2)\n ezz_t_i[stress_mask] = 0\n ezz_p_i[stress_mask] = 0\n \n \n \n min_s=np.array([ezz_t_i.min(),ezz_p_i.min()]).min()\n max_s=np.array([ezz_t_i.max(),ezz_p_i.max()]).max()\n return ezz_t_i, ezz_p_i\n\n# Create a grid of subplots\ngs = gridspec.GridSpec(1, 4, width_ratios=[1, 1, 1, 1.061])\n\nplot = 24\nezz_t_i, ezz_p_i = strainOutput(predictions[2][plot], target_test_w[plot], plot)\n\nax3 = plt.subplot(gs[0,0]) # First subplot\nim3 = ax3.imshow(np.abs(ezz_t_i-ezz_p_i), cmap='jet', vmin=0, vmax=max_strerr_bar)\n\nhighlight_mask = (np.abs(ezz_p_i) > 10000) & (np.abs(ezz_t_i) < 10000) \nax3.contour(highlight_mask, levels=[0, 1], colors='white', linewidths=5)\ncax.yaxis.set_ticks_position('right') # Move the colorbar ticks to the left side\nax3.axis('off')\n\nplot = 4\nezz_t_i, ezz_p_i = strainOutput(predictions[2][plot], target_test_w[plot], plot)\n\nax3 = plt.subplot(gs[0,1]) # First subplot\nim3 = ax3.imshow(np.abs(ezz_t_i-ezz_p_i), cmap='jet', vmin=0, vmax=max_strerr_bar)\n\nhighlight_mask = (np.abs(ezz_p_i) > 10000) & (np.abs(ezz_t_i) < 10000) \nax3.contour(highlight_mask, levels=[0, 1], colors='white', linewidths=5)\ncax.yaxis.set_ticks_position('right') # Move the colorbar ticks to the left side\nax3.axis('off')\n\nplot = 3\nezz_t_i, ezz_p_i = strainOutput(predictions[2][plot], target_test_w[plot], plot)\n\nax3 = plt.subplot(gs[0,2]) # First subplot\nim3 = ax3.imshow(np.abs(ezz_t_i-ezz_p_i), cmap='jet', vmin=0, vmax=max_strerr_bar)\n\nhighlight_mask = (np.abs(ezz_p_i) > 10000) & (np.abs(ezz_t_i) < 10000) \nax3.contour(highlight_mask, levels=[0, 1], colors='white', linewidths=5)\ncax.yaxis.set_ticks_position('right') # Move the colorbar ticks to the left side\nax3.axis('off')\n\nplot = 9\nezz_t_i, ezz_p_i = strainOutput(predictions[2][plot], target_test_w[plot], plot)\n\nax3 = plt.subplot(gs[0,3]) # First subplot\nim3 = ax3.imshow(np.abs(ezz_t_i-ezz_p_i), cmap='jet', vmin=0, vmax=max_strerr_bar)\n\nhighlight_mask = (np.abs(ezz_p_i) > 10000) & (np.abs(ezz_t_i) < 10000) \nax3.contour(highlight_mask, levels=[0, 1], colors='white', linewidths=5)\ndivider = make_axes_locatable(ax3)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05) # Adjust the size and padding\nplt.colorbar(im3, cax=cax) # Add colorbar to the right 
side\ncax.yaxis.set_ticks_position('right') # Move the colorbar ticks to the left side\nax3.axis('off')\n\nplt.tight_layout() # Ensure plots don't overlap\n#output_file = f\"Outputs/2024/Plot_output_yieldHighlight.jpg\" # Specify the output file name\n#plt.savefig(output_file, dpi=500) # dpi controls the resolution (dots per inch)\nplt.show()\n","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"

Statistics

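The correlation cells flatten the predicted and measured fields, drop the masked-out (zero) nodes, and compute Pearson's correlation. Note that `scipy.stats.pearsonr` returns r, not R²; square it if a coefficient of determination is wanted. A minimal sketch, assuming `predictions`, `target_test_u` and `vs` from the cells above:

```python
import numpy as np
from scipy.stats import pearsonr

pred = predictions[0].reshape(-1) * vs   # predicted u in micrometres
meas = target_test_u.reshape(-1) * vs    # measured u in micrometres
keep = (pred != 0) & (meas != 0)         # exclude masked-out nodes
r, p = pearsonr(pred[keep], meas[keep])
print(f'r = {r:.3f}, R^2 = {r**2:.3f}, p = {p:.2g}')
```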
","metadata":{}},{"cell_type":"code","source":"# Distribution of u predictions\n\nmean = np.mean(target_test_u)*vs\nstd_dev = np.std(target_test_u)*vs\nmin_value = np.min(target_test_u)*vs\nmax_value = np.max(target_test_u)*vs\n\n# Print the summary statistics\nprint(\"Mean Measued U:\", mean)\nprint(\"Standard Deviation Measued U:\", std_dev)\nprint(\"Min Value Measued U:\", min_value)\nprint(\"Max Value Measued U:\", max_value)\n\nmean = np.mean(predictions[0])*vs\nstd_dev = np.std(predictions[0])*vs\nmin_value = np.min(predictions[0])*vs\nmax_value = np.max(predictions[0])*vs\n\n# Print the summary statistics\nprint(\"Mean Predicted U:\", mean)\nprint(\"Standard Deviation Predicted U:\", std_dev)\nprint(\"Min Value Predicted U:\", min_value)\nprint(\"Max Value Predicted U:\", max_value)","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Distribution of v predictions\n\nmean = np.mean(target_test_v)*vs\nstd_dev = np.std(target_test_v)*vs\nmin_value = np.min(target_test_v)*vs\nmax_value = np.max(target_test_v)*vs\n\n# Print the summary statistics\nprint(\"Mean Measued V:\", mean)\nprint(\"Standard Deviation Measued V:\", std_dev)\nprint(\"Min Value Measued V:\", min_value)\nprint(\"Max Value Measued V:\", max_value)\n\nmean = np.mean(predictions[1])*vs\nstd_dev = np.std(predictions[1])*vs\nmin_value = np.min(predictions[1])*vs\nmax_value = np.max(predictions[1])*vs\n\n# Print the summary statistics\nprint(\"Mean Predicted V:\", mean)\nprint(\"Standard Deviation Predicted V:\", std_dev)\nprint(\"Min Value Predicted V:\", min_value)\nprint(\"Max Value Predicted V:\", max_value)","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Distribution of w predictions\n\nmean = np.mean(target_test_w)*vs\nstd_dev = np.std(target_test_w)*vs\nmin_value = np.min(target_test_w)*vs\nmax_value = np.max(target_test_w)*vs\n\n# Print the summary statistics\nprint(\"Mean Measued W:\", mean)\nprint(\"Standard Deviation Measued W:\", std_dev)\nprint(\"Min Value Measued W:\", min_value)\nprint(\"Max Value Measued W:\", max_value)\n\nmean = np.mean(predictions[2])*vs\nstd_dev = np.std(predictions[2])*vs\nmin_value = np.min(predictions[2])*vs\nmax_value = np.max(predictions[2])*vs\n\n# Print the summary statistics\nprint(\"Mean Predicted W:\", mean)\nprint(\"Standard Deviation Predicted W:\", std_dev)\nprint(\"Min Value Predicted W:\", min_value)\nprint(\"Max Value Predicted W:\", max_value)","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Correlation of predicted vs observed displacements\nfrom scipy.stats import pearsonr\n\nvs=39\n# Extract the data\npredicted_data_u = predictions[0]*vs\ntarget_data_u = target_test_u*vs\npredicted_data_v = predictions[1]*vs\ntarget_data_v = target_test_v*vs\npredicted_data_w = predictions[2]*vs\ntarget_data_w = target_test_w*vs\n\n# Create a function to calculate correlation and plot the data\ndef plot_correlation(predicted_data, target_data, title, title2):\n # Flatten the data if they are not already 1D arrays\n predicted_data = predicted_data.reshape(-1)\n target_data = target_data.reshape(-1)\n\n # Create a filter to exclude data points where either value is 0\n non_zero_filter = (predicted_data != 0) & (target_data != 0)\n\n # Apply the filter to both datasets\n predicted_data = predicted_data[non_zero_filter]\n target_data = target_data[non_zero_filter]\n\n # Calculate the correlation coefficient\n correlation_coefficient, _ = pearsonr(predicted_data, target_data)\n\n # 
Calculate the coefficients for the line of best fit (linear regression)\n coefficients = np.polyfit(predicted_data, target_data, 1)\n\n # Create the linear regression line using the coefficients\n line_of_best_fit = np.poly1d(coefficients)\n\n # Create a scatter plot\n plt.scatter(predicted_data, target_data, alpha=0.25, s=4)\n plt.plot(predicted_data, line_of_best_fit(predicted_data), color='black')\n plt.title(f'Correlation: $r =$ {correlation_coefficient:.2f}')\n plt.xlabel(f'Predicted Displacement {title2} (μm)')\n plt.ylabel(f'Measured Displacement {title} (μm)')\n plt.grid(True)\n\n print(predicted_data.size)\n\n# Create a figure with three subplots\nfig, axs = plt.subplots(1, 3, figsize=(15, 5))\n\n# Plot correlations for U displacement\nplt.sca(axs[0])\nplot_correlation(predicted_data_u, target_data_u, '$\\it{u}$', '$\\hat{\\it{u}}$')\n\n# Plot correlations for V displacement\nplt.sca(axs[1])\nplot_correlation(predicted_data_v, target_data_v, '$\\it{v}$', '$\\hat{\\it{v}}$')\n\n# Plot correlations for W displacement\nplt.sca(axs[2])\nplot_correlation(predicted_data_w, target_data_w, '$\\it{w}$', '$\\hat{\\it{w}}$')\n\nplt.tight_layout()\n\n#output_file = f\"Outputs/2024/Correlations.jpg\" # Specify the output file name\n#plt.savefig(output_file, dpi=500) # dpi controls the resolution (dots per inch)\nplt.show()\n","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Relative error of displacement predictions\n\n# Extract the data\npredicted_data_u = predictions[0]\npredicted_data_v = predictions[1]\npredicted_data_w = predictions[2]\n\ntarget_data_u = target_test_u\ntarget_data_v = target_test_v\ntarget_data_w = target_test_w\n\n# Ensure all arrays have the same shape\npredicted_data_u = predicted_data_u.reshape(-1)\npredicted_data_v = predicted_data_v.reshape(-1)\npredicted_data_w = predicted_data_w.reshape(-1)\n\ntarget_data_u = target_data_u.reshape(-1)\ntarget_data_v = target_data_v.reshape(-1)\ntarget_data_w = target_data_w.reshape(-1)\n\n# Create filters to exclude data points where either value is 0\nnon_zero_filter_u = (predicted_data_u != 0) & (target_data_u != 0)\nnon_zero_filter_v = (predicted_data_v != 0) & (target_data_v != 0)\nnon_zero_filter_w = (predicted_data_w != 0) & (target_data_w != 0)\n\n# Apply the filters to both datasets\npredicted_data_u = predicted_data_u[non_zero_filter_u]\npredicted_data_v = predicted_data_v[non_zero_filter_v]\npredicted_data_w = predicted_data_w[non_zero_filter_w]\n\ntarget_data_u = target_data_u[non_zero_filter_u]\ntarget_data_v = target_data_v[non_zero_filter_v]\ntarget_data_w = target_data_w[non_zero_filter_w]\n\n# Calculate relative errors (|prediction - measured| / |measured|)\nrelative_errors_u = np.abs(predicted_data_u - target_data_u) / np.abs(target_data_u)\nrelative_errors_v = np.abs(predicted_data_v - target_data_v) / np.abs(target_data_v)\nrelative_errors_w = np.abs(predicted_data_w - target_data_w) / np.abs(target_data_w)\n\n# Create a box and whisker plot for all relative errors on the same plot\nfig, ax = plt.subplots(figsize=(8, 6))\n\nbp = ax.boxplot(\n [relative_errors_u*100,\n relative_errors_v*100, relative_errors_w*100],\n vert=True,\n showfliers=False,\n labels=['$\\hat{\\it{u}}$','$\\hat{\\it{v}}$','$\\hat{\\it{w}}$'],\n patch_artist=True, # Color code the boxes\n medianprops={'color': 'black'} # Set median line color to black\n)\n\n# Color code the boxes\ncolors = ['lightblue', 'lightgreen', 'lightcoral']\nfor patch, color in zip(bp['boxes'], colors):\n patch.set_facecolor(color)\n\nax.set_ylabel('Displacement Error 
(%)')\nax.set_title('Relative Error of Displacement Predictions')\n\nplt.tight_layout()\n#output_file = f\"Outputs/2024/RelativeErrorDisp.jpg\" # Specify the output file name\n#plt.savefig(output_file, dpi=500) # dpi controls the resolution (dots per inch)\nplt.show()\n","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Distributions of the relative errors\n\nmean = np.mean(relative_errors_u)\nstd_dev = np.std(relative_errors_u)\nmin_value = np.min(relative_errors_u)\nmax_value = np.max(relative_errors_u)\nq1 = np.percentile(relative_errors_u, 25)\nq3 = np.percentile(relative_errors_u, 75)\n# Print the summary statistics\nprint(\"Mean Relative Error U:\", mean)\nprint(\"Standard Deviation Relative Error U:\", std_dev)\nprint(\"Min Value Relative Error U:\", min_value)\nprint(\"Max Value Relative Error U:\", max_value)\nprint(\"Q1:\", q1)\nprint(\"Q3:\", q3)\nprint(\"IQR:\", q3-q1)\n\nmean = np.mean(relative_errors_v)\nstd_dev = np.std(relative_errors_v)\nmin_value = np.min(relative_errors_v)\nmax_value = np.max(relative_errors_v)\nq1 = np.percentile(relative_errors_v, 25)\nq3 = np.percentile(relative_errors_v, 75)\n# Print the summary statistics\nprint(\"Mean Relative Error V:\", mean)\nprint(\"Standard Deviation Relative Error V:\", std_dev)\nprint(\"Min Value Relative Error V:\", min_value)\nprint(\"Max Value Relative Error V:\", max_value)\nprint(\"Q1:\", q1)\nprint(\"Q3:\", q3)\nprint(\"IQR:\", q3-q1)\n\nmean = np.mean(relative_errors_w)\nstd_dev = np.std(relative_errors_w)\nmin_value = np.min(relative_errors_w)\nmax_value = np.max(relative_errors_w)\nq1 = np.percentile(relative_errors_w, 25)\nq3 = np.percentile(relative_errors_w, 75)\n# Print the summary statistics\nprint(\"Mean Relative Error W:\", mean)\nprint(\"Standard Deviation Relative Error W:\", std_dev)\nprint(\"Min Value Relative Error W:\", min_value)\nprint(\"Max Value Relative Error W:\", max_value)\nprint(\"Q1:\", q1)\nprint(\"Q3:\", q3)\nprint(\"IQR:\", q3-q1)","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"import scipy.stats as stats\n\n# Perform a one-sided Mann-Whitney U test: are the w relative errors stochastically smaller than the v relative errors?\nstatistic, p_value = stats.mannwhitneyu(relative_errors_w, relative_errors_v, alternative='less')\n\n# Set your significance level (alpha)\nalpha = 0.05\n\n# Compare the p-value to alpha to determine statistical significance\nif p_value < alpha:\n print(\"Relative errors for w are significantly smaller than for v.\")\nelse:\n print(\"There is no evidence that relative errors for w are smaller than for v.\")\n\nprint(f\"Mann-Whitney U statistic: {statistic}\")\nprint(f\"P-value: {p_value}\")","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"import scipy.stats as stats\n\n# Perform a one-sided Mann-Whitney U test: are the w relative errors stochastically smaller than the u relative errors?\nstatistic, p_value = stats.mannwhitneyu(relative_errors_w, relative_errors_u, alternative='less')\n\n# Set your significance level (alpha)\nalpha = 0.05\n\n# Compare the p-value to alpha to determine statistical significance\nif p_value < alpha:\n print(\"Relative errors for w are significantly smaller than for u.\")\nelse:\n print(\"There is no evidence that relative errors for w are smaller than for u.\")\n\nprint(f\"Mann-Whitney U statistic: {statistic}\")\nprint(f\"P-value: {p_value}\")","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"

Clinical Images

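For clinical-style images the model is retrained with brightness, contrast, and blur augmentation so it tolerates clinical-CT grey levels and resolution. A sketch of the per-image transform used below; core TensorFlow has no tf.image blur op, so a box blur via average pooling stands in here (an assumption, not the only choice):

```python
import numpy as np
import tensorflow as tf

def augment(img, brightness=(-0.1, 0.1), contrast=(0.95, 1.05), blur=(1, 12)):
    img = tf.image.adjust_brightness(img, delta=np.random.uniform(*brightness))
    img = tf.image.adjust_contrast(img, np.random.uniform(*contrast))
    k = int(np.random.randint(blur[0], blur[1] + 1))  # random blur kernel size
    return tf.nn.avg_pool2d(img[None, ...], ksize=k, strides=1, padding='SAME')[0]

out = augment(tf.constant(np.random.rand(256, 256, 1), dtype=tf.float32))
```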
","metadata":{}},{"cell_type":"code","source":"from tensorflow.keras.layers import Layer, Input, Conv2D, BatchNormalization, MaxPooling2D, Dropout, Flatten, Dense, Multiply\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom tensorflow import keras\n\nclass BrightnessContrastBlurAugmentation(Layer):\n def __init__(self, brightness_range=(-0.1, 0.1), contrast_range=(0.95, 1.05), blur_range=(1, 12), **kwargs):\n super(BrightnessContrastBlurAugmentation, self).__init__(**kwargs)\n self.brightness_range = brightness_range\n self.contrast_range = contrast_range\n self.blur_range = blur_range\n\n def call(self, inputs, training=None):\n if training:\n # Randomly sample brightness, contrast, and blur values from the specified ranges\n brightness_factor = np.random.uniform(self.brightness_range[0], self.brightness_range[1])\n contrast_factor = np.random.uniform(self.contrast_range[0], self.contrast_range[1])\n blur_factor = np.random.randint(self.blur_range[0], self.blur_range[1] + 1)\n\n # Apply brightness and contrast adjustments\n augmented_images = tf.image.adjust_brightness(inputs, delta=brightness_factor)\n augmented_images = tf.image.adjust_contrast(augmented_images, contrast_factor)\n\n # Apply random blur\n augmented_images = tf.image.random_blur(augmented_images, (blur_factor, blur_factor))\n\n return augmented_images\n else:\n return inputs\n\n\n\ndef create_cnn(input_shape1, input_shape2, output_shape, dropout_rate, l2_lambda):\n input_layer1 = Input(shape=input_shape1)\n input_layer2 = Input(shape=input_shape2)\n\n x = input_layer1\n # Convolutional layers for the first image\n x = BatchNormalization()(x)\n x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2))(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2))(x)\n x = BatchNormalization()(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2))(x)\n x = BatchNormalization()(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2))(x)\n x = Dropout(dropout_rate)(x) \n \n x = Flatten()(x)\n x = Dropout(dropout_rate)(x) \n x = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x = Dropout(dropout_rate)(x) \n x = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x = Dropout(dropout_rate)(x) \n \n x = BatchNormalization()(x)\n x_u = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x_u = Dropout(dropout_rate)(x_u) \n \n x_v = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x_v = Dropout(dropout_rate)(x_v) \n \n x_w = Dense(512, activation='relu', kernel_regularizer=l2(l2_lambda))(x)\n x_w = Dropout(dropout_rate)(x_w) \n\n x_u = Dense(output_shape[0] * output_shape[1], activation=None)(x_u)\n x_v = Dense(output_shape[0] * output_shape[1], activation=None)(x_v)\n x_w = Dense(output_shape[0] * output_shape[1], activation=None)(x_w)\n \n xm2 = Flatten()(input_layer2)\n output_layer_u = Multiply(name=\"out_u\")([x_u, xm2])\n output_layer_v = Multiply(name=\"out_v\")([x_v, xm2])\n output_layer_w = 
Multiply(name=\"out_w\")([x_w, xm2])\n \n model = Model(inputs=[input_layer1, input_layer2], outputs=[output_layer_u, output_layer_v, output_layer_w])\n return model\n\n\n\n# Define input shapes and output shape\ninput_shape1 = (input_train_1.shape[1], input_train_1.shape[2], 1)\ninput_shape2 = (input_train_2.shape[1], input_train_2.shape[2], 1)\noutput_shape = (target_train_u.shape[1], target_train_u.shape[2])\n\n# Define regularisation and create the model\ndropout_rate = 0.5\nl2_lambda = 0.001\nmodel = create_cnn(input_shape1, input_shape2, output_shape, dropout_rate, l2_lambda)\nmodel.summary()\n\n# Compile the model\nmodel.compile(optimizer='adam', loss={'out_u': 'mean_squared_error', 'out_v': 'mean_squared_error', 'out_w': 'mean_squared_error'})\n\n# Reshape input and target data\nnum_channels = 1\ninput_data1 = input_train_1.reshape(input_train_1.shape[0], input_train_1.shape[1], input_train_1.shape[2], num_channels)\ninput_data_val1 = input_val_1.reshape(input_val_1.shape[0], input_val_1.shape[1], input_val_1.shape[2], num_channels)\ninput_data2 = input_train_2.reshape(input_train_2.shape[0], input_train_2.shape[1], input_train_2.shape[2], num_channels)\ninput_data_val2 = input_val_2.reshape(input_val_2.shape[0], input_val_2.shape[1], input_val_2.shape[2], num_channels)\ntarget_data_u = target_train_u.reshape(target_train_u.shape[0], -1)\ntarget_data_val_u = target_val_u.reshape(target_val_u.shape[0], -1)\ntarget_data_v = target_train_v.reshape(target_train_v.shape[0], -1)\ntarget_data_val_v = target_val_v.reshape(target_val_v.shape[0], -1)\ntarget_data_w = target_train_w.reshape(target_train_w.shape[0], -1)\ntarget_data_val_w = target_val_w.reshape(target_val_w.shape[0], -1)\n\n# Create data generators\ndef data_generator(input_data1, input_data2, target_data_1, target_data_2, target_data_3, batch_size):\n num_samples = input_data1.shape[0]\n while True:\n indices = np.random.permutation(num_samples)\n for start_idx in range(0, num_samples, batch_size):\n end_idx = min(start_idx + batch_size, num_samples)\n batch_indices = indices[start_idx:end_idx]\n batch_input1 = input_data1[batch_indices]\n batch_input2 = input_data2[batch_indices]\n batch_target1 = target_data_1[batch_indices]\n batch_target2 = target_data_2[batch_indices]\n batch_target3 = target_data_3[batch_indices]\n \n # Data augmentation\n augmented_batch_input1 = []\n for img in batch_input1:\n augmented_img = BrightnessContrastAugmentation()(img, training=True)\n augmented_batch_input1.append(augmented_img)\n augmented_batch_input1 = np.array(augmented_batch_input1)\n\n yield ([augmented_batch_input1, batch_input2], [batch_target1, batch_target2, batch_target3])\n \n # If the batch size exceeds the number of samples, resample the dataset\n if batch_size > num_samples:\n indices = np.random.permutation(num_samples)\n\n\n# Train the model using the data generator\nbatch_size = 200\ntrain_gen = data_generator(input_data1, input_data2, target_data_u, target_data_v, target_data_w, batch_size)\nval_data = ([input_data_val1, input_data_val2], [target_data_val_u, target_data_val_v, target_data_val_w])","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:46:12.208710Z","iopub.execute_input":"2024-02-26T23:46:12.209153Z","iopub.status.idle":"2024-02-26T23:46:13.676024Z","shell.execute_reply.started":"2024-02-26T23:46:12.209119Z","shell.execute_reply":"2024-02-26T23:46:13.674623Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Create learning rate schedule and fit model\n\ndef 
lr_schedule(epoch, lr):\n if epoch == 0:\n lr=0.001\n if epoch == 300:\n lr=0.0001\n\n return lr\n\nlr_scheduler = LearningRateScheduler(lr_schedule)\n\nhistory = model.fit(\n train_gen,\n epochs=500,\n #steps_per_epoch=len(input_data1) // batch_size,\n steps_per_epoch=3,\n validation_data=val_data,\n callbacks=[lr_scheduler]\n)","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"model = tf.keras.models.load_model('/kaggle/input/d2im-prototype/tensorflow2/data_augmentation_for_clinical/1/D2IM_trained_data_augmentation.h5')","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:46:31.124192Z","iopub.execute_input":"2024-02-26T23:46:31.124612Z","iopub.status.idle":"2024-02-26T23:46:37.751163Z","shell.execute_reply.started":"2024-02-26T23:46:31.124583Z","shell.execute_reply":"2024-02-26T23:46:37.749815Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":"Anterior-Posterior Slice","metadata":{}},{"cell_type":"code","source":"folder_path = '/kaggle/input/d2im-prototype-dataset-porcine-vertebra-slices/Final/Clinical/AP/'\n\n# Import Clinical Scan\nscan = tiff.imread(os.path.join(folder_path, '551_t12t14_inputslice.tif'))\ncommon_shape = (256, 256)\nresized_img = zoom(scan, (common_shape[0] / scan.shape[0], common_shape[1] / scan.shape[1]), mode='nearest', order=0)\nscan = np.array(resized_img)/255\nscan = np.flipud(scan)\ninputscan = scan.reshape(1, scan.shape[0], scan.shape[1], 1)\n\n# Import clinical mask\nmask = tiff.imread(os.path.join(folder_path, '551_t12t14_inputslice_mask.tif'))\ncommon_shape = (20, 20)\nresized_img = zoom(mask, (common_shape[0] / mask.shape[0], common_shape[1] / mask.shape[1]), mode='nearest', order=0)\nmask = binary_dilation(np.array(resized_img))\ninputmask = mask.reshape(1, mask.shape[0], mask.shape[1], 1)\n\n# Import Rescaled High Res Scan\nscan_HR_RE = tiff.imread(os.path.join(folder_path, 'S7_INT_UL_AP_50_0007_downscaled.tif'))\ncommon_shape = (256, 256)\nresized_img = zoom(scan_HR_RE, (common_shape[0] / scan_HR_RE.shape[0], common_shape[1] / scan_HR_RE.shape[1]), mode='nearest', order=0)\nscan_HR_RE = np.array(resized_img)/255\ninputscan_HR_RE = scan_HR_RE.reshape(1, scan_HR_RE.shape[0], scan_HR_RE.shape[1], 1)\n\n# Import Hi res mask\nmask_HR = tiff.imread(os.path.join(folder_path, 'S7_INT_UL_AP_50_0007_mask.tif'))\ncommon_shape = (20, 20)\nresized_img = zoom(mask_HR, (common_shape[0] / mask_HR.shape[0], common_shape[1] / mask_HR.shape[1]), mode='nearest', order=0)\nmask_HR = binary_dilation(np.array(resized_img))\ninputmask_HR = mask_HR.reshape(1, mask_HR.shape[0], mask_HR.shape[1], 1)\n\n# Import Ground Truth\ntarget = tiff.imread(os.path.join(folder_path, 'S7_INT_UL_AP_50_0007_GT.tif'))\ncommon_shape = (20, 20)\nresized_img = zoom(target, (common_shape[0] / target.shape[0], common_shape[1] / target.shape[1]), mode='nearest', order=0)\ntarget = np.array(resized_img)\ntarget[np.isnan(target)] = 0\ntarget_GT = target.reshape(1, target.shape[0], target.shape[1], 1)","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:54:44.212451Z","iopub.execute_input":"2024-02-26T23:54:44.212842Z","iopub.status.idle":"2024-02-26T23:54:44.243062Z","shell.execute_reply.started":"2024-02-26T23:54:44.212812Z","shell.execute_reply":"2024-02-26T23:54:44.241643Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"# Make predictions using model\npredictions = model.predict([inputscan, inputmask])\npredictions_HR_RE = model.predict([inputscan_HR_RE, 
{"cell_type":"code","source":"# Make predictions for both the clinical slice and the rescaled high-res slice\npredictions = model.predict([inputscan, inputmask])\npredictions_HR_RE = model.predict([inputscan_HR_RE, inputmask_HR])","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:54:52.438392Z","iopub.execute_input":"2024-02-26T23:54:52.438830Z","iopub.status.idle":"2024-02-26T23:54:53.116835Z","shell.execute_reply.started":"2024-02-26T23:54:52.438794Z","shell.execute_reply":"2024-02-26T23:54:53.115484Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"vs = 39  # real voxel size (um)\nns = 50  # node spacing\n\nplt.figure(figsize=(20, 10))  # figure sized for a 2x3 grid of plots\n\n# Scale the W predictions and ground truth from voxels to um, on shared colour limits\npredicted_image = (np.flipud(predictions[2][0].reshape(output_shape)))*vs\npredicted_image_HR_RE = (np.flipud(predictions_HR_RE[2][0].reshape(output_shape)))*vs\ntarget_image = np.flipud(target)*vs\nmin_d = np.array([predicted_image.min(), predicted_image_HR_RE.min()]).min()\nmax_d = np.array([predicted_image.max(), predicted_image_HR_RE.max()]).max()\n\n# Erode the mask so the error maps exclude unreliable boundary nodes\nmsk = np.flipud(binary_erosion(inputmask.reshape(input_shape2[:2]), iterations=2))\n\nprediction_error = np.abs(target_image-predicted_image)*msk\nprediction_error_HR_RE = np.abs(target_image-predicted_image_HR_RE)*msk\nmin_e = np.array([prediction_error.min(), prediction_error_HR_RE.min()]).min()\nmax_e = np.array([prediction_error.max(), prediction_error_HR_RE.max()]).max()\n\n# Create a grid of subplots (row 1: clinical input, row 2: rescaled high-res input)\ngs = gridspec.GridSpec(2, 3, width_ratios=[1, 1.065, 1.065])\n\n# Row 1: input image (left)\nax1 = plt.subplot(gs[0, 0])\nim1 = ax1.imshow(scan, cmap='gray', vmin=0, vmax=1)\nax1.set_title(\"Input Image\")\nax1.axis('off')\n\n# Row 1: predicted displacement (middle)\nax2 = plt.subplot(gs[0, 1])\nim2 = ax2.imshow(predicted_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\nax2.set_title(r\"Predicted $\hat{W}$ (μm)\")\nax2.axis('off')\ndivider = make_axes_locatable(ax2)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)  # colorbar axis beside the plot\nplt.colorbar(im2, cax=cax)\ncax.yaxis.set_ticks_position('right')  # keep the colorbar ticks on the right side\n\n# Row 1: error map (right)\nax3 = plt.subplot(gs[0, 2])\nim3 = ax3.imshow(prediction_error, cmap='jet', vmin=min_e, vmax=max_e)\nax3.set_title(r\"W Error (μm): |$W-\hat{W}$|\")\nax3.axis('off')\ndivider = make_axes_locatable(ax3)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\nplt.colorbar(im3, cax=cax)\ncax.yaxis.set_ticks_position('right')\n\n###### Row 2\n\n# Row 2: input image (left)\nax4 = plt.subplot(gs[1, 0])\ninput_image = np.flipud(inputscan_HR_RE[0])\nim4 = ax4.imshow(input_image, cmap='gray', vmin=0, vmax=1)\nax4.set_title(\"Input Image\")\nax4.axis('off')\n\n# Row 2: predicted displacement (middle)\nax5 = plt.subplot(gs[1, 1])\nim5 = ax5.imshow(predicted_image_HR_RE, cmap='coolwarm', vmin=min_d, vmax=max_d)\nax5.set_title(r\"Predicted $\hat{W}$ (μm)\")\nax5.axis('off')\ndivider = make_axes_locatable(ax5)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\nplt.colorbar(im5, cax=cax)\ncax.yaxis.set_ticks_position('right')\n\n# Row 2: error map (right)\nax6 = plt.subplot(gs[1, 2])\nim6 = ax6.imshow(prediction_error_HR_RE, cmap='jet', vmin=min_e, vmax=max_e)\nax6.set_title(r\"W Error (μm): |$W-\hat{W}$|\")\nax6.axis('off')\ndivider = make_axes_locatable(ax6)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\nplt.colorbar(im6, cax=cax)\ncax.yaxis.set_ticks_position('right')\n\nplt.tight_layout()  # ensure plots don't overlap\nplt.show()","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:54:56.422406Z","iopub.execute_input":"2024-02-26T23:54:56.422827Z","iopub.status.idle":"2024-02-26T23:54:58.019694Z","shell.execute_reply.started":"2024-02-26T23:54:56.422794Z","shell.execute_reply":"2024-02-26T23:54:58.018527Z"},"trusted":true},"execution_count":null,"outputs":[]},
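{"cell_type":"markdown","source":"A brief optional summary (not part of the original analysis): mean and maximum absolute W error inside the eroded mask, in μm, for both inputs. This reuses `prediction_error`, `prediction_error_HR_RE` and `msk` from the plotting cell above.","metadata":{}},{"cell_type":"code","source":"# Optional summary statistics of the masked absolute W error (um)\nfor label, err in [('Clinical', prediction_error), ('High-res rescaled', prediction_error_HR_RE)]:\n    vals = err[msk]  # boolean mask selects nodes inside the eroded bone region\n    print(f'{label}: mean |error| = {vals.mean():.2f} um, max = {vals.max():.2f} um')","metadata":{},"execution_count":null,"outputs":[]},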
{"cell_type":"markdown","source":"Medial-Lateral Slice","metadata":{}},{"cell_type":"code","source":"folder_path = '/kaggle/input/d2im-prototype-dataset-porcine-vertebra-slices/Final/Clinical/ML/'\n\n# Import clinical scan, resize to the network input size and normalise\nscan = tiff.imread(os.path.join(folder_path, '544_t8t10_inputslice.tif'))\ncommon_shape = (256, 256)\nresized_img = zoom(scan, (common_shape[0] / scan.shape[0], common_shape[1] / scan.shape[1]), mode='nearest', order=0)\nscan = np.array(resized_img)/255\nscan = np.flipud(scan)\ninputscan = scan.reshape(1, scan.shape[0], scan.shape[1], 1)\n\n# Import clinical mask, resize to the node grid and dilate\nmask = tiff.imread(os.path.join(folder_path, '544_t8t10_inputslice_mask.tif'))\ncommon_shape = (20, 20)\nresized_img = zoom(mask, (common_shape[0] / mask.shape[0], common_shape[1] / mask.shape[1]), mode='nearest', order=0)\nmask = binary_dilation(np.array(resized_img))\ninputmask = mask.reshape(1, mask.shape[0], mask.shape[1], 1)\n\n# Import rescaled high-res scan\nscan_HR_RE = tiff.imread(os.path.join(folder_path, 'S1_INT_UL_ML_50_0015_downscaled.tif'))\ncommon_shape = (256, 256)\nresized_img = zoom(scan_HR_RE, (common_shape[0] / scan_HR_RE.shape[0], common_shape[1] / scan_HR_RE.shape[1]), mode='nearest', order=0)\nscan_HR_RE = np.array(resized_img)/255\ninputscan_HR_RE = scan_HR_RE.reshape(1, scan_HR_RE.shape[0], scan_HR_RE.shape[1], 1)\n\n# Import high-res mask\nmask_HR = tiff.imread(os.path.join(folder_path, 'S1_INT_UL_ML_50_0015_mask.tif'))\ncommon_shape = (20, 20)\nresized_img = zoom(mask_HR, (common_shape[0] / mask_HR.shape[0], common_shape[1] / mask_HR.shape[1]), mode='nearest', order=0)\nmask_HR = binary_dilation(np.array(resized_img))\ninputmask_HR = mask_HR.reshape(1, mask_HR.shape[0], mask_HR.shape[1], 1)\n\n# Import ground truth displacement field\ntarget = tiff.imread(os.path.join(folder_path, 'S1_INT_UL_ML_50_0015_GT.tif'))\ncommon_shape = (20, 20)\nresized_img = zoom(target, (common_shape[0] / target.shape[0], common_shape[1] / target.shape[1]), mode='nearest', order=0)\ntarget = np.array(resized_img)\ntarget[np.isnan(target)] = 0\ntarget_GT = target.reshape(1, target.shape[0], target.shape[1], 1)","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:55:25.371474Z","iopub.execute_input":"2024-02-26T23:55:25.371877Z","iopub.status.idle":"2024-02-26T23:55:25.427940Z","shell.execute_reply.started":"2024-02-26T23:55:25.371848Z","shell.execute_reply":"2024-02-26T23:55:25.426495Z"},"trusted":true},"execution_count":null,"outputs":[]},
{"cell_type":"code","source":"# Make predictions for both the clinical slice and the rescaled high-res slice\npredictions = model.predict([inputscan, inputmask])\npredictions_HR_RE = model.predict([inputscan_HR_RE, inputmask_HR])","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:55:29.408550Z","iopub.execute_input":"2024-02-26T23:55:29.409037Z","iopub.status.idle":"2024-02-26T23:55:29.781492Z","shell.execute_reply.started":"2024-02-26T23:55:29.409002Z","shell.execute_reply":"2024-02-26T23:55:29.780247Z"},"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"vs = 39  # real voxel size (um)\nns = 50  # node spacing\n\nplt.figure(figsize=(20, 10))  # figure sized for a 2x3 grid of plots\n\n# Scale the W predictions and ground truth from voxels to um, on shared colour limits\npredicted_image = (np.flipud(predictions[2][0].reshape(output_shape)))*vs\npredicted_image_HR_RE = (np.flipud(predictions_HR_RE[2][0].reshape(output_shape)))*vs\ntarget_image = np.flipud(target)*vs\nmin_d = np.array([predicted_image.min(), predicted_image_HR_RE.min()]).min()\nmax_d = np.array([predicted_image.max(), predicted_image_HR_RE.max()]).max()\n\n# Erode the mask so the error maps exclude unreliable boundary nodes\nmsk = np.flipud(binary_erosion(inputmask.reshape(input_shape2[:2]), iterations=2))\n\nprediction_error = np.abs(target_image-predicted_image)*msk\nprediction_error_HR_RE = np.abs(target_image-predicted_image_HR_RE)*msk\nmin_e = np.array([prediction_error.min(), prediction_error_HR_RE.min()]).min()\nmax_e = np.array([prediction_error.max(), prediction_error_HR_RE.max()]).max()\n\n# Create a grid of subplots (row 1: clinical input, row 2: rescaled high-res input)\ngs = gridspec.GridSpec(2, 3, width_ratios=[1, 1.065, 1.065])\n\n# Row 1: input image (left)\nax1 = plt.subplot(gs[0, 0])\nim1 = ax1.imshow(scan, cmap='gray', vmin=0, vmax=1)\nax1.set_title(\"Input Image\")\nax1.axis('off')\n\n# Row 1: predicted displacement (middle)\nax2 = plt.subplot(gs[0, 1])\nim2 = ax2.imshow(predicted_image, cmap='coolwarm', vmin=min_d, vmax=max_d)\nax2.set_title(r\"Predicted $\hat{W}$ (μm)\")\nax2.axis('off')\ndivider = make_axes_locatable(ax2)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)  # colorbar axis beside the plot\nplt.colorbar(im2, cax=cax)\ncax.yaxis.set_ticks_position('right')  # keep the colorbar ticks on the right side\n\n# Row 1: error map (right)\nax3 = plt.subplot(gs[0, 2])\nim3 = ax3.imshow(prediction_error, cmap='jet', vmin=min_e, vmax=max_e)\nax3.set_title(r\"W Error (μm): |$W-\hat{W}$|\")\nax3.axis('off')\ndivider = make_axes_locatable(ax3)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\nplt.colorbar(im3, cax=cax)\ncax.yaxis.set_ticks_position('right')\n\n###### Row 2\n\n# Row 2: input image (left)\nax4 = plt.subplot(gs[1, 0])\ninput_image = np.flipud(inputscan_HR_RE[0])\nim4 = ax4.imshow(input_image, cmap='gray', vmin=0, vmax=1)\nax4.set_title(\"Input Image\")\nax4.axis('off')\n\n# Row 2: predicted displacement (middle)\nax5 = plt.subplot(gs[1, 1])\nim5 = ax5.imshow(predicted_image_HR_RE, cmap='coolwarm', vmin=min_d, vmax=max_d)\nax5.set_title(r\"Predicted $\hat{W}$ (μm)\")\nax5.axis('off')\ndivider = make_axes_locatable(ax5)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\nplt.colorbar(im5, cax=cax)\ncax.yaxis.set_ticks_position('right')\n\n# Row 2: error map (right)\nax6 = plt.subplot(gs[1, 2])\nim6 = ax6.imshow(prediction_error_HR_RE, cmap='jet', vmin=min_e, vmax=max_e)\nax6.set_title(r\"W Error (μm): |$W-\hat{W}$|\")\nax6.axis('off')\ndivider = make_axes_locatable(ax6)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\nplt.colorbar(im6, cax=cax)\ncax.yaxis.set_ticks_position('right')\n\nplt.tight_layout()  # ensure plots don't overlap\n#output_file = f\"Outputs/2024/Plot_output_Clinical_ML.jpg\"  # specify the output file name\n#plt.savefig(output_file, dpi=500)  # dpi controls the resolution (dots per inch)\nplt.show()","metadata":{"execution":{"iopub.status.busy":"2024-02-26T23:55:39.679127Z","iopub.execute_input":"2024-02-26T23:55:39.679648Z","iopub.status.idle":"2024-02-26T23:55:41.355589Z","shell.execute_reply.started":"2024-02-26T23:55:39.679609Z","shell.execute_reply":"2024-02-26T23:55:41.354032Z"},"trusted":true},"execution_count":null,"outputs":[]},
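{"cell_type":"markdown","source":"As for the anterior-posterior slice, an optional summary (not part of the original analysis) of the masked absolute W error in μm for the medial-lateral slice, reusing the variables from the plotting cell above.","metadata":{}},{"cell_type":"code","source":"# Optional summary statistics of the masked absolute W error (um)\nfor label, err in [('Clinical', prediction_error), ('High-res rescaled', prediction_error_HR_RE)]:\n    vals = err[msk]  # boolean mask selects nodes inside the eroded bone region\n    print(f'{label}: mean |error| = {vals.mean():.2f} um, max = {vals.max():.2f} um')","metadata":{},"execution_count":null,"outputs":[]}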
]}