import glob, os, time
import numpy as np
import pandas as pd
import geopandas as gpd
import rasterio
import rasterio.plot
from rasterio import features
import sklearn
from sklearn.ensemble import RandomForestClassifier
from pathlib import Path
from IPython.display import display
print('All libraries successfully imported!')
print(f'Scikit-learn : {sklearn.__version__}')
All libraries successfully imported!
Scikit-learn : 0.24.2
computer_path = '/export/miro/ndeffense/LBRAT2104/'
grp_letter = 'X'
data_path = f'{computer_path}data/' # Directory with data shared by the assistant
work_path = f'{computer_path}GROUP_{grp_letter}/WORK/' # Directory for all work files
# Input directories
in_situ_path = f'{work_path}IN_SITU/'
s2_path = f'{work_path}3_L2A_MASKED/'
ndvi_path = f'{work_path}NDVI/'
s1_path = f'{data_path}S1_GRD/'
lut_path = f'{data_path}LUT/'
# Output directory
classif_path = f'{work_path}CLASSIF/'
Path(classif_path).mkdir(parents=True, exist_ok=True)
print(f'Classification path is set to : {classif_path}')
Classification path is set to : /export/miro/ndeffense/LBRAT2104/GROUP_X/WORK/CLASSIF/
site = 'NAMUR'
year = '2020'
no_data = -999
ws = 3 # Window size for the post-classification filter
# Field used for classification
field_classif_code = 'grp_1_nb'
field_classif_name = 'grp_1'
# Field used for reclassification
field_reclassif_code = 'grp_A_nb'
field_reclassif_name = 'grp_A'
# Group of features used in classification
feat_nb = 2
if feat_nb == 1:
    feat_name = ['NDVI']
elif feat_nb == 2:
    feat_name = ['NDVI','S1_monthly_mean']
in_situ_cal_shp = f'{in_situ_path}{site}_{year}_IN_SITU_ROI_CAL.shp'
s4s_lut_csv = f'{lut_path}crop_dictionary_new.csv'
in_situ_cal_tif = f'{in_situ_path}{site}_{year}_IN_SITU_ROI_CAL.tif'
classif_tif = f'{classif_path}{site}_{year}_classif_RF_feat_{feat_nb}_{field_classif_name}.tif'
reclassif_tif = f'{classif_path}{site}_{year}_classif_RF_feat_{feat_nb}_{field_classif_name}_reclassify_{field_reclassif_name}.tif'
reclassif_filter_tif = f'{classif_path}{site}_{year}_classif_RF_feat_{feat_nb}_{field_classif_name}_reclassify_{field_reclassif_name}_filter_ws_{ws}.tif'
# Open the calibration polygons with GeoPandas
in_situ_gdf = gpd.read_file(in_situ_cal_shp)
# Open the raster file you want to use as a template for rasterize
img_temp_tif = glob.glob(f'{s2_path}*.tif')[0]
print(f'Raster template file : {img_temp_tif}')
src = rasterio.open(img_temp_tif, "r")
# Update metadata
out_meta = src.meta
out_meta.update(nodata=no_data)
crs_shp = str(in_situ_gdf.crs).split(":",1)[1]
crs_tif = str(src.crs).split(":",1)[1]
print(f'The CRS of in situ data is : {crs_shp}')
print(f'The CRS of raster template is : {crs_tif}')
if crs_shp == crs_tif:
    print("CRS are the same")

    print(f'Rasterize starts : {in_situ_cal_shp}')

    # Burn the features into the raster and write it out
    dst = rasterio.open(in_situ_cal_tif, 'w+', **out_meta)
    dst_arr = dst.read(1)

    # Create a generator of (geometry, value) pairs to use in rasterizing
    geom_col = in_situ_gdf.geometry
    code_col = in_situ_gdf[field_classif_code].astype(int)

    shapes = ((geom, value) for geom, value in zip(geom_col, code_col))

    in_situ_arr = features.rasterize(shapes=shapes,
                                     fill=no_data,
                                     out=dst_arr,
                                     transform=dst.transform)

    dst.write_band(1, in_situ_arr)

    print(f'Rasterize is done : {in_situ_cal_tif}')

    # Close the rasterio objects
    src.close()
    dst.close()
else:
    print('CRS are different --> reproject the in-situ data shapefile with "to_crs"')
Raster template file : /export/miro/ndeffense/LBRAT2104/GROUP_X/WORK/3_L2A_MASKED/T31UFS_20201118T104329_B04_10m_ROI_SCL.tif
The CRS of in situ data is : 32631
The CRS of raster template is : 32631
CRS are the same
Rasterize starts : /export/miro/ndeffense/LBRAT2104/GROUP_X/WORK/IN_SITU/NAMUR_2020_IN_SITU_ROI_CAL.shp
Rasterize is done : /export/miro/ndeffense/LBRAT2104/GROUP_X/WORK/IN_SITU/NAMUR_2020_IN_SITU_ROI_CAL.tif
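If the CRS check fails, the in-situ polygons can be reprojected to match the raster before rasterizing. A minimal sketch using GeoPandas' to_crs, reusing the variables defined above:

# Reproject the in-situ polygons to the EPSG code of the raster template
# (only needed when the CRS comparison above fails)
in_situ_gdf = in_situ_gdf.to_crs(epsg=int(crs_tif))
print(f'In-situ data reprojected to : {in_situ_gdf.crs}')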
Create an empty list to append all feature rasters one by one
list_src_arr = []
1 NDVI image per month
if 'NDVI' in feat_name:
    list_im = sorted(glob.glob(f'{ndvi_path}*.tif'))
    for im_file in list_im:
        src = rasterio.open(im_file, "r")
        im = src.read(1)
        list_src_arr.append(im)
        src.close()
    print(f'Shape of features : {im.shape}')
    print(f'Number of features : {len(list_src_arr)}')
else:
    print("No NDVI in the set of features")
Shape of features : (570, 986)
Number of features : 12
S1 monthly mean composite (obtained with Google Earth Engine)
if 'S1_monthly_mean' in feat_name:
    s1_monthly_mean_tif = f'{s1_path}monthly_mean_{site}_{year}.tif'
    src = rasterio.open(s1_monthly_mean_tif, "r")
    im = src.read()
    src.close()
    for i in range(len(im)):
        band = im[i]
        list_src_arr.append(band)
    print(f'Shape of features : {band.shape}')
    print(f'Number of features : {len(list_src_arr)}')
else:
    print("No S1 monthly mean in the set of features")
Shape of features : (570, 986)
Number of features : 24
Merge all the 2D matrices from the list into one 3D matrix
feat_arr = np.dstack(list_src_arr).astype(np.float32)
print(feat_arr.shape)
print(f'There are {feat_arr.shape[2]} features')
print(f'The features type is : {feat_arr.dtype}')
#feat_arr_1 = np.stack(list_src_arr, axis=0)
#print(feat_arr_1.shape)
(570, 986, 24)
There are 24 features
The features type is : float32
Now that we have the image we want to classify (our X feature inputs) and the ROI with the land-cover labels (our y labelled data), we need to pair them up in NumPy arrays so we can feed them to the Random Forest.
# Open in-situ used for calibration
src = rasterio.open(in_situ_cal_tif, "r")
cal_arr = src.read(1)
src.close()
# Find how many labeled entries we have -- i.e. how many training data samples?
n_samples = (cal_arr != no_data).sum()
print(f'We have {n_samples} samples (= calibration pixels)')
We have 34909 samples (= calibration pixels)
What are our classification labels?
labels = np.unique(cal_arr[cal_arr != no_data])
print(f'The training data include {labels.size} classes: {labels}')
The training data include 19 classes: [ 3 21 22 69 81 84 121 1111 1121 1152 1171 1192 1435 1511 1771 1811 1911 1923 9212]
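These numeric codes can be translated into crop names with the look-up table that is loaded later in this notebook; a short sketch, assuming the same LUT columns as used below:

# Map each class code to its name via the crop dictionary (same LUT as below)
lut_df = pd.read_csv(s4s_lut_csv, sep=';')
name_map = dict(zip(lut_df[field_classif_code], lut_df[field_classif_name]))
for code in labels:
    print(f'{code} -> {name_map.get(code, "unknown")}')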
We need an X matrix containing our features and a y array containing our labels. Both will have n_samples rows.
X = feat_arr[cal_arr != no_data, :]
y = cal_arr[cal_arr != no_data]
# Replace NaN in classification features by the no_data value
X = np.nan_to_num(X, nan=no_data)
print(f'Our X matrix is sized: {X.shape}')
print(f'Our y array is sized: {y.shape}')
Our X matrix is sized: (34909, 24)
Our y array is sized: (34909,)
Now that we have our X 2D-matrix of feature inputs and our y 1D-array of labels, we can train our model.
See the scikit-learn documentation of RandomForestClassifier for the full list of parameters.
start_training = time.time()
# Initialize our model
rf = RandomForestClassifier(n_estimators=100, # The number of trees in the forest.
bootstrap=True, # Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree.
oob_score=True) # Whether to use out-of-bag samples to estimate the generalization score. Only available if bootstrap=True.
# Fit our model to training data
rf = rf.fit(X, y)
end_training = time.time()
# Get time elapsed during the Random Forest training
hours, rem = divmod(end_training-start_training, 3600)
minutes, seconds = divmod(rem, 60)
print("Random Forest training : {:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
Random Forest training : 00:00:30.04
With our Random Forest model fit, we can check out the "Out-of-Bag" (OOB) prediction score.
Score of the training dataset obtained using an out-of-bag estimate. This attribute exists only when oob_score is True.
print(f'Our OOB prediction of accuracy is: {round(rf.oob_score_ * 100,2)}%')
Our OOB prediction of accuracy is: 99.46%
To help us get an idea of which feature bands were important, we can look at the feature importance scores.
The impurity-based feature importances. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total reduction of the criterion brought by that feature. It is also known as the Gini importance.
feat_band_list = []
gini_list = []
for band_nb, imp in enumerate(rf.feature_importances_, start=1):
feat_band_list.append(band_nb)
gini_list.append(imp)
gini_dict = {'feat_band':feat_band_list,'gini':gini_list}
gini_df = pd.DataFrame(gini_dict).sort_values(by='gini', ascending=False)
gini_df
 | feat_band | gini |
---|---|---|
6 | 7 | 0.098519 |
7 | 8 | 0.096310 |
4 | 5 | 0.080497 |
2 | 3 | 0.070317 |
8 | 9 | 0.069351 |
3 | 4 | 0.066073 |
0 | 1 | 0.055116 |
17 | 18 | 0.053906 |
9 | 10 | 0.050950 |
10 | 11 | 0.043805 |
18 | 19 | 0.041479 |
11 | 12 | 0.037573 |
19 | 20 | 0.035523 |
5 | 6 | 0.029468 |
16 | 17 | 0.028663 |
15 | 16 | 0.025300 |
20 | 21 | 0.022809 |
12 | 13 | 0.022165 |
1 | 2 | 0.021219 |
14 | 15 | 0.011463 |
23 | 24 | 0.011102 |
13 | 14 | 0.010998 |
22 | 23 | 0.009333 |
21 | 22 | 0.008059 |
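The band numbers alone are hard to read. A sketch that attaches labels, assuming the stack order used above (bands 1-12 are the monthly NDVI images, bands 13-24 the S1 monthly-mean bands; the exact S1 band semantics depend on the GEE export):

# Hypothetical feature labels matching the fill order of list_src_arr
feat_labels = [f'NDVI_month_{m:02d}' for m in range(1, 13)] \
            + [f'S1_band_{b:02d}' for b in range(1, 13)]
gini_df['feat_label'] = gini_df['feat_band'].map(lambda b: feat_labels[b - 1])
display(gini_df.head(10))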
Let's look at a cross-tabulation to see the class confusion.
# Setup a dataframe
df = pd.DataFrame()
df['truth'] = y
df['predict'] = rf.predict(X)
# Cross-tabulate predictions
cross_tab = pd.crosstab(df['truth'], df['predict'], margins=True)
display(cross_tab)
truth \ predict | 3 | 21 | 22 | 69 | 81 | 84 | 121 | 1111 | 1121 | 1152 | 1171 | 1192 | 1435 | 1511 | 1771 | 1811 | 1911 | 1923 | 9212 | All |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3 | 5971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5971 |
21 | 0 | 1551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1551 |
22 | 0 | 0 | 124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 |
69 | 0 | 0 | 0 | 2296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2296 |
81 | 0 | 0 | 0 | 0 | 1338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1338 |
84 | 0 | 0 | 0 | 0 | 0 | 183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 |
121 | 0 | 0 | 0 | 0 | 0 | 0 | 566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 566 |
1111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7403 |
1121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2132 |
1152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1663 |
1171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 534 |
1192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1477 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1477 |
1435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1176 | 0 | 0 | 0 | 0 | 0 | 0 | 1176 |
1511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1577 | 0 | 0 | 0 | 0 | 0 | 1577 |
1771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2454 | 0 | 0 | 0 | 0 | 2454 |
1811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1850 | 0 | 0 | 0 | 1850 |
1911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0 | 0 | 403 |
1923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2059 | 0 | 2059 |
9212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 152 |
All | 5971 | 1551 | 124 | 2296 | 1338 | 183 | 566 | 7403 | 2132 | 1663 | 534 | 1477 | 1176 | 1577 | 2454 | 1850 | 403 | 2059 | 152 | 34909 |
Unbelievable? The real accuracy is certainly not 100%. What is likely going on is that we gave a flexible algorithm with a large number of trees enough information to precisely memorize the pattern in our training data. Validating a machine-learning algorithm on its own training data is a useless exercise that overinflates the accuracy.
Instead, we could have used a cross-validation approach: train on a subset of the dataset, then predict and assess the accuracy on the part we did not train on.
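A minimal sketch of such an evaluation, reusing the X and y arrays built above (train_test_split, cross_val_score and accuracy_score are standard scikit-learn utilities):

from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score

# Hold out 30% of the calibration pixels for validation
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)

rf_val = RandomForestClassifier(n_estimators=100)
rf_val.fit(X_train, y_train)

# Accuracy on pixels the model has never seen
print(f'Hold-out accuracy : {accuracy_score(y_test, rf_val.predict(X_test)) * 100:.2f}%')

# Alternatively, a 5-fold cross-validation on the whole calibration set
scores = cross_val_score(RandomForestClassifier(n_estimators=100), X, y, cv=5)
print(f'5-fold CV accuracy : {scores.mean() * 100:.2f}% (+/- {scores.std() * 100:.2f}%)')

Note that pixels from the same polygon are strongly correlated, so even a random pixel-level split overestimates accuracy; splitting at the polygon level (e.g. with GroupKFold, using a polygon identifier as the group) would be safer.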
With our Random Forest classifier fit, we can now proceed by trying to classify the entire image.
# Take our full image and reshape into long 2d array (nrow * ncol, nband) for classification
img = feat_arr
img = np.nan_to_num(img, nan=no_data)
new_shape = (img.shape[0] * img.shape[1], img.shape[2])
img_as_array = img.reshape(new_shape)
print(f'Reshaped from {img.shape} to {img_as_array.shape}')
start_classification = time.time()
# Now predict for each pixel
class_prediction = rf.predict(img_as_array)
# Reshape our classification map
class_prediction = class_prediction.reshape(img[:, :, 0].shape)
end_classification = time.time()
hours, rem = divmod(end_classification-start_classification, 3600)
minutes, seconds = divmod(rem, 60)
print("Random Forest training : {:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
print(class_prediction)
Reshaped from (570, 986, 24) to (562020, 24)
Random Forest classification : 00:00:24.75
[[   3    3   81 ...    3    3    3]
 [   3    3    3 ...    3    3    3]
 [   3    3   21 ...    3    3 1121]
 ...
 [  69   69   69 ... 1111 1111 1111]
 [  69   69   69 ... 1111 1111 1111]
 [  69   69   69 ... 1111 1111 1111]]
lut_df = pd.read_csv(s4s_lut_csv, sep=';')
lut_df = lut_df.sort_values(by=field_classif_code, ascending=True)
display(lut_df[[field_classif_code, field_classif_name, field_reclassif_code, field_reclassif_name]].head())
 | grp_1_nb | grp_1 | grp_A_nb | grp_A |
---|---|---|---|---|
0 | 0 | Remove | 0 | Remove |
71 | 0 | Remove | 0 | Remove |
75 | 0 | Remove | 0 | Remove |
82 | 0 | Remove | 0 | Remove |
86 | 0 | Remove | 0 | Remove |
reclass_prediction = np.copy(class_prediction)

for i, row in lut_df.iterrows():
    old_class = row[field_classif_code]
    new_class = row[field_reclassif_code]
    #print(f'{old_class} --> {new_class}')
    # Mask against the original classification (not the array being modified),
    # so a pixel cannot be remapped twice when a new code collides with an old one
    reclass_prediction[class_prediction == old_class] = new_class
print(f'Classification : \n {class_prediction}')
print(f'Re-classification : \n {reclass_prediction}')
Classification :
 [[   3    3   81 ...    3    3    3]
 [   3    3    3 ...    3    3    3]
 [   3    3   21 ...    3    3 1121]
 ...
 [  69   69   69 ... 1111 1111 1111]
 [  69   69   69 ... 1111 1111 1111]
 [  69   69   69 ... 1111 1111 1111]]
Re-classification :
 [[  3   3   8 ...   3   3   3]
 [  3   3   3 ...   3   3   3]
 [  3   3  21 ...   3   3 112]
 ...
 [  6   6   6 ... 111 111 111]
 [  6   6   6 ... 111 111 111]
 [  6   6   6 ... 111 111 111]]
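For larger LUTs or rasters, the same reclassification can also be done with a single vectorized lookup; a sketch, assuming every class present in class_prediction appears in the LUT:

# Old-code -> new-code dictionary built from the LUT
lut_map = dict(zip(lut_df[field_classif_code].astype(int),
                   lut_df[field_reclassif_code].astype(int)))

# Apply the mapping to every pixel in one pass
reclass_vec = np.vectorize(lut_map.get)(class_prediction)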
sizey = reclass_prediction.shape[0]
sizex = reclass_prediction.shape[1]

pad = ws // 2  # pad width so that every pixel gets a full ws x ws window
padded = np.pad(reclass_prediction, ((pad, pad), (pad, pad)), 'edge')

majority = np.empty((sizey, sizex), dtype='int16')

for i in range(sizey):
    for j in range(sizex):
        window = padded[i:i+ws, j:j+ws].flatten()
        counts = np.bincount(window)
        majority[i, j] = np.argmax(counts)

majority = majority.reshape((1, sizey, sizex))

print(f'Re-classification : \n {reclass_prediction}')
print(f'Re-classification with filter : \n {majority}')
Re-classification :
 [[  3   3   8 ...   3   3   3]
 [  3   3   3 ...   3   3   3]
 [  3   3  21 ...   3   3 112]
 ...
 [  6   6   6 ... 111 111 111]
 [  6   6   6 ... 111 111 111]
 [  6   6   6 ... 111 111 111]]
Re-classification with filter :
 [[[  3   3   8 ...   3   3   3]
  [  3   3   3 ...   3   3   3]
  [  3   3   3 ...   3   3 112]
  ...
  [  6   6   6 ... 111 111 111]
  [  6   6   6 ... 111 111 111]
  [  6   6   6 ... 111 111 111]]]
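The pure-Python double loop gets slow on large rasters. The same majority filter can be vectorized; a sketch, assuming NumPy >= 1.20 (for sliding_window_view) and SciPy >= 1.9 (for the keepdims argument of mode):

from numpy.lib.stride_tricks import sliding_window_view
from scipy import stats

# All ws x ws windows of the padded array: shape (sizey, sizex, ws, ws)
windows = sliding_window_view(padded, (ws, ws))

# Mode over the flattened window axis = per-pixel majority class
maj, _ = stats.mode(windows.reshape(sizey, sizex, -1), axis=2, keepdims=False)
majority_fast = maj.astype('int16').reshape((1, sizey, sizex))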
Open template image to get metadata
with rasterio.open(img_temp_tif) as src:
    profile = src.profile

profile
{'driver': 'GTiff', 'dtype': 'int16', 'nodata': -10000.0, 'width': 986, 'height': 570, 'count': 1, 'crs': CRS({'init': 'epsg:32631'}), 'transform': Affine(10.0, 0.0, 627260.0, 0.0, -10.0, 5596180.0), 'tiled': False, 'compress': 'lzw', 'interleave': 'band'}
with rasterio.open(classif_tif, "w", **profile) as dst:
    dst.write(class_prediction, 1)

with rasterio.open(reclassif_tif, "w", **profile) as dst:
    dst.write(reclass_prediction, 1)

with rasterio.open(reclassif_filter_tif, "w", **profile) as dst:
    dst.write(majority)
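As a quick sanity check (not part of the original workflow), the written rasters can be re-opened to confirm their shape and class codes:

# Re-open the filtered classification and inspect it
with rasterio.open(reclassif_filter_tif) as src:
    check_arr = src.read(1)

print(f'Shape : {check_arr.shape}')
print(f'Classes : {np.unique(check_arr)}')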