Geographic Information Systems
Asked by Ayda Aktas on December 5, 2020
I’m trying to calculate the vegetation fraction for land surface temperature calculations of a single Landsat 8 image. I need to find the min and max NDVI of the scene.
I’ve masked the image and calculated NDVI with two different methods:
var ndvi1 = image.normalizedDifference(['B5','B4']);
var ndvi2 = nir.subtract(red).divide(nir.add(red));
But when I calculate the min and max NDVI values, the results are different for these two methods.
ndvi1 min: -0.33634447455585664
ndvi1 max: 1
ndvi2 min: -94.81818181818181
ndvi2 max: 63.333333333333336
What is the source of this difference? How should I revise my code for the accurate calculation of min and max NDVI values?
Here is my code:
var raw2 = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_174034_20161117');
var vizParams1 = {
  bands: ['B4', 'B3', 'B2'],
  min: 0,
  max: 3000,
  gamma: 1.4,
};
Map.addLayer(raw2, vizParams1, 'raw2');
// Cloud mask of a single Landsat 8 image.
{
// Bits 3 and 5 are cloud shadow and cloud, respectively.
var cloudShadowBitMask = ee.Number(2).pow(3).int();
var cloudsBitMask = ee.Number(2).pow(5).int();
var snowBitMask = ee.Number(2).pow(4).int();
var waterBitMask = ee.Number(2).pow(2).int();
// Get the QA band.
var qa = raw2.select('pixel_qa');
// Both flags should be set to zero, indicating clear conditions.
var mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0)
    .and(qa.bitwiseAnd(cloudsBitMask).eq(0))
    .and(qa.bitwiseAnd(snowBitMask).eq(0))
    .and(qa.bitwiseAnd(waterBitMask).eq(0));
var image =
    // Scale the data to reflectance and temperature.
    //.select(['B5', 'B7'], ['NIR', 'SWIR']).multiply(0.0001)
    raw2.updateMask(mask);
}
var vizParams2 = {
  bands: ['B4', 'B3', 'B2'],
  min: 0,
  max: 3000,
  gamma: 1.4,
};
Map.addLayer(image, vizParams2, 'Landsat');
print(image,'Landsat');
var nir = image.select(['B5']);
var red = image.select(['B4']);
var ndvi = nir.subtract(red).divide(nir.add(red));
//.rename('NDVI');
var NDVI = image.normalizedDifference(['B5','B4']);
var ndviParams = {min: -1, max: 1, palette: ['blue', 'white', 'green']};
Map.addLayer(NDVI, ndviParams, 'NDVI image');
Map.addLayer(ndvi, ndviParams, 'ndvi image');
// Calculating min and max values:
var min = ee.Number(NDVI.reduceRegion({
  reducer: ee.Reducer.min(),
  scale: 30,
  maxPixels: 1e9
}).values().get(0));
print(min, 'NDVImin');
var max = ee.Number(NDVI.reduceRegion({
  reducer: ee.Reducer.max(),
  scale: 30,
  maxPixels: 1e9
}).values().get(0));
print(max, 'NDVImax');
var min = ee.Number(ndvi.reduceRegion({
  reducer: ee.Reducer.min(),
  scale: 30,
  maxPixels: 1e9
}).values().get(0));
print(min, 'ndvi-min');
var max = ee.Number(ndvi.reduceRegion({
  reducer: ee.Reducer.max(),
  scale: 30,
  maxPixels: 1e9
}).values().get(0));
print(max, 'ndvi-max');
The normalizedDifference() function clamps its inputs to be non-negative, i.e., negative values are first set to 0 before the difference is evaluated. For most applications, using normalizedDifference() is the best practice. The same behavior can be achieved with your band-math method by adding .max(0) to the lines that define the red and nir variables.
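For example, a minimal sketch of that change (the clamped variable names below are only illustrative), using the same masked image as in your script:

// Clamp negative reflectance values to 0 before the band math, which is
// what normalizedDifference() does to its inputs.
var nirClamped = image.select('B5').max(0);
var redClamped = image.select('B4').max(0);
var ndviClamped = nirClamped.subtract(redClamped)
    .divide(nirClamped.add(redClamped))
    .rename('NDVI');

With the bands clamped this way, the min and max statistics from the two approaches should agree.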
The fact that the min and max results differ indicates that the Landsat red and NIR bands contain some negative values (reflectance should not be negative, so these are likely artifacts of the USGS surface-reflectance processing).
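If you want to confirm that for your scene, a quick check along these lines (a sketch only; it counts pixels where either band is below zero, and reduceRegion without a geometry defaults to the image footprint) should do it:

// Count pixels where the red or NIR surface-reflectance value is negative.
var negative = image.select('B4').lt(0).or(image.select('B5').lt(0));
print('pixels with a negative red or NIR value:',
    negative.selfMask().reduceRegion({
      reducer: ee.Reducer.count(),
      scale: 30,
      maxPixels: 1e13}));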
I was curious how different the results of the various methods actually were, so I ran an analysis that calculates NDVI three different ways, subtracts one result from another, and counts the pixels with unequal NDVI values. It turns out that the two "manual" expression methods have 947 pixels that differ (0.002749 percent of valid image pixels), a tiny fraction.
Here is the analysis script for posterity:
var image = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_174034_20161117');
var cloudShadowBitMask = ee.Number(2).pow(3).int();
var cloudsBitMask = ee.Number(2).pow(5).int();
var snowBitMask = ee.Number(2).pow(4).int();
var waterBitMask = ee.Number(2).pow(2).int();
var qa = image.select('pixel_qa');
var mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0)
    .and(qa.bitwiseAnd(cloudsBitMask).eq(0))
    .and(qa.bitwiseAnd(snowBitMask).eq(0))
    .and(qa.bitwiseAnd(waterBitMask).eq(0));
image = image.updateMask(mask);
var nir = image.select(['B5']);
var red = image.select(['B4']);
var ndvi1 = nir.subtract(red).divide(nir.add(red)).rename('NDVI');
var ndvi2 = image.normalizedDifference(['B5','B4']).rename('NDVI');
var ndvi3 = image.expression(
    '((NIR - RED) / (NIR + RED))', {
      'NIR': image.select('B5'),
      'RED': image.select('B4')
    }).rename('NDVI');
function calcMinMax(img, title) {
  var stats = img.reduceRegion({
    reducer: ee.Reducer.minMax(),
    scale: 30,
    maxPixels: 1e13});
  print(title + ':',
    ee.String('Min: ').cat(ee.Algorithms.String(stats.get('NDVI_min'))),
    ee.String('Max: ').cat(ee.Algorithms.String(stats.get('NDVI_max'))));
}
calcMinMax(ndvi1, 'chained expression');
calcMinMax(ndvi2, '.normalizedDifference()');
calcMinMax(ndvi3, '.expression()');
var nPixels = ndvi1.select(0)//.mask()
  .reduceRegion({
    reducer: ee.Reducer.count(),
    scale: 30,
    maxPixels: 1e13});
var nDifPixels = ndvi1.subtract(ndvi2)
  .neq(0)
  .selfMask()
  .reduceRegion({
    reducer: ee.Reducer.count(),
    scale: 30,
    maxPixels: 1e13});
print('n valid image pixels:', nPixels.get('NDVI'));
print('n valid pixels neq 0 (dif):', nDifPixels.get('NDVI'));
print('percent valid pixels neq 0 (dif)',
  ee.Number(nDifPixels.get('NDVI'))
    .divide(ee.Number(nPixels.get('NDVI')))
    .multiply(100));
Answered by Justin Braaten on December 5, 2020