@@ -1,5 +1,6 @@
 import os, datetime, netCDF4, pandas, csv, bz2, sys, json, bisect, psycopg2, math
 import psycopg2.extras
+from pymongo import MongoClient
 
 from collections import namedtuple
 import numpy as np
@@ -246,7 +247,7 @@
             ['nwcsc_historical_{model}.nc', datetime.date(1950, 1, 1), datetime.date(2005, 12, 31)],
         ]
 
-        if forecast_option == 'rcp45':
+        if not forecast_option or forecast_option == 'rcp45':
            ncfiles.append(['nwcsc_rcp45_{model}.nc', datetime.date(2006, 1, 1), datetime.date(2099, 12, 31)])
         elif forecast_option == 'rcp85':
            ncfiles.append(['nwcsc_rcp85_{model}.nc', datetime.date(2006, 1, 1), datetime.date(2099, 12, 31)])
@@ -257,7 +258,7 @@
             ['nwcsc_historical.nc', datetime.date(1950, 1, 1), datetime.date(2005, 12, 31)],
         ]
 
-        if forecast_option == 'rcp45':
+        if not forecast_option or forecast_option == 'rcp45':
            ncfiles.append(['nwcsc_rcp45.nc', datetime.date(2006, 1, 1), datetime.date(2099, 12, 31)])
         elif forecast_option == 'rcp85':
            ncfiles.append(['nwcsc_rcp85.nc', datetime.date(2006, 1, 1), datetime.date(2099, 12, 31)])
@@ -351,7 +352,8 @@
                 data_dict[cell_name][var] = []
                 data = f.variables[var]
                 if testNew:
-                    darray = np.ascontiguousarray(data[jday_start:jday_end, gridx, gridy])
+                    # the coordinate order in the new files appears to be (time, y, x), hence gridy before gridx
+                    darray = np.ascontiguousarray(data[jday_start:jday_end, gridy, gridx])
                 else:
                     darray = np.ascontiguousarray(data[model, jday_start:jday_end, gridx, gridy])
                 if hasattr(data, 'scale_factor'):
@@ -375,7 +377,7 @@
             ])
         return { 'output': climate_data, 'model': models[model], }
 
-    def WEgetDataPRISM(self, input_zone_file, units, start_date, end_date):
+    def WEgetDataPRISM_CSIP(self, input_zone_file, units, start_date, end_date):
         tiles, names, cell_extents, cell_defs = self.getInputZoneInfo(input_zone_file, 'prism')
 
         ident = lambda x: x
@@ -412,6 +414,100 @@
             climate_data += cell_data
         return climate_data, cell_defs, cell_extents
 
+    def WEgetDataPRISM(self, input_zone_file, units, start_date, end_date):
+        prec = 2
+        ident = lambda x: round(x, prec) if x is not None else None
+        to_metric_fn = {
+            'tmp': ident,
+            'apcp': lambda x: round(x / 10, prec) if x is not None else None,  # mm -> cm
+        }
+        to_english_fn = {
+            'tmp': lambda x: round(x * 1.8 + 32, prec) if x is not None else None,  # C -> F
+            'apcp': lambda x: round(x / 25.4, prec) if x is not None else None,  # mm -> in
+        }
+        if units == "metric":
+            header = ["cell", "date", "tmpmin (C)", "tmpmax (C)", "apcp (cm)"]
+            conv_dict = to_metric_fn
+        else:
+            header = ["cell", "date", "tmpmin (F)", "tmpmax (F)", "apcp (in)"]
+            conv_dict = to_english_fn
+
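+        # the 'climate' Mongo database holds the PRISM grid cells (prism_locs) and the daily values (prism_data)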
+        mc = MongoClient('eds0.engr.colostate.edu')
+        db = mc.climate
+
+        ret = [header,]
+
+        # collect the query points ([lon, lat] pairs) from the GeoJSON features in the input zone file
+        tiles = []
+        if input_zone_file:
+            geojson_obj = json.loads(input_zone_file)
+            for feat in geojson_obj['features']:
+                pt = feat['geometry']['coordinates']
+                tiles.append(pt)
+
+        for tile in tiles:
+            x, y = tile
+
+            # find the PRISM grid cell whose boundary polygon contains this point
+            query = [
+                {
+                    '$match': {
+                        'boundary': {
+                            '$geoIntersects': {
+                                '$geometry': {
+                                    'type': "Point",
+                                    'coordinates': [x, y]
+                                }
+                            }
+                        }
+                    }
+                },
+                {
+                    '$project': {'x': 1, 'y': 1}
+                },
+                {
+                    '$limit': 10
+                }
+            ]
+
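+            # matching prism_locs documents carry the grid indices 'x' and 'y' used to key the daily data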
+            results = db.prism_locs.aggregate(query)
+
+            cells = []
+            for d in results:
+                cells.append([d['x'], d['y']])
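+            # each query point should fall inside exactly one PRISM cell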
+            assert len(cells) == 1
+
+            # fetch the daily data for each matched cell, one document per year in the requested range
+            for cell in cells:
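+                # prism_data documents are keyed by '<x>_<y>_<year>', one document per cell-year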
+                data_results = db.prism_data.find({'_id': {'$in': ['{0}_{1}_{2}'.format(cell[0], cell[1], y) for y in range(start_date.year, end_date.year+1)]}})
+
+                # sort the yearly documents chronologically
+                data_results = sorted(data_results, key=lambda d: d['year'])
+
+                # cap the start date to the earliest year actually returned
+                if start_date.year < data_results[0]['year']:
+                    start_date = datetime.datetime(data_results[0]['year'], 1, 1)
+
+                # flatten the per-year precipitation values into one daily series
+                ppt = [v for d in data_results for v in d['data']['ppt']]
+
+                # flatten tmin
+                tmin = [v for d in data_results for v in d['data']['tmin']]
+
+                # flatten tmax
+                tmax = [v for d in data_results for v in d['data']['tmax']]
+
+                dt = start_date
+                for i in range(len(tmax)):
+                    # apply the unit conversions selected above and emit one output row per day
+                    ret.append(['{0} {1}'.format(cell[0], cell[1]), dt.strftime('%Y-%m-%d'), conv_dict['tmp'](tmin[i]), conv_dict['tmp'](tmax[i]), conv_dict['apcp'](ppt[i])])
+                    dt += datetime.timedelta(days=1)
+        return ret
+
     def WEgetDataGHCND(self, input_zone_file_or_station_list, units, start_date, end_date, generate_average=False, IDW_center=False):
         names = []
         stations = []