climate-commits mailing list archives

From:    omk...@apache.org
Subject: [2/4] climate git commit: CLIMATE-852 [OCW Documentation] Theme not found error
Date:    Tue, 27 Sep 2016 21:05:38 GMT
http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/data_source/local.py
----------------------------------------------------------------------
diff --git a/ocw/data_source/local.py b/ocw/data_source/local.py
index f2956fa..ce57538 100644
--- a/ocw/data_source/local.py
+++ b/ocw/data_source/local.py
@@ -16,7 +16,7 @@
 # under the License.
 
 import calendar
-from datetime import timedelta ,datetime
+from datetime import timedelta, datetime
 from time import strptime
 from glob import glob
 import re
@@ -113,6 +113,7 @@ def _get_netcdf_variable_name(valid_var_names, netcdf, netcdf_var):
     )
     raise ValueError(error)
 
+
 def load_WRF_2d_files(file_path=None,
                       filename_pattern=None,
                       filelist=None,
@@ -154,18 +155,19 @@ def load_WRF_2d_files(file_path=None,
     WRF_files.sort()
 
     file_object_first = netCDF4.Dataset(WRF_files[0])
-    lats = file_object_first.variables['XLAT'][0,:]
-    lons = file_object_first.variables['XLONG'][0,:]
+    lats = file_object_first.variables['XLAT'][0, :]
+    lons = file_object_first.variables['XLONG'][0, :]
 
     times = []
     nfile = len(WRF_files)
     for ifile, file in enumerate(WRF_files):
-        print('Reading file '+str(ifile+1)+'/'+str(nfile), file)
+        print('Reading file ' + str(ifile + 1) + '/' + str(nfile), file)
         file_object = netCDF4.Dataset(file)
-        time_struct_parsed = strptime(file[-19:],"%Y-%m-%d_%H:%M:%S")
+        time_struct_parsed = strptime(file[-19:], "%Y-%m-%d_%H:%M:%S")
         for ihour in numpy.arange(24):
-            times.append(datetime(*time_struct_parsed[:6]) + timedelta(hours=ihour))
-        values0= file_object.variables[variable_name][:]
+            times.append(
+                datetime(*time_struct_parsed[:6]) + timedelta(hours=ihour))
+        values0 = file_object.variables[variable_name][:]
         if ifile == 0:
             values = values0
             variable_unit = file_object.variables[variable_name].units
@@ -175,9 +177,10 @@ def load_WRF_2d_files(file_path=None,
     times = numpy.array(times)
     return Dataset(lats, lons, times, values, variable_name, units=variable_unit, name=name)
 
+
 def load_file(file_path,
               variable_name,
-              variable_unit = None,
+              variable_unit=None,
               elevation_index=0,
               name='',
               lat_name=None,
@@ -229,14 +232,14 @@ def load_file(file_path,
     try:
         netcdf = netCDF4.Dataset(file_path, mode='r')
     except IOError:
-        err = "Dataset filepath '%s' is invalid. Please ensure it is correct." %file_path
+        err = "Dataset filepath '%s' is invalid. Please ensure it is correct." % file_path
         raise ValueError(err)
     except:
         err = (
             "The given file '%s' cannot be loaded. Either the path is invalid or the given file is invalid. "
             "Please ensure that it is a valid "
             "NetCDF file. If problems persist, report them to the project's "
-            "mailing list." %file_path
+            "mailing list." % file_path
         )
         raise ValueError(err)
 
@@ -245,7 +248,8 @@ def load_file(file_path,
     if lon_name is None:
         lon_name = _get_netcdf_variable_name(LON_NAMES, netcdf, variable_name)
     if time_name is None:
-        time_name = _get_netcdf_variable_name(TIME_NAMES, netcdf, variable_name)
+        time_name = _get_netcdf_variable_name(
+            TIME_NAMES, netcdf, variable_name)
 
     lats = netcdf.variables[lat_name][:]
     lons = netcdf.variables[lon_name][:]
@@ -271,13 +275,13 @@ def load_file(file_path,
 
         # Strip out the elevation values so we're left with a 3D array.
         if level_index == 0:
-            values = values [elevation_index,:,:,:]
+            values = values[elevation_index, :, :, :]
         elif level_index == 1:
-            values = values [:,elevation_index,:,:]
+            values = values[:, elevation_index, :, :]
         elif level_index == 2:
-            values = values [:,:,elevation_index,:]
-        else: #pragma: no cover
-            values = values [:,:,:,elevation_index]
+            values = values[:, :, elevation_index, :]
+        else:  # pragma: no cover
+            values = values[:, :, :, elevation_index]
 
     origin = {
         'source': 'local',
@@ -286,11 +290,13 @@ def load_file(file_path,
         'lon_name': lon_name,
         'time_name': time_name
     }
-    if elevation_index != 0: origin['elevation_index'] = elevation_index
+    if elevation_index != 0:
+        origin['elevation_index'] = elevation_index
 
     return Dataset(lats, lons, times, values, variable=variable_name,
                    units=variable_unit, name=name, origin=origin)
 
+
 def load_multiple_files(file_path,
                         variable_name,
                         dataset_name='data',
@@ -346,19 +352,20 @@ def load_multiple_files(file_path,
         prefix = os.path.commonprefix(data_filenames)
         postfix = os.path.commonprefix(data_filenames_reversed)[::-1]
         for element in data_filenames:
-            data_name.append(element.replace(prefix,'').replace(postfix,''))
+            data_name.append(element.replace(prefix, '').replace(postfix, ''))
 
     datasets = []
-    for ifile,filename in enumerate(data_filenames):
+    for ifile, filename in enumerate(data_filenames):
         datasets.append(load_file(filename, variable_name, variable_unit, name=data_name[ifile],
-                        lat_name=lat_name, lon_name=lon_name, time_name=time_name))
+                                  lat_name=lat_name, lon_name=lon_name, time_name=time_name))
 
     return datasets
 
+
 def load_WRF_2d_files_RAIN(file_path=None,
-                      filename_pattern=None,
-                      filelist=None,
-                      name=''):
+                           filename_pattern=None,
+                           filelist=None,
+                           name=''):
     ''' Load multiple WRF (or nuWRF) original output files containing 2D \
         fields such as precipitation and surface variables into a Dataset. \
     The dataset can be spatially subset.
@@ -385,46 +392,51 @@ def load_WRF_2d_files_RAIN(file_path=None,
             WRF_files.extend(glob(file_path + pattern))
         WRF_files.sort()
     else:
-        WRF_files=[line.rstrip('\n') for line in open(filelist)]
+        WRF_files = [line.rstrip('\n') for line in open(filelist)]
 
     file_object_first = netCDF4.Dataset(WRF_files[0])
-    lats = file_object_first.variables['XLAT'][0,:]
-    lons = file_object_first.variables['XLONG'][0,:]
+    lats = file_object_first.variables['XLAT'][0, :]
+    lons = file_object_first.variables['XLONG'][0, :]
 
     times = []
     nfile = len(WRF_files)
     for ifile, file in enumerate(WRF_files):
-        print('Reading file '+str(ifile+1)+'/'+str(nfile), file)
+        print('Reading file ' + str(ifile + 1) + '/' + str(nfile), file)
         file_object = netCDF4.Dataset(file)
-        time_struct_parsed = strptime(file[-19:],"%Y-%m-%d_%H:%M:%S")
+        time_struct_parsed = strptime(file[-19:], "%Y-%m-%d_%H:%M:%S")
         for ihour in range(24):
-            times.append(datetime(*time_struct_parsed[:6]) + timedelta(hours=ihour))
+            times.append(
+                datetime(*time_struct_parsed[:6]) + timedelta(hours=ihour))
         if ifile == 0:
-            values0= file_object.variables['RAINC'][:]+file_object.variables['RAINNC'][:]
+            values0 = file_object.variables['RAINC'][
+                :] + file_object.variables['RAINNC'][:]
         else:
-            values0= numpy.concatenate((values0, file_object.variables['RAINC'][:]+file_object.variables['RAINNC'][:]))
+            values0 = numpy.concatenate((values0, file_object.variables['RAINC'][
+                                        :] + file_object.variables['RAINNC'][:]))
         file_object.close()
-    times= numpy.array(times)
+    times = numpy.array(times)
     years = numpy.array([d.year for d in times])
     ncycle = numpy.unique(years).size
-    print('ncycle=',ncycle)
+    print('ncycle=', ncycle)
     nt, ny, nx = values0.shape
-    values = numpy.zeros([nt-ncycle*24, ny, nx])
+    values = numpy.zeros([nt - ncycle * 24, ny, nx])
     times2 = []
-    nt2 = nt/ncycle
+    nt2 = nt / ncycle
     # remove the first day in each year
-    nt3 = nt2-24
+    nt3 = nt2 - 24
     t_index = 0
     for icycle in numpy.arange(ncycle):
-        for it in numpy.arange(nt3)+24:
-            values[t_index,:] = values0[icycle*nt2+it,:]-values0[icycle*nt2+it-1,:]
-            times2.append(times[icycle*nt2+it])
-            t_index = t_index +1
+        for it in numpy.arange(nt3) + 24:
+            values[t_index, :] = values0[icycle * nt2 + it, :] - \
+                values0[icycle * nt2 + it - 1, :]
+            times2.append(times[icycle * nt2 + it])
+            t_index = t_index + 1
     variable_name = 'PREC'
-    variable_unit= 'mm/hr'
+    variable_unit = 'mm/hr'
     times2 = numpy.array(times2)
     return Dataset(lats, lons, times2, values, variable_name, units=variable_unit, name=name)
 
+
 def load_dataset_from_multiple_netcdf_files(variable_name,
                                             lat_name=None, lon_name=None, time_name=None,
                                             name='', file_list=None, file_path=None, filename_pattern=None,
@@ -486,9 +498,9 @@ def load_dataset_from_multiple_netcdf_files(variable_name,
 
     dataset0 = load_file(nc_files[0], variable_name, lat_name=lat_name,
                          lon_name=lon_name, time_name=time_name)
-    if dataset0.lons.ndim == 1 and dataset0.lats.ndim ==1:
+    if dataset0.lons.ndim == 1 and dataset0.lats.ndim == 1:
         lons, lats = numpy.meshgrid(dataset0.lons, dataset0.lats)
-    elif dataset0.lons.ndim == 2 and dataset0.lats.ndim ==2:
+    elif dataset0.lons.ndim == 2 and dataset0.lats.ndim == 2:
         lons = dataset0.lons
         lats = dataset0.lats
 
@@ -499,25 +511,26 @@ def load_dataset_from_multiple_netcdf_files(variable_name,
     times = []
     nfile = len(nc_files)
     for ifile, file in enumerate(nc_files):
-        print('NC file '+str(ifile+1)+'/'+str(nfile), file)
-        file_object0= load_file(file, variable_name, lat_name=lat_name,
-                                lon_name=lon_name, time_name=time_name)
-        values0= file_object0.values
+        print('NC file ' + str(ifile + 1) + '/' + str(nfile), file)
+        file_object0 = load_file(file, variable_name, lat_name=lat_name,
+                                 lon_name=lon_name, time_name=time_name)
+        values0 = file_object0.values
         times.extend(file_object0.times)
         if mask_file:
-            values0 = values0[:,y_index, x_index]
+            values0 = values0[:, y_index, x_index]
         if ifile == 0:
             data_values = values0
         else:
-            data_values= numpy.concatenate((data_values, values0))
+            data_values = numpy.concatenate((data_values, values0))
     times = numpy.array(times)
     return Dataset(lats, lons, times, data_values, variable_name, name=name)
 
+
 def load_NLDAS_forcingA_files(file_path=None,
-                      filename_pattern=None,
-                      filelist=None,
-                      variable_name='APCPsfc_110_SFC_acc1h',
-                      name=''):
+                              filename_pattern=None,
+                              filelist=None,
+                              variable_name='APCPsfc_110_SFC_acc1h',
+                              name=''):
     ''' Load multiple NLDAS2 forcingAWRF files containing 2D fields such \
         as precipitation and surface variables into a Dataset. The dataset \
         can be spatially subset.
@@ -561,9 +574,9 @@ def load_NLDAS_forcingA_files(file_path=None,
     times = []
     nfile = len(NLDAS_files)
     for ifile, file in enumerate(NLDAS_files):
-        print('Reading file '+str(ifile+1)+'/'+str(nfile), file)
+        print('Reading file ' + str(ifile + 1) + '/' + str(nfile), file)
         file_object = netCDF4.Dataset(file)
-        time_struct_parsed = strptime(file[-20:-7],"%Y%m%d.%H%M")
+        time_struct_parsed = strptime(file[-20:-7], "%Y%m%d.%H%M")
         times.append(datetime(*time_struct_parsed[:6]))
 
         values0 = file_object.variables[variable_name][:]
@@ -577,11 +590,12 @@ def load_NLDAS_forcingA_files(file_path=None,
     times = numpy.array(times)
     return Dataset(lats, lons, times, values, variable_name, units=variable_unit, name=name)
 
+
 def load_GPM_IMERG_files(file_path=None,
-                      filename_pattern=None,
-                      filelist=None,
-                      variable_name='precipitationCal',
-                      name='GPM_IMERG'):
+                         filename_pattern=None,
+                         filelist=None,
+                         variable_name='precipitationCal',
+                         name='GPM_IMERG'):
     ''' Load multiple GPM Level 3 IMERG files containing calibrated \
         precipitation and generate an OCW Dataset object.
 
@@ -627,12 +641,13 @@ def load_GPM_IMERG_files(file_path=None,
     times = []
     nfile = len(GPM_files)
     for ifile, file in enumerate(GPM_files):
-        print('Reading file '+str(ifile+1)+'/'+str(nfile), file)
+        print('Reading file ' + str(ifile + 1) + '/' + str(nfile), file)
         file_object = h5py.File(file)
-        time_struct_parsed = strptime(file[-39:-23],"%Y%m%d-S%H%M%S")
+        time_struct_parsed = strptime(file[-39:-23], "%Y%m%d-S%H%M%S")
         times.append(datetime(*time_struct_parsed[:6]))
-        values0= numpy.transpose(ma.masked_less(file_object['Grid'][variable_name][:], 0.))
-        values0= numpy.expand_dims(values0, axis=0)
+        values0 = numpy.transpose(ma.masked_less(
+            file_object['Grid'][variable_name][:], 0.))
+        values0 = numpy.expand_dims(values0, axis=0)
         if ifile == 0:
             values = values0
         else:

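As a reading aid for the hunks above (not part of the commit), a minimal sketch of how the reformatted load_file() entry point is typically called; the file path and variable name are hypothetical placeholders. load_multiple_files() wraps the same call over a list of files and returns a list of Datasets.

import ocw.data_source.local as local

# Load one NetCDF file into an OCW Dataset; lat/lon/time variable names are
# auto-detected when lat_name/lon_name/time_name are left as None.
ds = local.load_file('/path/to/model_output.nc',   # hypothetical path
                     variable_name='tas',          # hypothetical variable
                     name='example_model')
print(ds.values.shape, ds.units)
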
http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/data_source/rcmed.py
----------------------------------------------------------------------
diff --git a/ocw/data_source/rcmed.py b/ocw/data_source/rcmed.py
index 8892032..82b4c29 100644
--- a/ocw/data_source/rcmed.py
+++ b/ocw/data_source/rcmed.py
@@ -22,7 +22,8 @@ Classes:
     https://rcmes.jpl.nasa.gov/query-api/query.php?
 '''
 
-import urllib, urllib2
+import urllib
+import urllib2
 import re
 import json
 import numpy as np
@@ -58,7 +59,7 @@ def get_parameters_metadata():
     return param_info_list
 
 
-def _make_mask_array(values, parameter_id, parameters_metadata):  
+def _make_mask_array(values, parameter_id, parameters_metadata):
     '''Created masked array to deal with missing values
 
     :param values: Numpy array of values which may contain missing values
@@ -89,7 +90,7 @@ def _reshape_values(values, unique_values):
     :type values: numpy array
     :param unique_values: Tuple of unique latitudes, longitudes and times data.
     :type unique_values: Tuple 
-    
+
     :returns: Reshaped values data
     :rtype: Numpy array
     '''
@@ -116,11 +117,12 @@ def _calculate_time(unique_times, time_step):
     '''
 
     time_format = "%Y-%m-%d %H:%M:%S"
-    unique_times = np.array([datetime.strptime(time, time_format) for time in unique_times])
-    #There is no need to sort time.
-    #This function may required still in RCMES
-    #unique_times.sort()
-    #This function should be moved to the data_process.
+    unique_times = np.array(
+        [datetime.strptime(time, time_format) for time in unique_times])
+    # There is no need to sort time.
+    # This function may required still in RCMES
+    # unique_times.sort()
+    # This function should be moved to the data_process.
 
     return unique_times
 
@@ -157,10 +159,10 @@ def _get_data(url):
     '''
 
     string = urllib2.urlopen(url)
-    data_string = string.read()    
+    data_string = string.read()
     index_of_data = re.search('data: \r\n', data_string)
     data = data_string[index_of_data.end():len(data_string)]
-    data = data.split('\r\n') 
+    data = data.split('\r\n')
 
     lats = []
     lons = []
@@ -168,15 +170,16 @@ def _get_data(url):
     values = []
     times = []
 
-    for i in range(len(data) - 1):  # Because the last row is empty, "len(data)-1" is used.
+    # Because the last row is empty, "len(data)-1" is used.
+    for i in range(len(data) - 1):
         row = data[i].split(',')
         lats.append(np.float32(row[0]))
         lons.append(np.float32(row[1]))
         # Level is not currently supported in Dataset class.
-        #levels.append(np.float32(row[2]))
+        # levels.append(np.float32(row[2]))
         times.append(row[3])
         values.append(np.float32(row[4]))
-    
+
     lats = np.array(lats)
     lons = np.array(lons)
     times = np.array(times)
@@ -259,12 +262,12 @@ def _generate_query_url(dataset_id, parameter_id, min_lat, max_lat, min_lon, max
     start_time = start_time.strftime("%Y%m%dT%H%MZ")
     end_time = end_time.strftime("%Y%m%dT%H%MZ")
 
-    query = [('datasetId',dataset_id), ('parameterId',parameter_id), ('latMin',min_lat), ('latMax',max_lat),
-             ('lonMin', min_lon), ('lonMax',max_lon), ('timeStart', start_time), ('timeEnd', end_time)]
+    query = [('datasetId', dataset_id), ('parameterId', parameter_id), ('latMin', min_lat), ('latMax', max_lat),
+             ('lonMin', min_lon), ('lonMax', max_lon), ('timeStart', start_time), ('timeEnd', end_time)]
 
     query_url = urllib.urlencode(query)
     url_request = URL + query_url
-    
+
     return url_request
 
 
@@ -310,7 +313,7 @@ def parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon, max_l
 
     :param min_lon: Minimum longitude
     :type min_lon: :class:`float`
-    
+
     :param max_lon: Maximum longitude
     :type max_lon: :class:`float`
 
@@ -326,10 +329,12 @@ def parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon, max_l
     :returns: An OCW Dataset object contained the requested data from RCMED.
     :rtype: :class:`dataset.Dataset`
     '''
-    
+
     parameters_metadata = get_parameters_metadata()
-    parameter_name, time_step, _, _, _, _, parameter_units = _get_parameter_info(parameters_metadata, parameter_id)
-    url = _generate_query_url(dataset_id, parameter_id, min_lat, max_lat, min_lon, max_lon, start_time, end_time, time_step)
+    parameter_name, time_step, _, _, _, _, parameter_units = _get_parameter_info(
+        parameters_metadata, parameter_id)
+    url = _generate_query_url(dataset_id, parameter_id, min_lat,
+                              max_lat, min_lon, max_lon, start_time, end_time, time_step)
     lats, lons, times, values = _get_data(url)
 
     unique_lats_lons_times = _make_unique(lats, lons, times)
@@ -342,7 +347,7 @@ def parameter_dataset(dataset_id, parameter_id, min_lat, max_lat, min_lon, max_l
         'dataset_id': dataset_id,
         'parameter_id': parameter_id
     }
-    
+
     return Dataset(unique_lats_lons_times[0],
                    unique_lats_lons_times[1],
                    unique_times,

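For orientation (not part of the commit), a sketch of the public entry point whose helpers are reformatted above; the dataset and parameter IDs are hypothetical and would normally be looked up via get_parameters_metadata().

from datetime import datetime
import ocw.data_source.rcmed as rcmed

# Query RCMED for one parameter over a lat/lon box and time range, returning
# an OCW Dataset built from the payload parsed in _get_data().
ds = rcmed.parameter_dataset(dataset_id=3, parameter_id=36,   # hypothetical IDs
                             min_lat=30.0, max_lat=45.0,
                             min_lon=-120.0, max_lon=-100.0,
                             start_time=datetime(2000, 1, 1),
                             end_time=datetime(2001, 12, 31))
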
http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/dataset.py
----------------------------------------------------------------------
diff --git a/ocw/dataset.py b/ocw/dataset.py
index 3dfc835..196913a 100644
--- a/ocw/dataset.py
+++ b/ocw/dataset.py
@@ -17,9 +17,12 @@
 
 '''
 Classes:
+
     Dataset - Container for a dataset's attributes and data.
+
     Bounds - Container for holding spatial and temporal bounds information
                 for operations on a Dataset.
+
 '''
 
 import os
@@ -117,7 +120,9 @@ class Dataset:
         :returns: The Dataset's latitudinal and longitudinal spatial resolution
             as a tuple of the form (lat_resolution, lon_resolution).
         :rtype: (:class:`float`, :class:`float`)
+
         '''
+
         if self.lats.ndim == 1 and self.lons.ndim == 1:
             sorted_lats = numpy.sort(list(set(self.lats)))
             sorted_lons = numpy.sort(list(set(self.lons)))
@@ -253,10 +258,10 @@ class Bounds(object):
     '''
 
     def __init__(self, boundary_type='rectangular',
-                       us_states=None, countries=None,
-                       user_mask_file=None, mask_variable_name=None, longitude_name=None, latitude_name=None,
-                       lat_min=-90, lat_max=90, lon_min=-180, lon_max=180,
-                       start=None, end=None):
+                 us_states=None, countries=None,
+                 user_mask_file=None, mask_variable_name=None, longitude_name=None, latitude_name=None,
+                 lat_min=-90, lat_max=90, lon_min=-180, lon_max=180,
+                 start=None, end=None):
         '''Default Bounds constructor
         :param boundary_type: The type of spatial subset boundary.
         :type boundary_type: :mod:`string'
@@ -298,34 +303,41 @@ class Bounds(object):
             self._end = None
 
         if boundary_type == 'us_states':
-            self.masked_regions = utils.shapefile_boundary(boundary_type, us_states)
+            self.masked_regions = utils.shapefile_boundary(
+                boundary_type, us_states)
         if boundary_type == 'countries':
-            self.masked_regions = utils.shapefile_boundary(boundary_type, countries)
+            self.masked_regions = utils.shapefile_boundary(
+                boundary_type, countries)
         if boundary_type == 'user':
             file_object = netCDF4.Dataset(user_mask_file)
             self.mask_variable = file_object.variables[mask_variable_name][:]
             mask_longitude = file_object.variables[longitude_name][:]
             mask_latitude = file_object.variables[latitude_name][:]
             if mask_longitude.ndim == 1 and mask_latitude.ndim == 1:
-                self.mask_longitude, self.mask_latitude = numpy.meshgrid(mask_longitude, mask_latitude)
+                self.mask_longitude, self.mask_latitude = numpy.meshgrid(
+                    mask_longitude, mask_latitude)
             elif mask_longitude.ndim == 2 and mask_latitude.ndim == 2:
                 self.mask_longitude = mask_longitude
                 self.mask_latitude = mask_latitude
         if boundary_type == 'rectangular':
-            if not (-90 <= float(lat_min) <=90) or float(lat_min) > float(lat_max):
-                error = "Attempted to set lat_min to invalid value: %s" % (lat_min)
+            if not (-90 <= float(lat_min) <= 90) or float(lat_min) > float(lat_max):
+                error = "Attempted to set lat_min to invalid value: %s" % (
+                    lat_min)
                 logger.error(error)
                 raise ValueError(error)
-            if not (-90 <= float(lat_max) <=90):
-                error = "Attempted to set lat_max to invalid value: %s" % (lat_max)
+            if not (-90 <= float(lat_max) <= 90):
+                error = "Attempted to set lat_max to invalid value: %s" % (
+                    lat_max)
                 logger.error(error)
                 raise ValueError(error)
-            if not (-180 <= float(lon_min) <=180) or float(lon_min) > float(lon_max):
-                error = "Attempted to set lon_min to invalid value: %s" % (lon_min)
+            if not (-180 <= float(lon_min) <= 180) or float(lon_min) > float(lon_max):
+                error = "Attempted to set lon_min to invalid value: %s" % (
+                    lon_min)
                 logger.error(error)
                 raise ValueError(error)
-            if not (-180 <= float(lon_max) <=180):
-                error = "Attempted to set lat_max to invalid value: %s" % (lon_max)
+            if not (-180 <= float(lon_max) <= 180):
+                error = "Attempted to set lat_max to invalid value: %s" % (
+                    lon_max)
                 logger.error(error)
                 raise ValueError(error)
 
@@ -334,7 +346,8 @@ class Bounds(object):
             self.lon_min = float(lon_min)
             self.lon_max = float(lon_max)
         if boundary_type[:6].upper() == 'CORDEX':
-            self.lat_min, self.lat_max, self.lon_min, self.lon_max = utils.CORDEX_boundary(boundary_type[6:].replace(" ","").lower())
+            self.lat_min, self.lat_max, self.lon_min, self.lon_max = utils.CORDEX_boundary(
+                boundary_type[6:].replace(" ", "").lower())
 
     @property
     def start(self):
@@ -363,4 +376,3 @@ class Bounds(object):
                 raise ValueError(error)
 
         self._end = value
-

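A short sketch (not part of the patch) of the realigned Bounds constructor for the common rectangular case; the coordinate and date values are arbitrary examples.

from datetime import datetime
from ocw.dataset import Bounds

# Rectangular spatial bounds plus a temporal window; values outside
# [-90, 90] / [-180, 180] raise the ValueErrors reformatted above.
conus = Bounds(boundary_type='rectangular',
               lat_min=25.0, lat_max=50.0,
               lon_min=-125.0, lon_max=-65.0,
               start=datetime(2000, 1, 1),
               end=datetime(2005, 12, 31))
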
http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/dataset_processor.py
----------------------------------------------------------------------
diff --git a/ocw/dataset_processor.py b/ocw/dataset_processor.py
index b301515..5ec0de4 100755
--- a/ocw/dataset_processor.py
+++ b/ocw/dataset_processor.py
@@ -208,14 +208,14 @@ def spatial_regrid(target_dataset, new_latitudes, new_longitudes,
     else:
         new_lons = new_longitudes
         new_lats = new_latitudes
-    
+
     ny_old, nx_old = lats.shape
     ny_new, nx_new = new_lats.shape
 
     for iy in np.arange(ny_old):
-        if not all(x<y for x,y in zip(lons[iy,:], lons[iy,1:])):
-            lons[iy,:][lons[iy,:] <0] = lons[iy,:][lons[iy,:] <0]+360.
-        
+        if not all(x < y for x, y in zip(lons[iy, :], lons[iy, 1:])):
+            lons[iy, :][lons[iy, :] < 0] = lons[iy, :][lons[iy, :] < 0] + 360.
+
     # Make masked array of shape (times, new_latitudes,new_longitudes)
     new_values = ma.zeros([len(target_dataset.times),
                            ny_new, nx_new])
@@ -258,7 +258,7 @@ def spatial_regrid(target_dataset, new_latitudes, new_longitudes,
                     mn = lats.min()
                     mx = lats.max()
                     new_lats_indices[iy, ix] = (
-                        ny_old - 1.) * (new_lats[iy, ix] - mn)/ (mx - mn)
+                        ny_old - 1.) * (new_lats[iy, ix] - mn) / (mx - mn)
                     mn = lons.min()
                     mx = lons.max()
                     new_lons_indices[iy, ix] = (
@@ -391,12 +391,12 @@ def subset(target_dataset, subregion, subregion_name=None, extract=True, user_ma
     if not subregion.start:
         subregion.start = target_dataset.times[0]
         subregion.end = target_dataset.times[-1]
-    
+
     if not subregion_name:
         subregion_name = target_dataset.name
 
     if hasattr(subregion, 'lat_min'):
-        _are_bounds_contained_by_dataset(target_dataset, subregion)   
+        _are_bounds_contained_by_dataset(target_dataset, subregion)
 
         if target_dataset.lats.ndim == 2 and target_dataset.lons.ndim == 2:
             start_time_index = np.where(
@@ -413,7 +413,8 @@ def subset(target_dataset, subregion, subregion_name=None, extract=True, user_ma
                     target_dataset.lons <= subregion.lon_min))
             for it in np.arange(nt):
                 target_dataset.values[it, y_index, x_index] = 1.e+20
-            target_dataset.values = ma.masked_equal(target_dataset.values, 1.e+20)
+            target_dataset.values = ma.masked_equal(
+                target_dataset.values, 1.e+20)
             return target_dataset
 
         elif target_dataset.lats.ndim == 1 and target_dataset.lons.ndim == 1:
@@ -445,22 +446,22 @@ def subset(target_dataset, subregion, subregion_name=None, extract=True, user_ma
                     dataset_slices["lon_start"]:dataset_slices["lon_end"] + 1]
             # Build new dataset with subset information
             return ds.Dataset(
-            # Slice the lats array with our calculated slice indices
+                # Slice the lats array with our calculated slice indices
                 target_dataset.lats[dataset_slices["lat_start"]:
                                     dataset_slices["lat_end"] + 1],
-            # Slice the lons array with our calculated slice indices
+                # Slice the lons array with our calculated slice indices
                 target_dataset.lons[dataset_slices["lon_start"]:
                                     dataset_slices["lon_end"] + 1],
-            # Slice the times array with our calculated slice indices
+                # Slice the times array with our calculated slice indices
                 target_dataset.times[dataset_slices["time_start"]:
                                      dataset_slices["time_end"] + 1],
-            # Slice the values array with our calculated slice indices
+                # Slice the values array with our calculated slice indices
                 subset_values,
                 variable=target_dataset.variable,
                 units=target_dataset.units,
                 name=subregion_name,
                 origin=target_dataset.origin
-                )
+            )
 
     if subregion.boundary_type == 'us_states' or subregion.boundary_type == 'countries':
         start_time_index = np.where(
@@ -469,18 +470,21 @@ def subset(target_dataset, subregion, subregion_name=None, extract=True, user_ma
         target_dataset = temporal_slice(
             target_dataset, start_time_index, end_time_index)
         nt, ny, nx = target_dataset.values.shape
-        spatial_mask = utils.mask_using_shapefile_info(target_dataset.lons, target_dataset.lats, 
-                                                       subregion.masked_regions, extract = extract)
-        target_dataset.values = utils.propagate_spatial_mask_over_time(target_dataset.values, mask=spatial_mask)
+        spatial_mask = utils.mask_using_shapefile_info(target_dataset.lons, target_dataset.lats,
+                                                       subregion.masked_regions, extract=extract)
+        target_dataset.values = utils.propagate_spatial_mask_over_time(
+            target_dataset.values, mask=spatial_mask)
         return target_dataset
-            
+
     if subregion.boundary_type == 'user':
-        spatial_mask = utils.regrid_spatial_mask(target_dataset.lons, target_dataset.lats, 
+        spatial_mask = utils.regrid_spatial_mask(target_dataset.lons, target_dataset.lats,
                                                  subregion.mask_longitude, subregion.mask_latitude, subregion.mask_variable,
-                                                 user_mask_values, extract = extract)
-        target_dataset.values = utils.propagate_spatial_mask_over_time(target_dataset.values, mask=spatial_mask)
+                                                 user_mask_values, extract=extract)
+        target_dataset.values = utils.propagate_spatial_mask_over_time(
+            target_dataset.values, mask=spatial_mask)
         return target_dataset
 
+
 def temporal_slice(target_dataset, start_time_index, end_time_index):
     '''Temporally slice given dataset(s) with subregion information. This does not
     spatially subset the target_Dataset
@@ -765,16 +769,17 @@ def water_flux_unit_conversion(dataset):
 
 
 def temperature_unit_conversion(dataset):
-    ''' Convert temperature units as necessary
-
+    ''' Convert temperature units as necessary \
     Automatically convert Celsius to Kelvin in the given dataset.
 
-    :param dataset: The dataset for which units should be updated.
+    :param dataset: The dataset for which units should be updated. \
     :type dataset; :class:`dataset.Dataset`
 
-    :returns: The dataset with (potentially) updated units.
+    :returns: The dataset with (potentially) updated units. \
     :rtype: :class:`dataset.Dataset`
+
     '''
+
     temperature_variables = ['temp', 'tas', 'tasmax', 'taxmin', 'T', 'tg']
     variable = dataset.variable.lower()
 
@@ -819,6 +824,7 @@ def _rcmes_normalize_datetimes(datetimes, timestep):
     :param timestep: The flag for how to normalize the datetimes.
     :type timestep: String
     """
+
     normalDatetimes = []
     if timestep.lower() == 'monthly':
         for inputDatetime in datetimes:
@@ -851,6 +857,7 @@ def mask_missing_data(dataset_array):
     the values at the grid point in all other datasets are masked.
     :param dataset_array: an array of OCW datasets
     '''
+
     mask_array = np.zeros(dataset_array[0].values.shape)
     for dataset in dataset_array:
         index = np.where(dataset.values.mask == True)
@@ -862,6 +869,7 @@ def mask_missing_data(dataset_array):
         masked_array.append(dataset)
     return [masked_dataset for masked_dataset in masked_array]
 
+
 def deseasonalize_dataset(dataset):
     '''Calculate daily climatology and subtract the climatology from
     the input dataset
@@ -872,19 +880,22 @@ def deseasonalize_dataset(dataset):
     :returns: A Dataset with values converted to new units.
     :rtype: :class:`dataset.Dataset`
     '''
-    days = [d.month*100. + d.day for d in dataset.times]
+
+    days = [d.month * 100. + d.day for d in dataset.times]
     days_sorted = np.unique(days)
     ndays = days_sorted.size
     nt, ny, nx = dataset.values.shape
     values_clim = ma.zeros([ndays, ny, nx])
     for iday, day in enumerate(days_sorted):
         t_index = np.where(days == day)[0]
-        values_clim[iday,:] = ma.mean(dataset.values[t_index,:], axis=0)
+        values_clim[iday, :] = ma.mean(dataset.values[t_index, :], axis=0)
     for iday, day in enumerate(days_sorted):
         t_index = np.where(days == day)[0]
-        dataset.values[t_index,:] = dataset.values[t_index,:] - values_clim[iday,:]
+        dataset.values[t_index, :] = dataset.values[
+            t_index, :] - values_clim[iday, :]
     return dataset
 
+
 def _rcmes_spatial_regrid(spatial_values, lat, lon, lat2, lon2, order=1):
     '''
     Spatial regrid from one set of lat,lon values onto a new set (lat2,lon2)
@@ -1298,8 +1309,8 @@ def _congrid(a, newdims, method='linear', centre=False, minusone=False):
     old = np.array(a.shape)
     ndims = len(a.shape)
     if len(newdims) != ndims:
-        print("[congrid] dimensions error. " \
-              "This routine currently only supports " \
+        print("[congrid] dimensions error. "
+              "This routine currently only supports "
               "rebinning to the same number of dimensions.")
         return None
     newdims = np.asarray(newdims, dtype=float)
@@ -1353,8 +1364,8 @@ def _congrid(a, newdims, method='linear', centre=False, minusone=False):
         newa = scipy.ndimage.map_coordinates(a, newcoords)
         return newa
     else:
-        print("Congrid error: Unrecognized interpolation type.\n", \
-              "Currently only \'neighbour\', \'nearest\',\'linear\',", \
+        print("Congrid error: Unrecognized interpolation type.\n",
+              "Currently only \'neighbour\', \'nearest\',\'linear\',",
               "and \'spline\' are supported.")
         return None
 

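To illustrate the regridding path touched at the top of this file's hunks, a self-contained sketch (not part of the commit) using a tiny synthetic Dataset; the variable name and grids are arbitrary.

import numpy as np
from datetime import datetime
from ocw.dataset import Dataset
import ocw.dataset_processor as dsp

# Build a small synthetic Dataset and regrid it onto a finer lat/lon grid.
lats = np.arange(30.0, 40.0, 1.0)
lons = np.arange(-110.0, -100.0, 1.0)
times = np.array([datetime(2000, month, 1) for month in range(1, 13)])
values = np.random.rand(len(times), len(lats), len(lons))
ds = Dataset(lats, lons, times, values, 'tas', name='synthetic')

new_lats = np.arange(30.0, 40.0, 0.5)
new_lons = np.arange(-110.0, -100.0, 0.5)
regridded = dsp.spatial_regrid(ds, new_lats, new_lons)
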
http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/esgf/download.py
----------------------------------------------------------------------
diff --git a/ocw/esgf/download.py b/ocw/esgf/download.py
index f322e1d..23c107b 100644
--- a/ocw/esgf/download.py
+++ b/ocw/esgf/download.py
@@ -21,45 +21,50 @@ RCMES module to download a file from ESGF.
 
 '''
 
-import urllib2, httplib
+import urllib2
+import httplib
 from os.path import expanduser, join
 
 from ocw.esgf.constants import ESGF_CREDENTIALS
 
+
 class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
     '''
     HTTP handler that transmits an X509 certificate as part of the request
     '''
-    
+
     def __init__(self, key, cert):
-            urllib2.HTTPSHandler.__init__(self)
-            self.key = key
-            self.cert = cert
+        urllib2.HTTPSHandler.__init__(self)
+        self.key = key
+        self.cert = cert
+
     def https_open(self, req):
-            return self.do_open(self.getConnection, req)
+        return self.do_open(self.getConnection, req)
+
     def getConnection(self, host, timeout=300):
-            return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
+        return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
+
 
 def download(url, toDirectory="/tmp"):
     '''
     Function to download a single file from ESGF.
-    
+
     :param url: the URL of the file to download
     :param toDirectory: target directory where the file will be written
     '''
-    
+
     # setup HTTP handler
     certFile = expanduser(ESGF_CREDENTIALS)
-    opener = urllib2.build_opener(HTTPSClientAuthHandler(certFile,certFile))
+    opener = urllib2.build_opener(HTTPSClientAuthHandler(certFile, certFile))
     opener.add_handler(urllib2.HTTPCookieProcessor())
-    
+
     # download file
-    localFilePath = join(toDirectory,url.split('/')[-1])
+    localFilePath = join(toDirectory, url.split('/')[-1])
     print("\nDownloading url: %s to local path: %s ..." % (url, localFilePath))
-    localFile=open( localFilePath, 'w')
-    webFile=opener.open(url)
+    localFile = open(localFilePath, 'w')
+    webFile = opener.open(url)
     localFile.write(webFile.read())
-    
+
     # cleanup
     localFile.close()
     webFile.close()

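A usage sketch (not part of the patch): the URL below is a placeholder, and a valid ESGF certificate must already exist at ESGF_CREDENTIALS (~/.esg/credentials.pem), e.g. obtained via ocw.esgf.logon.

from ocw.esgf.download import download

# Download one ESGF file over HTTPS using the client-certificate handler above.
download('https://esgf-node.example.org/thredds/fileServer/sample.nc',  # placeholder URL
         toDirectory='/tmp')
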
http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/esgf/logon.py
----------------------------------------------------------------------
diff --git a/ocw/esgf/logon.py b/ocw/esgf/logon.py
index 8b4c034..b792cfa 100644
--- a/ocw/esgf/logon.py
+++ b/ocw/esgf/logon.py
@@ -25,8 +25,8 @@ from pyesgf.logon import LogonManager
 
 from ocw.esgf.constants import JPL_MYPROXY_SERVER_DN, JPL_HOSTNAME
 
-def logon(openid, password):
 
+def logon(openid, password):
     '''
     Function to retrieve a short-term X.509 certificate that can be used to authenticate with ESGF.
     The certificate is written in the location ~/.esg/credentials.pem.

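For completeness (not part of the commit), the typical call into the relocated logon() function; the OpenID and password are placeholders.

from ocw.esgf.logon import logon

# Retrieve a short-term X.509 certificate and write it to ~/.esg/credentials.pem.
logon('https://esgf-node.jpl.nasa.gov/esgf-idp/openid/jdoe',  # placeholder OpenID
      'password')
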
http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/esgf/main.py
----------------------------------------------------------------------
diff --git a/ocw/esgf/main.py b/ocw/esgf/main.py
index 799ad38..5c90042 100644
--- a/ocw/esgf/main.py
+++ b/ocw/esgf/main.py
@@ -28,6 +28,7 @@ from ocw.esgf.logon import logon
 from ocw.esgf.search import SearchClient
 from ocw.esgf.download import download
 
+
 def main():
     '''Example driver program'''
 
@@ -43,69 +44,73 @@ def main():
     # step 2: execute faceted search for files
     urls = main_obs4mips()
     #urls = main_cmip5()
-    
+
     # step 3: download file(s)
     for i, url in enumerate(urls):
-        if i>=1:
+        if i >= 1:
             break
         download(url, toDirectory=DATA_DIRECTORY)
 
-    
+
 def main_cmip5():
     '''
     Example workflow to search for CMIP5 files
     '''
-    
-    searchClient = SearchClient(searchServiceUrl="http://pcmdi9.llnl.gov/esg-search/search", distrib=False)
-    
+
+    searchClient = SearchClient(
+        searchServiceUrl="http://pcmdi9.llnl.gov/esg-search/search", distrib=False)
+
     print('\nAvailable projects=%s' % searchClient.getFacets('project'))
     searchClient.setConstraint(project='CMIP5')
     print("Number of Datasets=%d" % searchClient.getNumberOfDatasets())
-    
+
     print('\nAvailable models=%s' % searchClient.getFacets('model'))
     searchClient.setConstraint(model='INM-CM4')
     print("Number of Datasets=%d" % searchClient.getNumberOfDatasets())
-    
+
     print('\nAvailable experiments=%s' % searchClient.getFacets('experiment'))
     searchClient.setConstraint(experiment='historical')
     print("Number of Datasets=%d" % searchClient.getNumberOfDatasets())
-    
-    print('\nAvailable time frequencies=%s' % searchClient.getFacets('time_frequency'))
+
+    print('\nAvailable time frequencies=%s' %
+          searchClient.getFacets('time_frequency'))
     searchClient.setConstraint(time_frequency='mon')
     print("Number of Datasets=%d" % searchClient.getNumberOfDatasets())
 
-    print('\nAvailable CF standard names=%s' % searchClient.getFacets('cf_standard_name'))
+    print('\nAvailable CF standard names=%s' %
+          searchClient.getFacets('cf_standard_name'))
     searchClient.setConstraint(cf_standard_name='air_temperature')
     print("Number of Datasets=%d" % searchClient.getNumberOfDatasets())
-    
+
     urls = searchClient.getFiles()
     return urls
-    
-    
+
+
 def main_obs4mips():
     '''
     Example workflow to search for obs4MIPs files.
     '''
-    
+
     searchClient = SearchClient(distrib=False)
-    
+
     # obs4MIPs
     print('\nAvailable projects=%s' % searchClient.getFacets('project'))
     searchClient.setConstraint(project='obs4MIPs')
     print("Number of Datasets=%d" % searchClient.getNumberOfDatasets())
-    
+
     print('\nAvailable variables=%s' % searchClient.getFacets('variable'))
     searchClient.setConstraint(variable='hus')
     print("Number of Datasets=%d" % searchClient.getNumberOfDatasets())
-    
-    print('\nAvailable time frequencies=%s' % searchClient.getFacets('time_frequency'))
+
+    print('\nAvailable time frequencies=%s' %
+          searchClient.getFacets('time_frequency'))
     searchClient.setConstraint(time_frequency='mon')
     print("Number of Datasets=%d" % searchClient.getNumberOfDatasets())
-    
+
     print('\nAvailable models=%s' % searchClient.getFacets('model'))
     searchClient.setConstraint(model='Obs-MLS')
     print("Number of Datasets=%d" % searchClient.getNumberOfDatasetsi())
-    
+
     urls = searchClient.getFiles()
     return urls
 

http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/esgf/search.py
----------------------------------------------------------------------
diff --git a/ocw/esgf/search.py b/ocw/esgf/search.py
index a6b527a..c2f4e12 100644
--- a/ocw/esgf/search.py
+++ b/ocw/esgf/search.py
@@ -25,13 +25,14 @@ from pyesgf.search import SearchConnection
 
 from ocw.esgf.constants import JPL_SEARCH_SERVICE_URL
 
+
 class SearchClient():
     """
     Simple ESGF search client for RCMES.
     This class is a thin layer on top of the esgfpy-client package.
     Note: this class always searches for latest versions, no replicas.
     """
-    
+
     def __init__(self, searchServiceUrl=JPL_SEARCH_SERVICE_URL, distrib=True):
         """
         :param searchServiceUrl: URL of ESGF search service to query
@@ -39,14 +40,14 @@ class SearchClient():
                         False to search only the specified search service
         """
         connection = SearchConnection(searchServiceUrl, distrib=distrib)
-        
+
         # dictionary of query constraints
-        self.constraints = { "latest":True, "replica":False, "distrib":distrib } 
-    
+        self.constraints = {"latest": True,
+                            "replica": False, "distrib": distrib}
+
         # initial search context
-        self.context = connection.new_context( **self.constraints )
-        
-        
+        self.context = connection.new_context(**self.constraints)
+
     def setConstraint(self, **constraints):
         """
         Sets one or more facet constraints.
@@ -56,20 +57,20 @@ class SearchClient():
             print('Setting constraint: %s=%s' % (key, constraints[key]))
             self.constraints[key] = constraints[key]
         self.context = self.context.constrain(**constraints)
-        
+
     def getNumberOfDatasets(self):
         """
         :return: the number of datasets matching the current constraints.
         """
         return self.context.hit_count
-        
+
     def getFacets(self, facet):
         """
         :return: a dictionary of (facet value, facet count) for the specified facet and current constraints.
         Example (for facet='project'): {u'COUND': 4, u'CMIP5': 2657, u'obs4MIPs': 7} 
         """
         return self.context.facet_counts[facet]
-    
+
     def getFiles(self):
         """
         Executes a search for files with the current constraints.
@@ -78,12 +79,10 @@ class SearchClient():
         datasets = self.context.search()
         urls = []
         for dataset in datasets:
-            print("\nSearching files for dataset=%s with constraints: %s" % (dataset.dataset_id, self.constraints))
+            print("\nSearching files for dataset=%s with constraints: %s" %
+                  (dataset.dataset_id, self.constraints))
             files = dataset.file_context().search(**self.constraints)
             for file in files:
                 print('Found file=%s' % file.download_url)
                 urls.append(file.download_url)
         return urls
-        
-    
-    

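A condensed sketch (not part of the patch) of the SearchClient flow exercised by main_obs4mips() above; the facet values mirror that example.

from ocw.esgf.search import SearchClient

# Constrain a non-distributed ESGF search and collect download URLs.
client = SearchClient(distrib=False)
client.setConstraint(project='obs4MIPs', variable='hus', time_frequency='mon')
print('Number of Datasets=%d' % client.getNumberOfDatasets())
urls = client.getFiles()
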
http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/evaluation.py
----------------------------------------------------------------------
diff --git a/ocw/evaluation.py b/ocw/evaluation.py
index cd06450..51c2ead 100644
--- a/ocw/evaluation.py
+++ b/ocw/evaluation.py
@@ -29,6 +29,7 @@ import numpy.ma as ma
 
 logger = logging.getLogger(__name__)
 
+
 class Evaluation(object):
     '''Container for running an evaluation
 
@@ -74,12 +75,12 @@ class Evaluation(object):
         '''
         #: The reference dataset.
         self._ref_dataset = reference
-        #: The target dataset(s) which should each be compared with 
+        #: The target dataset(s) which should each be compared with
         #: the reference dataset when the evaluation is run.
         self.target_datasets = []
         self.add_datasets(targets)
 
-        #: The list of "binary" metrics (A metric which takes two Datasets) 
+        #: The list of "binary" metrics (A metric which takes two Datasets)
         #: that the Evaluation should use.
         self.metrics = []
         #: The list of "unary" metrics (A metric which takes one Dataset) that
@@ -91,17 +92,17 @@ class Evaluation(object):
         self.add_metrics(metrics)
 
         #: An optional list of subregion bounds to use when running the
-        #: evaluation. 
+        #: evaluation.
         self._subregions = subregions
 
-        #: A list containing the results of running regular metric evaluations. 
+        #: A list containing the results of running regular metric evaluations.
         #: The shape of results is ``(num_target_datasets, num_metrics)`` if
         #: the user doesn't specify subregion information. Otherwise the shape
         #: is ``(num_target_datasets, num_metrics, num_subregions)``.
         self.results = []
-        #: A list containing the results of running the unary metric 
-        #: evaluations. The shape of unary_results is 
-        #: ``(num_targets, num_metrics)`` where ``num_targets = 
+        #: A list containing the results of running the unary metric
+        #: evaluations. The shape of unary_results is
+        #: ``(num_targets, num_metrics)`` where ``num_targets =
         #: num_target_ds + (1 if ref_dataset != None else 0``
         self.unary_results = []
 
@@ -207,7 +208,6 @@ class Evaluation(object):
         for metric in metrics:
             self.add_metric(metric)
 
-
     def run(self):
         '''Run the evaluation.
 
@@ -218,7 +218,7 @@ class Evaluation(object):
 
         If there is subregion information provided then each dataset is subset
         before being run through the binary metrics. 
-        
+
         ..note:: Only the binary metrics are subset with subregion information.
 
         Next, if there are any "unary" metrics they are run. Unary metrics are
@@ -243,7 +243,7 @@ class Evaluation(object):
 
     def _evaluation_is_valid(self):
         '''Check if the evaluation is well-formed.
-        
+
         * If there are no metrics or no datasets it's invalid.
         * If there is a unary metric there must be a reference dataset or at
             least one target dataset.
@@ -333,7 +333,7 @@ class Evaluation(object):
                 for t in range(len(self.target_datasets)):
                     unary_results[-1][-1].append(metric.run(new_targets[t][i]))
 
-        return convert_unary_evaluation_result(unary_results, subregion = True)
+        return convert_unary_evaluation_result(unary_results, subregion=True)
 
     def __str__(self):
         formatted_repr = (
@@ -352,18 +352,19 @@ class Evaluation(object):
             str(self.subregions)
         )
 
-def convert_evaluation_result(evaluation_result, subregion = False):
+
+def convert_evaluation_result(evaluation_result, subregion=False):
     if not subregion:
         nmodel = len(evaluation_result)
         nmetric = len(evaluation_result[0])
-        results = [] 
+        results = []
         for imetric in range(nmetric):
-            if evaluation_result[0][imetric].ndim !=0:
+            if evaluation_result[0][imetric].ndim != 0:
                 result_shape = list(evaluation_result[0][imetric].shape)
                 result_shape.insert(0, nmodel)
                 result = ma.zeros(result_shape)
                 for imodel in range(nmodel):
-                    result[imodel,:] = evaluation_result[imodel][imetric]
+                    result[imodel, :] = evaluation_result[imodel][imetric]
             else:
                 result = ma.zeros(nmodel)
                 for imodel in range(nmodel):
@@ -379,32 +380,36 @@ def convert_evaluation_result(evaluation_result, subregion = False):
         for isubregion in range(nsubregion):
             subregion_results = []
             for imetric in range(nmetric):
-                if evaluation_result[0][imetric][isubregion].ndim !=0:
-                    result_shape = list(evaluation_result[0][imetric][isubregion].shape)
+                if evaluation_result[0][imetric][isubregion].ndim != 0:
+                    result_shape = list(evaluation_result[0][
+                                        imetric][isubregion].shape)
                     result_shape.insert(0, nmodel)
                     result = ma.zeros(result_shape)
                     for imodel in range(nmodel):
-                        result[imodel,:] = evaluation_result[imodel][imetric][isubregion]
+                        result[imodel, :] = evaluation_result[
+                            imodel][imetric][isubregion]
                 else:
                     result = ma.zeros(nmodel)
                     for imodel in range(nmodel):
-                        result[imodel] = evaluation_result[imodel][imetric][isubregion]
+                        result[imodel] = evaluation_result[
+                            imodel][imetric][isubregion]
                 subregion_results.append(result)
             results.append(subregion_results)
         return results
-             
-def convert_unary_evaluation_result(evaluation_result, subregion = False):
+
+
+def convert_unary_evaluation_result(evaluation_result, subregion=False):
     if not subregion:
         nmetric = len(evaluation_result)
         nmodel = len(evaluation_result[0])
         results = []
         for imetric in range(nmetric):
-            if evaluation_result[imetric][0].ndim !=0:
+            if evaluation_result[imetric][0].ndim != 0:
                 result_shape = list(evaluation_result[imetric][0].shape)
                 result_shape.insert(0, nmodel)
                 result = ma.zeros(result_shape)
                 for imodel in range(nmodel):
-                    result[imodel,:] = evaluation_result[imetric][imodel]
+                    result[imodel, :] = evaluation_result[imetric][imodel]
             else:
                 result = ma.zeros(nmodel)
                 for imodel in range(nmodel):
@@ -420,17 +425,19 @@ def convert_unary_evaluation_result(evaluation_result, subregion = False):
         for isubregion in range(nsubregion):
             subregion_results = []
             for imetric in range(nmetric):
-                if evaluation_result[imetric][isubregion][0].ndim !=0:
-                    result_shape = list(evaluation_result[imetric][isubregion][0].shape)
+                if evaluation_result[imetric][isubregion][0].ndim != 0:
+                    result_shape = list(evaluation_result[imetric][
+                                        isubregion][0].shape)
                     result_shape.insert(0, nmodel)
                     result = ma.zeros(result_shape)
                     for imodel in range(nmodel):
-                        result[imodel,:] = evaluation_result[imetric][isubregion][imodel]
+                        result[imodel, :] = evaluation_result[
+                            imetric][isubregion][imodel]
                 else:
-                    result = ma.zeros(nmodel) 
+                    result = ma.zeros(nmodel)
                     for imodel in range(nmodel):
-                        result[imodel] = evaluation_result[imetric][isubregion][imodel]
+                        result[imodel] = evaluation_result[
+                            imetric][isubregion][imodel]
                 subregion_results.append(result)
             results.append(subregion_results)
         return results
-

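To show how the result converters above are reached (not part of the commit), a self-contained sketch that runs two binary metrics from ocw.metrics on tiny synthetic Datasets; all values are arbitrary.

import numpy as np
from datetime import datetime
from ocw.dataset import Dataset
from ocw.evaluation import Evaluation
from ocw.metrics import Bias, TemporalCorrelation

# Reference and target share the same grid and times.
lats = np.arange(30.0, 35.0, 1.0)
lons = np.arange(-110.0, -105.0, 1.0)
times = np.array([datetime(2000, month, 1) for month in range(1, 13)])
ref = Dataset(lats, lons, times, np.random.rand(12, 5, 5), 'tas', name='ref')
model = Dataset(lats, lons, times, np.random.rand(12, 5, 5), 'tas', name='model')

evaluation = Evaluation(ref, [model], [Bias(), TemporalCorrelation()])
evaluation.run()
# evaluation.results is populated via convert_evaluation_result() above.
print(evaluation.results)
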
http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/metrics.py
----------------------------------------------------------------------
diff --git a/ocw/metrics.py b/ocw/metrics.py
index 8e61799..c796e96 100644
--- a/ocw/metrics.py
+++ b/ocw/metrics.py
@@ -26,6 +26,7 @@ import numpy
 import numpy.ma as ma
 from scipy.stats import mstats
 
+
 class Metric(object):
     '''Base Metric Class'''
     __metaclass__ = ABCMeta
@@ -87,7 +88,8 @@ class Bias(BinaryMetric):
         :returns: The difference between the reference and target datasets.
         :rtype: :class:`numpy.ndarray`
         '''
-        return calc_bias(target_dataset.values,ref_dataset.values) 
+        return calc_bias(target_dataset.values, ref_dataset.values)
+
 
 class SpatialPatternTaylorDiagram(BinaryMetric):
     ''' Calculate the target to reference ratio of spatial standard deviation and pattern correlation'''
@@ -97,7 +99,7 @@ class SpatialPatternTaylorDiagram(BinaryMetric):
 
         .. note::
            Overrides BinaryMetric.run() 
-        
+
         :param ref_dataset: The reference dataset to use in this metric run.
         :type ref_dataset: :class:`dataset.Dataset`
 
@@ -148,7 +150,7 @@ class StdDevRatio(BinaryMetric):
 
         :returns: The standard deviation ratio of the reference and target
         '''
-       
+
         return calc_stddev_ratio(target_dataset.values, ref_dataset.values)
 
 
@@ -172,7 +174,8 @@ class PatternCorrelation(BinaryMetric):
         '''
         # stats.pearsonr returns correlation_coefficient, 2-tailed p-value
         # We only care about the correlation coefficient
-        # Docs at http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html
+        # Docs at
+        # http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html
 
         return calc_correlation(target_dataset.values, ref_dataset.values)
 
@@ -205,9 +208,9 @@ class TemporalCorrelation(BinaryMetric):
         for i in numpy.arange(num_lats):
             for j in numpy.arange(num_lons):
                 coefficients[i, j] = calc_correlation(
-                        target_dataset.values[:, i, j],
-                        reference_dataset.values[:, i, j])
-        return coefficients 
+                    target_dataset.values[:, i, j],
+                    reference_dataset.values[:, i, j])
+        return coefficients
 
 
 class TemporalMeanBias(BinaryMetric):
@@ -229,7 +232,8 @@ class TemporalMeanBias(BinaryMetric):
         :returns: The mean bias between a reference and target dataset over time.
         '''
 
-        return calc_bias(target_dataset.values,ref_dataset.values, average_over_time=True) 
+        return calc_bias(target_dataset.values, ref_dataset.values, average_over_time=True)
+
 
 class RMSError(BinaryMetric):
     '''Calculate the Root Mean Square Difference (RMS Error), with the mean
@@ -255,7 +259,8 @@ class RMSError(BinaryMetric):
 
         return calc_rmse(target_dataset.values, reference_dataset.values)
 
-def calc_bias(target_array, reference_array, average_over_time = False):
+
+def calc_bias(target_array, reference_array, average_over_time=False):
     ''' Calculate difference between two arrays
 
     :param target_array: an array to be evaluated, as model output
@@ -270,19 +275,20 @@ def calc_bias(target_array, reference_array, average_over_time = False):
     :returns: Biases array of the target dataset
     :rtype: :class:'numpy.ma.core.MaskedArray'
     '''
-    
+
     bias = target_array - reference_array
     if average_over_time:
         return ma.average(bias, axis=0)
     else:
         return bias
 
+
 def calc_stddev(array, axis=None):
     ''' Calculate a sample standard deviation of an array along the array
 
     :param array: an array to calculate sample standard deviation
     :type array: :class:'numpy.ma.core.MaskedArray'
-    
+
     :param axis: Axis along which the sample standard deviation is computed.
     :type axis: 'int'
 
@@ -294,7 +300,7 @@ def calc_stddev(array, axis=None):
         return ma.std(array, axis=axis, ddof=1)
     else:
         return ma.std(array, ddof=1)
-        
+
 
 def calc_stddev_ratio(target_array, reference_array):
     ''' Calculate the ratio of standard deviations of the two arrays
@@ -312,7 +318,8 @@ def calc_stddev_ratio(target_array, reference_array):
     :rtype: :class:'float'
     '''
 
-    return calc_stddev(target_array)/calc_stddev(reference_array)
+    return calc_stddev(target_array) / calc_stddev(reference_array)
+
 
 def calc_correlation(target_array, reference_array):
     '''Calculate the correlation coefficient between two arrays.
@@ -327,8 +334,9 @@ def calc_correlation(target_array, reference_array):
     :rtype: :class:'numpy.ma.core.MaskedArray'
     '''
 
-    return mstats.pearsonr(reference_array.flatten(), target_array.flatten())[0]  
-       
+    return mstats.pearsonr(reference_array.flatten(), target_array.flatten())[0]
+
+
 def calc_rmse(target_array, reference_array):
     ''' Calculate the root mean square error (RMSE) between the two arrays
 
@@ -345,7 +353,8 @@ def calc_rmse(target_array, reference_array):
     :rtype: :class:'float'
     '''
 
-    return (ma.mean((calc_bias(target_array, reference_array))**2))**0.5 
+    return (ma.mean((calc_bias(target_array, reference_array))**2))**0.5
+
 
 def calc_histogram_overlap(hist1, hist2):
     ''' from Lee et al. (2014)
@@ -354,18 +363,19 @@ def calc_histogram_overlap(hist1, hist2):
     :param hist2: a histogram array with the same size as hist1
     :type hist2: :class:'numpy.ndarray'
     '''
-   
+
     hist1_flat = hist1.flatten()
     hist2_flat = hist2.flatten()
 
     if len(hist1_flat) != len(hist2_flat):
         err = "The two histograms have different sizes"
-        raise ValueError(err) 
+        raise ValueError(err)
     overlap = 0.
     for ii in numpy.arange(len(hist1_flat)):
         overlap = overlap + numpy.minimum(hist1_flat[ii], hist2_flat[ii])
     return overlap
 
+
 def calc_joint_histogram(data_array1, data_array2, bins_for_data1, bins_for_data2):
     ''' Calculate a joint histogram of two variables in data_array1 and data_array2
     :param data_array1: the first variable
@@ -377,17 +387,20 @@ def calc_joint_histogram(data_array1, data_array2, bins_for_data1, bins_for_data
     :param bins_for_data2: histogram bin edges for data_array2
     :type bins_for_data2: :class:'numpy.ndarray'
     '''
-    if ma.count_masked(data_array1)!=0 or ma.count_masked(data_array2)!=0:
-        index = numpy.where((data_array1.mask == False) & (data_array2.mask==False)) 
+    if ma.count_masked(data_array1) != 0 or ma.count_masked(data_array2) != 0:
+        index = numpy.where((data_array1.mask == False) &
+                            (data_array2.mask == False))
         new_array1 = data_array1[index]
-        new_array2 = data_array2[index] 
+        new_array2 = data_array2[index]
     else:
         new_array1 = data_array1.flatten()
         new_array2 = data_array2.flatten()
 
-    histo2d, xedge, yedge = numpy.histogram2d(new_array1, new_array2, bins=[bins_for_data1, bins_for_data2])
+    histo2d, xedge, yedge = numpy.histogram2d(new_array1, new_array2, bins=[
+                                              bins_for_data1, bins_for_data2])
     return histo2d
- 
+
+
 def wet_spell_analysis(reference_array, threshold=0.1, nyear=1, dt=3.):
     ''' Characterize wet spells using sub-daily (hourly) data
 
@@ -405,29 +418,32 @@ def wet_spell_analysis(reference_array, threshold=0.1, nyear=1, dt=3.):
     '''
     nt = reference_array.shape[0]
     if reference_array.ndim == 3:
-        reshaped_array = reference_array.reshape[nt, reference_array.size/nt]
+        reshaped_array = reference_array.reshape(nt, reference_array.size // nt)
     else:
         reshaped_array = reference_array
-    xy_indices = np.where(reshaped_array.mask[0,:] == False)[0]
+    xy_indices = np.where(reshaped_array.mask[0, :] == False)[0]
 
-    nt_each_year = nt/nyear 
+    nt_each_year = nt / nyear
     spell_duration = []
     peak_rainfall = []
     total_rainfall = []
-   
+
     for index in xy_indices:
         for iyear in np.arange(nyear):
-            data0_temp = reshaped_array[nt_each_year*iyear:nt_each_year*(iyear+1),
-                                        index] 
-            # time indices when precipitation rate is smaller than the threshold [mm/hr]
-            t_index = np.where((data0_temp <= threshold) & (data0_temp.mask ==False))[0]
+            data0_temp = reshaped_array[nt_each_year * iyear:nt_each_year * (iyear + 1),
+                                        index]
+            # time indices when precipitation rate is smaller than the
+            # threshold [mm/hr]
+            t_index = np.where((data0_temp <= threshold) &
+                               (data0_temp.mask == False))[0]
             t_index = np.insert(t_index, 0, 0)
-            t_index = t_index + nt_each_year*iyear
-            for it in np.arange(t_index.size-1):
-                if t_index[it+1] - t_index[it] >1:
-                    data1_temp = data0_temp[t_index[it]+1:t_index[it+1]]
+            t_index = t_index + nt_each_year * iyear
+            for it in np.arange(t_index.size - 1):
+                if t_index[it + 1] - t_index[it] > 1:
+                    data1_temp = data0_temp[t_index[it] + 1:t_index[it + 1]]
                     if not ma.is_masked(data1_temp):
-                        spell_duration.append((t_index[it+1]-t_index[it]-1)*dt)
+                        spell_duration.append(
+                            (t_index[it + 1] - t_index[it] - 1) * dt)
                         peak_rainfall.append(data1_temp.max())
                         total_rainfall.append(data1_temp.sum())
     return np.array(spell_duration), np.array(peak_rainfall), np.array(total_rainfall)
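
The helpers reformatted above keep their signatures, so existing callers are unaffected. For orientation, a minimal usage sketch follows; it assumes the functions are importable as ocw.metrics and uses made-up sample arrays purely for illustration.

import numpy as np
import numpy.ma as ma
from ocw import metrics  # assumed import path for the helpers shown above

# Made-up 2x2 reference and model fields; one cell is missing in both.
ref = ma.masked_invalid(np.array([[1.0, 2.0], [3.0, np.nan]]))
model = ma.masked_invalid(np.array([[1.5, 1.5], [2.0, np.nan]]))

bias = metrics.calc_bias(model, ref)         # element-wise model minus reference
rmse = metrics.calc_rmse(model, ref)         # scalar root mean square error
corr = metrics.calc_correlation(model, ref)  # Pearson correlation of the two fields
print(bias, rmse, corr)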

http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/plotter.py
----------------------------------------------------------------------
diff --git a/ocw/plotter.py b/ocw/plotter.py
index 72376bd..aedba6e 100755
--- a/ocw/plotter.py
+++ b/ocw/plotter.py
@@ -27,6 +27,7 @@ import numpy.ma as ma
 # Set the default colormap to coolwarm
 mpl.rc('image', cmap='coolwarm')
 
+
 def set_cmap(name):
     '''
     Sets the default colormap (eg when setting cmap=None in a function)
@@ -43,6 +44,7 @@ def set_cmap(name):
     cmap = plt.get_cmap(name)
     mpl.rc('image', cmap=cmap.name)
 
+
 def _nice_intervals(data, nlevs):
     '''
     Purpose::
@@ -63,15 +65,15 @@ def _nice_intervals(data, nlevs):
     data = data.ravel()
     mn = mstats.scoreatpercentile(data, 5)
     mx = mstats.scoreatpercentile(data, 95)
-    #if there min less than 0 and
-    # or max more than 0 
-    #put 0 in center of color bar
+    # if the min is less than 0
+    # and the max is greater than 0,
+    # put 0 at the center of the color bar
     if mn < 0 and mx > 0:
         level = max(abs(mn), abs(mx))
         mnlvl = -1 * level
         mxlvl = level
-    #if min is larger than 0 then
-    #have color bar between min and max
+    # if min is larger than 0 then
+    # have color bar between min and max
     else:
         mnlvl = mn
         mxlvl = mx
@@ -102,7 +104,8 @@ def _best_grid_shape(nplots, oldshape):
     size = nrows * ncols
     diff = size - nplots
     if diff < 0:
-        raise ValueError('gridshape=(%d, %d): Cannot fit enough subplots for data' %(nrows, ncols))
+        raise ValueError(
+            'gridshape=(%d, %d): Cannot fit enough subplots for data' % (nrows, ncols))
     else:
         # If the user enters an excessively large number of
         # rows and columns for gridshape, automatically
@@ -120,6 +123,7 @@ def _best_grid_shape(nplots, oldshape):
         newshape = nrows, ncols
         return newshape
 
+
 def _fig_size(gridshape, aspect=None):
     '''
     Purpose::
@@ -149,8 +153,9 @@ def _fig_size(gridshape, aspect=None):
 
     return width, height
 
+
 def draw_taylor_diagram(results, names, refname, fname, fmt='png',
-                        gridshape=(1,1), ptitle='', subtitles=None,
+                        gridshape=(1, 1), ptitle='', subtitles=None,
                         pos='upper right', frameon=True, radmax=1.5):
     ''' Draw a Taylor diagram.
 
@@ -176,7 +181,7 @@ def draw_taylor_diagram(results, names, refname, fname, fmt='png',
 
     :param ptitle: (Optional) plot title.
     :type ptitle: :mod:`string`
-    
+
     :param subtitles: (Optional) list of strings specifying the title for each
         subplot.
     :type subtitles: :class:`list` of :mod:`string`
@@ -212,7 +217,8 @@ def draw_taylor_diagram(results, names, refname, fname, fmt='png',
         rect = str(rect[0]) + str(rect[1]) + str(rect[2])
 
         # Create Taylor Diagram object
-        dia = TaylorDiagram(1, fig=fig, rect=rect, label=refname, radmax=radmax)
+        dia = TaylorDiagram(1, fig=fig, rect=rect,
+                            label=refname, radmax=radmax)
         for i, (stddev, corrcoef) in enumerate(data):
             dia.add_sample(stddev, corrcoef, marker='$%d$' % (i + 1), ms=6,
                            label=names[i])
@@ -230,9 +236,10 @@ def draw_taylor_diagram(results, names, refname, fname, fmt='png',
     # Add title and save the figure
     fig.suptitle(ptitle)
     plt.tight_layout(.05, .05)
-    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
     fig.clf()
 
+
 def draw_subregions(subregions, lats, lons, fname, fmt='png', ptitle='',
                     parallels=None, meridians=None, subregion_masks=None):
     ''' Draw subregion domain(s) on a map.
@@ -301,9 +308,11 @@ def draw_subregions(subregions, lats, lons, fname, fmt='png', ptitle='',
         dlatlon = np.round(length, decimals=-1)
 
     if meridians is None:
-        meridians = np.r_[np.arange(0, -180, -dlatlon)[::-1], np.arange(0, 180, dlatlon)]
+        meridians = np.r_[
+            np.arange(0, -180, -dlatlon)[::-1], np.arange(0, 180, dlatlon)]
     if parallels is None:
-        parallels = np.r_[np.arange(0, -90, -dlatlon)[::-1], np.arange(0, 90, dlatlon)]
+        parallels = np.r_[np.arange(0, -90, -dlatlon)
+                          [::-1], np.arange(0, 90, dlatlon)]
 
     # Draw parallels / meridians
     m.drawmeridians(meridians, labels=[0, 0, 0, 1], linewidth=.75, fontsize=10)
@@ -336,15 +345,17 @@ def draw_subregions(subregions, lats, lons, fname, fmt='png', ptitle='',
 
         # Label the subregion
         xm, ym = x.mean(), y.mean()
-        m.plot(xm, ym, marker='$%s$' %("R"+str(i+1)), markersize=12, color='k')
+        m.plot(xm, ym, marker='$%s$' %
+               ("R" + str(i + 1)), markersize=12, color='k')
 
     # Add the title
     ax.set_title(ptitle)
 
     # Save the figure
-    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
     fig.clf()
 
+
 def draw_time_series(results, times, labels, fname, fmt='png', gridshape=(1, 1),
                      xlabel='', ylabel='', ptitle='', subtitles=None,
                      label_month=False, yscale='linear', aspect=None):
@@ -372,7 +383,7 @@ def draw_time_series(results, times, labels, fname, fmt='png', gridshape=(1, 1),
 
     :param xlabel: (Optional) x-axis title.
     :type xlabel: :mod:`string`
-    
+
     :param ylabel: (Optional) y-axis title.
     :type ylabel: :mod:`string`
 
@@ -381,7 +392,7 @@ def draw_time_series(results, times, labels, fname, fmt='png', gridshape=(1, 1),
 
     :param subtitles: (Optional) list of titles for each subplot.
     :type subtitles: :class:`list` of :mod:`string`
-    
+
     :param label_month: (Optional) flag to toggle drawing month labels on the
         x-axis.
     :type label_month: :class:`bool`
@@ -389,7 +400,7 @@ def draw_time_series(results, times, labels, fname, fmt='png', gridshape=(1, 1),
     :param yscale: (Optional) y-axis scale value, 'linear' for linear and 'log'
         for log base 10.
     :type yscale: :mod:`string`
-    
+
     :param aspect: (Optional) approximate aspect ratio of each subplot
         (width / height). Default is 8.5 / 5.5
     :type aspect: :class:`float`
@@ -458,7 +469,8 @@ def draw_time_series(results, times, labels, fname, fmt='png', gridshape=(1, 1),
 
     # Create a master axes rectangle for figure wide labels
     fax = fig.add_subplot(111, frameon=False)
-    fax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
+    fax.tick_params(labelcolor='none', top='off',
+                    bottom='off', left='off', right='off')
     fax.set_ylabel(ylabel)
     fax.set_title(ptitle, fontsize=16)
     fax.title.set_y(1.04)
@@ -470,7 +482,7 @@ def draw_time_series(results, times, labels, fname, fmt='png', gridshape=(1, 1),
     cax.set_xticks([])
     cax.set_yticks([])
     cax.legend((lines), labels, loc='upper center', ncol=10, fontsize='small',
-                   mode='expand', frameon=False)
+               mode='expand', frameon=False)
 
     # Note that due to weird behavior by axes_grid, it is more convenient to
     # place the x-axis label relative to the colorbar axes instead of the
@@ -485,11 +497,12 @@ def draw_time_series(results, times, labels, fname, fmt='png', gridshape=(1, 1),
             xtick.set_rotation(30)
 
     # Save the figure
-    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
     fig.clf()
 
-def draw_barchart(results, yvalues, fname, ptitle='', fmt='png', 
-                     xlabel='', ylabel=''):
+
+def draw_barchart(results, yvalues, fname, ptitle='', fmt='png',
+                  xlabel='', ylabel=''):
     ''' Draw a barchart.
 
     :param results: 1D array of  data.
@@ -509,36 +522,38 @@ def draw_barchart(results, yvalues, fname, ptitle='', fmt='png',
 
     :param xlabel: (Optional) x-axis title.
     :type xlabel: :mod:`string`
-    
+
     :param ylabel: (Optional) y-axis title.
     :type ylabel: :mod:`string`
 
     '''
 
-    y_pos = list(range(len(yvalues))) 
-    fig = plt.figure() 
+    y_pos = list(range(len(yvalues)))
+    fig = plt.figure()
     fig.set_size_inches((11., 8.5))
     fig.dpi = 300
     ax = plt.subplot(111)
     plt.barh(y_pos, results, align="center", height=0.8, linewidth=0)
     plt.yticks(y_pos, yvalues)
-    plt.tick_params(axis="both", which="both", bottom="on", top="off",labelbottom="on", left="off", right="off", labelleft="on")
+    plt.tick_params(axis="both", which="both", bottom="on", top="off",
+                    labelbottom="on", left="off", right="off", labelleft="on")
     ax.spines["top"].set_visible(False)
     ax.spines["right"].set_visible(False)
 
-    ymin = min(y_pos) 
+    ymin = min(y_pos)
     ymax = max(y_pos)
-    ymin = min((ymin - ((ymax - ymin) * 0.1)/2),0.5) 
+    ymin = min((ymin - ((ymax - ymin) * 0.1) / 2), 0.5)
     ymax = ymax + ((ymax - ymin) * 0.1)
     ax.set_ylim((ymin, ymax))
     plt.xlabel(xlabel)
     plt.tight_layout()
-       
+
     # Save the figure
-    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
     fig.clf()
 
-def draw_marker_on_map(lat, lon, fname, fmt='png', location_name=' ',gridshape=(1,1)):
+
+def draw_marker_on_map(lat, lon, fname, fmt='png', location_name=' ', gridshape=(1, 1)):
     '''
     Purpose::
         Draw a marker on a map
@@ -547,31 +562,34 @@ def draw_marker_on_map(lat, lon, fname, fmt='png', location_name=' ',gridshape=(
         lat - latitude for plotting a marker
         lon - longitude for plotting a marker
         fname  - a string specifying the filename of the plot
-    '''   
+    '''
     fig = plt.figure()
     fig.dpi = 300
     ax = fig.add_subplot(111)
-    
-    m = Basemap(projection='cyl', resolution = 'c', llcrnrlat =lat-30, urcrnrlat = lat+30, llcrnrlon = lon-60, urcrnrlon = lon+60)
+
+    m = Basemap(projection='cyl', resolution='c', llcrnrlat=lat -
+                30, urcrnrlat=lat + 30, llcrnrlon=lon - 60, urcrnrlon=lon + 60)
     m.drawcoastlines(linewidth=1)
     m.drawcountries(linewidth=1)
     m.drawmapboundary(fill_color='aqua')
-    m.fillcontinents(color='coral',lake_color='aqua')
+    m.fillcontinents(color='coral', lake_color='aqua')
     m.ax = ax
-   
-    xpt,ypt = m(lon,lat)
-    m.plot(xpt,ypt,'bo')  # plot a blue dot there
+
+    xpt, ypt = m(lon, lat)
+    m.plot(xpt, ypt, 'bo')  # plot a blue dot there
     # put some text next to the dot, offset a little bit
     # (the offset is in map projection coordinates)
-    plt.text(xpt+0.5, ypt+1.5,location_name+'\n(lon: %5.1f, lat: %3.1f)' % (lon, lat)) 
-                       
-    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+    plt.text(xpt + 0.5, ypt + 1.5, location_name +
+             '\n(lon: %5.1f, lat: %3.1f)' % (lon, lat))
+
+    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
     fig.clf()
 
+
 def draw_contour_map(dataset, lats, lons, fname, fmt='png', gridshape=(1, 1),
                      clabel='', ptitle='', subtitles=None, cmap=None,
                      clevs=None, nlevs=10, parallels=None, meridians=None,
-                     extend='neither', aspect=8.5/2.5):
+                     extend='neither', aspect=8.5 / 2.5):
     ''' Draw a multiple panel contour map plot.
 
     :param dataset: 3D array of data to be plotted with shape (nT, nLat, nLon).
@@ -609,7 +627,7 @@ def draw_contour_map(dataset, lats, lons, fname, fmt='png', gridshape=(1, 1),
 
     :param clevs: (Optional) contour levels values.
     :type clevs: :class:`list` of :class:`int` or :class:`float`
-    
+
     :param nlevs: (Optional) target number of contour levels if clevs is None.
     :type nlevs: :class:`int`
 
@@ -667,8 +685,8 @@ def draw_contour_map(dataset, lats, lons, fname, fmt='png', gridshape=(1, 1),
     lonmax = lons.max()
     latmin = lats.min()
     latmax = lats.max()
-    m = Basemap(projection = 'cyl', llcrnrlat = latmin, urcrnrlat = latmax,
-                llcrnrlon = lonmin, urcrnrlon = lonmax, resolution = 'l')
+    m = Basemap(projection='cyl', llcrnrlat=latmin, urcrnrlat=latmax,
+                llcrnrlon=lonmin, urcrnrlon=lonmax, resolution='l')
 
     # Convert lats and lons to projection coordinates
     if lats.ndim == 1 and lons.ndim == 1:
@@ -692,11 +710,13 @@ def draw_contour_map(dataset, lats, lons, fname, fmt='png', gridshape=(1, 1),
     elif length <= 5:
         dlatlon = 5
     else:
-        dlatlon = np.round(length, decimals = -1)
+        dlatlon = np.round(length, decimals=-1)
     if meridians is None:
-        meridians = np.r_[np.arange(0, -180, -dlatlon)[::-1], np.arange(0, 180, dlatlon)]
+        meridians = np.r_[
+            np.arange(0, -180, -dlatlon)[::-1], np.arange(0, 180, dlatlon)]
     if parallels is None:
-        parallels = np.r_[np.arange(0, -90, -dlatlon)[::-1], np.arange(0, 90, dlatlon)]
+        parallels = np.r_[np.arange(0, -90, -dlatlon)
+                          [::-1], np.arange(0, 90, dlatlon)]
 
     x, y = m(lons, lats)
     for i, ax in enumerate(grid):
@@ -720,7 +740,8 @@ def draw_contour_map(dataset, lats, lons, fname, fmt='png', gridshape=(1, 1),
             ax.set_title(subtitles[i], fontsize='small')
 
     # Add colorbar
-    cbar = fig.colorbar(cs, cax=ax.cax, drawedges=True, orientation='horizontal', extendfrac='auto')
+    cbar = fig.colorbar(cs, cax=ax.cax, drawedges=True,
+                        orientation='horizontal', extendfrac='auto')
     cbar.set_label(clabel)
     cbar.set_ticks(clevs)
     cbar.ax.tick_params(labelsize=6)
@@ -739,9 +760,10 @@ def draw_contour_map(dataset, lats, lons, fname, fmt='png', gridshape=(1, 1),
 
     # Add figure title
     fig.suptitle(ptitle, y=ymax + .06, fontsize=16)
-    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
     fig.clf()
 
+
 def draw_portrait_diagram(results, rowlabels, collabels, fname, fmt='png',
                           gridshape=(1, 1), xlabel='', ylabel='', clabel='',
                           ptitle='', subtitles=None, cmap=None, clevs=None,
@@ -761,7 +783,7 @@ def draw_portrait_diagram(results, rowlabels, collabels, fname, fmt='png',
 
     :param fname: Filename of the plot.
     :type fname: :mod:`string`
-    
+
     :param fmt: (Optional) filetype for the output.
     :type fmt: :mod:`string`
 
@@ -817,7 +839,8 @@ def draw_portrait_diagram(results, rowlabels, collabels, fname, fmt='png',
     # the input data too
     prows, pcols = results.shape[1:]
     if len(rowlabels) != prows or len(collabels) != pcols:
-        raise ValueError('rowlabels and collabels must have %d and %d elements respectively' %(prows, pcols))
+        raise ValueError(
+            'rowlabels and collabels must have %d and %d elements respectively' % (prows, pcols))
 
     # Set up the figure
     width, height = _fig_size(gridshape)
@@ -853,7 +876,8 @@ def draw_portrait_diagram(results, rowlabels, collabels, fname, fmt='png',
     # Do the plotting
     for i, ax in enumerate(grid):
         data = results[i]
-        cs = ax.matshow(data, cmap=cmap, aspect='auto', origin='lower', norm=norm)
+        cs = ax.matshow(data, cmap=cmap, aspect='auto',
+                        origin='lower', norm=norm)
 
         # Add grid lines
         ax.xaxis.set_ticks(np.arange(data.shape[1] + 1))
@@ -873,11 +897,12 @@ def draw_portrait_diagram(results, rowlabels, collabels, fname, fmt='png',
         # Add axes title
         if subtitles is not None:
             ax.text(0.5, 1.04, subtitles[i], va='center', ha='center',
-                    transform = ax.transAxes, fontsize='small')
+                    transform=ax.transAxes, fontsize='small')
 
     # Create a master axes rectangle for figure wide labels
     fax = fig.add_subplot(111, frameon=False)
-    fax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
+    fax.tick_params(labelcolor='none', top='off',
+                    bottom='off', left='off', right='off')
     fax.set_ylabel(ylabel)
     fax.set_title(ptitle, fontsize=16)
     fax.title.set_y(1.04)
@@ -899,9 +924,10 @@ def draw_portrait_diagram(results, rowlabels, collabels, fname, fmt='png',
     cax.title.set_y(1.5)
 
     # Save the figure
-    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
+    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
     fig.clf()
 
+
 class TaylorDiagram(object):
     """ Taylor diagram helper class
 
@@ -929,18 +955,18 @@ class TaylorDiagram(object):
         tr = PolarAxes.PolarTransform()
 
         # Correlation labels
-        rlocs = np.concatenate((np.arange(10)/10.,[0.95,0.99]))
+        rlocs = np.concatenate((np.arange(10) / 10., [0.95, 0.99]))
         tlocs = np.arccos(rlocs)        # Conversion to polar angles
         gl1 = GF.FixedLocator(tlocs)    # Positions
-        tf1 = GF.DictFormatter(dict(zip(tlocs, map(str,rlocs))))
+        tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))
 
         # Standard deviation axis extent
         self.smin = 0
-        self.smax = radmax*self.refstd
+        self.smax = radmax * self.refstd
 
         ghelper = FA.GridHelperCurveLinear(tr,
-                                           extremes=(0,np.pi/2, # 1st quadrant
-                                                     self.smin,self.smax),
+                                           extremes=(0, np.pi / 2,  # 1st quadrant
+                                                     self.smin, self.smax),
                                            grid_locator1=gl1,
                                            tick_formatter1=tf1,
                                            )
@@ -958,7 +984,7 @@ class TaylorDiagram(object):
         ax.axis["top"].label.set_axis_direction("top")
         ax.axis["top"].label.set_text("Correlation")
 
-        ax.axis["left"].set_axis_direction("bottom") # "X axis"
+        ax.axis["left"].set_axis_direction("bottom")  # "X axis"
         ax.axis["left"].label.set_text("Standard deviation")
 
         ax.axis["right"].set_axis_direction("top")   # "Y axis"
@@ -977,9 +1003,9 @@ class TaylorDiagram(object):
         # print "Reference std:", self.refstd
         l, = self.ax.plot([0], self.refstd, 'k*',
                           ls='', ms=10, label=label)
-        t = np.linspace(0, np.pi/2)
+        t = np.linspace(0, np.pi / 2)
         r = np.zeros_like(t) + self.refstd
-        self.ax.plot(t,r, 'k--', label='_')
+        self.ax.plot(t, r, 'k--', label='_')
 
         # Collect sample points for later use (e.g. legend)
         self.samplePoints = [l]
@@ -990,7 +1016,7 @@ class TaylorDiagram(object):
         command."""
 
         l, = self.ax.plot(np.arccos(corrcoef), stddev,
-                          *args, **kwargs) # (theta,radius)
+                          *args, **kwargs)  # (theta,radius)
         self.samplePoints.append(l)
 
         return l
@@ -998,10 +1024,11 @@ class TaylorDiagram(object):
     def add_rms_contours(self, levels=5, **kwargs):
         """Add constant centered RMS difference contours."""
 
-        rs,ts = np.meshgrid(np.linspace(self.smin,self.smax),
-                            np.linspace(0,np.pi/2))
+        rs, ts = np.meshgrid(np.linspace(self.smin, self.smax),
+                             np.linspace(0, np.pi / 2))
         # Compute centered RMS difference
-        rms = np.sqrt(self.refstd**2 + rs**2 - 2*self.refstd*rs*np.cos(ts))
+        rms = np.sqrt(self.refstd**2 + rs**2 - 2 *
+                      self.refstd * rs * np.cos(ts))
 
         contours = self.ax.contour(ts, rs, rms, levels, **kwargs)
 
@@ -1011,16 +1038,17 @@ class TaylorDiagram(object):
 
         t = np.linspace(np.arccos(corr1), np.arccos(corr2))
         r = np.zeros_like(t) + std
-        return self.ax.plot(t,r,'red', linewidth=2)
+        return self.ax.plot(t, r, 'red', linewidth=2)
 
-    def add_contours(self,std1,corr1,std2,corr2, **kwargs):
+    def add_contours(self, std1, corr1, std2, corr2, **kwargs):
         """Add a line between two points
         [std1, corr1] and [std2, corr2]"""
 
         t = np.linspace(np.arccos(corr1), np.arccos(corr2))
         r = np.linspace(std1, std2)
 
-        return self.ax.plot(t,r,'red',linewidth=2)
+        return self.ax.plot(t, r, 'red', linewidth=2)
+
 
 def draw_histogram(dataset_array, data_names, fname, fmt='png', nbins=10):
     '''
@@ -1032,25 +1060,25 @@ def draw_histogram(dataset_array, data_names, fname, fmt='png', nbins=10):
         data_names    - a list of data names  ['name1','name2',....]
         fname  - a string specifying the filename of the plot
         bins - number of bins
-    '''    
+    '''
     fig = plt.figure()
     fig.dpi = 300
     ndata = len(dataset_array)
-   
+
     data_min = 500.
     data_max = 0.
- 
+
     for data in dataset_array:
-        data_min = np.min([data_min,data.min()]) 
-        data_max = np.max([data_max,data.max()]) 
+        data_min = np.min([data_min, data.min()])
+        data_max = np.max([data_max, data.max()])
 
-    bins = np.linspace(np.round(data_min), np.round(data_max+1), nbins)
-    for idata,data in enumerate(dataset_array):
-        ax = fig.add_subplot(ndata, 1, idata+1)
-        ax.hist(data, bins, alpha = 0.5, label=data_names[idata], normed = True)
+    bins = np.linspace(np.round(data_min), np.round(data_max + 1), nbins)
+    for idata, data in enumerate(dataset_array):
+        ax = fig.add_subplot(ndata, 1, idata + 1)
+        ax.hist(data, bins, alpha=0.5, label=data_names[idata], normed=True)
         leg = ax.legend()
         leg.get_frame().set_alpha(0.5)
-        ax.set_xlim([data_min-(data_max-data_min)*0.15, data_max+(data_max-data_min)*0.15])
-        
-    fig.savefig('%s.%s' %(fname, fmt), bbox_inches='tight', dpi=fig.dpi)
- 
+        ax.set_xlim([data_min - (data_max - data_min) * 0.15,
+                     data_max + (data_max - data_min) * 0.15])
+
+    fig.savefig('%s.%s' % (fname, fmt), bbox_inches='tight', dpi=fig.dpi)
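
The plotting routines above changed only in formatting. As a quick reference, here is a rough sketch of the simplest one, draw_barchart; it assumes ocw.plotter and its matplotlib/Basemap dependencies are installed, and the scores and labels are invented.

import numpy as np
from ocw import plotter  # assumed import path; importing it also pulls in matplotlib/Basemap

# Invented per-model scores for a horizontal bar chart.
scores = np.array([0.42, 0.77, 0.91])
labels = ['model_a', 'model_b', 'model_c']

# Writes example_scores.png to the current working directory.
plotter.draw_barchart(scores, labels, 'example_scores',
                      ptitle='Example model scores', xlabel='score')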

http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/statistical_downscaling.py
----------------------------------------------------------------------
diff --git a/ocw/statistical_downscaling.py b/ocw/statistical_downscaling.py
index 75e2adc..57013a0 100755
--- a/ocw/statistical_downscaling.py
+++ b/ocw/statistical_downscaling.py
@@ -20,7 +20,9 @@ import ocw.utils as utils
 import numpy as np
 from scipy.stats import percentileofscore, linregress
 
+
 class Downscaling:
+
     def __init__(self, ref_dataset, model_present, model_future):
         '''
         :param ref_dataset: The Dataset to use as the reference dataset (observation)
@@ -33,7 +35,7 @@ class Downscaling:
         self.ref_dataset = ref_dataset[~ref_dataset.mask].ravel()
         self.model_present = model_present.ravel()
         self.model_future = model_future.ravel()
-             
+
     description = "statistical downscaling methods"
 
     def Delta_addition(self):
@@ -41,12 +43,12 @@ class Downscaling:
            then add the difference to the observed distribution
 
         :returns: downscaled model_present and model_future
-        ''' 
-        ref = self.ref_dataset 
-        model_present = self.model_present 
-        model_future = self.model_future 
+        '''
+        ref = self.ref_dataset
+        model_present = self.model_present
+        model_future = self.model_future
 
-        return model_present, ref + np.mean(model_future-model_present)
+        return model_present, ref + np.mean(model_future - model_present)
 
     def Delta_correction(self):
         '''Calculate the mean difference between observation and present simulation,
@@ -58,7 +60,7 @@ class Downscaling:
         model_present = self.model_present
         model_future = self.model_future
 
-        return model_present+np.mean(ref) - np.mean(model_present), model_future + np.mean(ref) - np.mean(model_present)
+        return model_present + np.mean(ref) - np.mean(model_present), model_future + np.mean(ref) - np.mean(model_present)
 
     def Quantile_mapping(self):
         '''Remove the biases for each quantile value 
@@ -72,16 +74,16 @@ class Downscaling:
         model_future = self.model_future
         model_future_corrected = np.zeros(model_future.size)
 
-
         for ival, model_value in enumerate(model_present):
             percentile = percentileofscore(model_present, model_value)
-            model_present_corrected[ival] = np.percentile(ref, percentile) 
+            model_present_corrected[ival] = np.percentile(ref, percentile)
 
         for ival, model_value in enumerate(model_future):
             percentile = percentileofscore(model_future, model_value)
-            model_future_corrected[ival] = model_value + np.percentile(ref, percentile) - np.percentile(model_present, percentile) 
+            model_future_corrected[ival] = model_value + np.percentile(
+                ref, percentile) - np.percentile(model_present, percentile)
 
-        return model_present_corrected, model_future_corrected     
+        return model_present_corrected, model_future_corrected
 
     def Asynchronous_regression(self):
         '''Remove the biases by fitting a linear regression model with ordered observational and model datasets
@@ -94,18 +96,15 @@ class Downscaling:
         model_present = self.model_present
         model_present_sorted = np.sort(model_present)
         model_future = self.model_future
- 
-        ref = np.zeros(model_present.size)   # For linear regression, the size of reference data must be same as model data. 
+
+        # For linear regression, the size of reference data must be same as
+        # model data.
+        ref = np.zeros(model_present.size)
 
         for ival, model_value in enumerate(model_present_sorted):
             percentile = percentileofscore(model_present_sorted, model_value)
-            ref[ival] = np.percentile(ref_original, percentile)       
-
-        slope, intercept = linregress(model_present_sorted, ref)[0:2] 
-        
-        return model_present*slope+intercept, model_future*slope+intercept
-
-
-
+            ref[ival] = np.percentile(ref_original, percentile)
 
+        slope, intercept = linregress(model_present_sorted, ref)[0:2]
 
+        return model_present * slope + intercept, model_future * slope + intercept
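
The Downscaling class keeps the same four public methods (Delta_addition, Delta_correction, Quantile_mapping, Asynchronous_regression) after the cleanup. A rough sketch of the delta-correction path, assuming the class is importable from ocw.statistical_downscaling and using synthetic gamma-distributed samples in place of real data:

import numpy as np
import numpy.ma as ma
from ocw.statistical_downscaling import Downscaling  # assumed import path

# Synthetic stand-ins for the observed and simulated distributions.
obs = ma.array(np.random.gamma(2.0, 1.0, 200), mask=np.zeros(200, dtype=bool))
present = np.random.gamma(2.0, 1.2, 200)
future = np.random.gamma(2.0, 1.5, 200)

down = Downscaling(obs, present, future)
present_corrected, future_corrected = down.Delta_correction()
print(present_corrected.mean(), future_corrected.mean())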

http://git-wip-us.apache.org/repos/asf/climate/blob/731419f8/ocw/tests/test_dap.py
----------------------------------------------------------------------
diff --git a/ocw/tests/test_dap.py b/ocw/tests/test_dap.py
index 6ae7546..c302b6d 100644
--- a/ocw/tests/test_dap.py
+++ b/ocw/tests/test_dap.py
@@ -22,6 +22,7 @@ from ocw.dataset import Dataset
 
 
 class TestDap(unittest.TestCase):
+
     @classmethod
     def setUpClass(cls):
 
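
To confirm that a formatting-only change like this leaves behaviour intact, the tests under ocw/tests (such as test_dap.py above) can be run with the standard library's unittest discovery. A minimal sketch, assuming it is executed from the repository root:

# Discover and run the OCW unit tests; paths follow the diffs above.
import unittest

suite = unittest.defaultTestLoader.discover('ocw/tests', pattern='test_*.py')
unittest.TextTestRunner(verbosity=2).run(suite)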

