diff --git a/notebooks/example-workflows/ecs-cmip6.ipynb b/notebooks/example-workflows/ecs-cmip6.ipynb
index b575cb5..9d5988a 100644
--- a/notebooks/example-workflows/ecs-cmip6.ipynb
+++ b/notebooks/example-workflows/ecs-cmip6.ipynb
@@ -311,7 +311,7 @@
     "    which can lead to issues when merging.\"\"\"\n",
     "    drop_vars = [vname for vname in ds.coords\n",
     "                 if (('_bounds') in vname ) or ('_bnds') in vname]\n",
-    "    return ds.drop(drop_vars)\n",
+    "    return ds.drop_vars(drop_vars)\n",
     "\n",
     "def open_dsets(df):\n",
     "    \"\"\"Open datasets from cloud storage and return xarray dataset.\"\"\"\n",
@@ -471,7 +471,7 @@
     "    # https://github.com/pydata/xarray/issues/2237#issuecomment-620961663\n",
     "    dsets_ann_mean = [v[expt].pipe(global_mean)\n",
     "                             .swap_dims({'time': 'year'})\n",
-    "                             .drop('time')\n",
+    "                             .drop_vars('time')\n",
     "                             .coarsen(year=12).mean()\n",
     "                      for expt in expts]\n",
     "\n",
@@ -645,14 +645,12 @@
    "source": [
     "ds_abrupt = ds_anom.sel(year=first_150_years, experiment_id=co2_option).reset_coords(drop=True)\n",
     "\n",
-    "def calc_ecs(ds):\n",
-    "    # Some sources don't have all 150 years, drop those missing values.\n",
-    "    a, b = np.polyfit(ds.tas.dropna(\"year\"),\n",
-    "                      ds.imbalance.dropna(\"year\"), 1)\n",
+    "def calc_ecs(tas, imb):\n",
+    "    a, b = np.polyfit(tas, imb, 1)\n",
     "    ecs = -1.0 * (b/a) # Change -1.0 to -0.5 if using 4xCO2\n",
     "    return xr.DataArray(ecs)\n",
     "\n",
-    "ds_abrupt['ecs'] = ds_abrupt.groupby('source_id').apply(calc_ecs)\n",
+    "ds_abrupt['ecs'] = xr.apply_ufunc(calc_ecs, ds_abrupt.tas, ds_abrupt.imbalance, vectorize=True, input_core_dims=[['year'], ['year']])\n",
     "ds_abrupt.compute()"
    ]
   },
@@ -776,7 +774,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.9"
+   "version": "3.10.15"
  },
  "nbdime-conflicts": {
   "local_diff": [
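
The first two hunks swap the deprecated `Dataset.drop` (since split into `drop_vars`/`drop_sel` and removed from recent xarray releases) for `drop_vars`. The third hunk replaces a `groupby('source_id').apply(...)` reduction with `xr.apply_ufunc`: `input_core_dims=[['year'], ['year']]` marks `year` as the dimension each call consumes, and `vectorize=True` loops the scalar-returning fit over the remaining `source_id` dimension. Below is a minimal, self-contained sketch of that pattern; the data is synthetic, and only the names `tas`, `imbalance`, `source_id`, and `year` mirror the notebook.

```python
# Minimal sketch (not from the notebook) of the apply_ufunc change above.
# Synthetic data; only the variable names mirror the real workflow.
import numpy as np
import xarray as xr

rng = np.random.default_rng(0)
n_models, n_years = 3, 150

# Fake per-model surface-warming and TOA-imbalance time series.
tas = xr.DataArray(rng.normal(0.03, 0.02, (n_models, n_years)).cumsum(axis=1),
                   dims=('source_id', 'year'))
imbalance = 4.0 - 0.8 * tas + rng.normal(0.0, 0.1, tas.shape)

def calc_ecs(tas, imb):
    # Gregory regression: fit imb = a*tas + b; ECS is the tas value
    # where the fitted imbalance reaches zero, i.e. -b/a.
    a, b = np.polyfit(tas, imb, 1)
    return -1.0 * (b / a)  # change -1.0 to -0.5 if using 4xCO2

# 'year' is the core dim consumed by each call; vectorize=True loops
# calc_ecs over every slice along the remaining dim (source_id).
ecs = xr.apply_ufunc(calc_ecs, tas, imbalance,
                     vectorize=True,
                     input_core_dims=[['year'], ['year']])
print(ecs)  # 1-D DataArray: one ECS estimate per source_id
```

One behavioral note on the change itself: the removed code called `.dropna("year")` before fitting, whereas the vectorized version passes each series through as-is, so any NaNs would now reach `np.polyfit` directly.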