diff --git a/examples/storm-surge/ike/setplot.py b/examples/storm-surge/ike/setplot.py index 5784f9ce5..865093be4 100644 --- a/examples/storm-surge/ike/setplot.py +++ b/examples/storm-surge/ike/setplot.py @@ -155,38 +155,23 @@ def friction_after_axes(cd): plotfigure.show = True plotfigure.clf_each_gauge = True - # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes() - plotaxes.xlimits = [-2, 1] - # plotaxes.xlabel = "Days from landfall" - # plotaxes.ylabel = "Surface (m)" - plotaxes.ylimits = [-1, 5] - plotaxes.title = 'Surface' - - def gauge_afteraxes(cd): - - axes = plt.gca() - surgeplot.plot_landfall_gauge(cd.gaugesoln, axes) - - # Fix up plot - in particular fix time labels - axes.set_title('Station %s' % cd.gaugeno) - axes.set_xlabel('Days relative to landfall') - axes.set_ylabel('Surface (m)') - axes.set_xlim([-2, 1]) - axes.set_ylim([-1, 5]) - axes.set_xticks([-2, -1, 0, 1]) - axes.set_xticklabels([r"$-2$", r"$-1$", r"$0$", r"$1$"]) - axes.grid(True) - plotaxes.afteraxes = gauge_afteraxes - - # Plot surface as blue curve: + plotaxes.time_scale = 1 / (24 * 60**2) + plotaxes.grid = True + plotaxes.xlimits = 'auto' + plotaxes.ylimits = 'auto' + plotaxes.title = "Surface" + plotaxes.ylabel = "Surface (m)" + plotaxes.time_label = "Days relative to landfall" + plotitem = plotaxes.new_plotitem(plot_type='1d_plot') - # plotitem.plot_var = 3 - # plotitem.plotstyle = 'b-' - - # + plotitem.plot_var = surgeplot.gauge_surface + # Plot red area if gauge is dry + plotitem = plotaxes.new_plotitem(plot_type='1d_plot') + plotitem.plot_var = surgeplot.gauge_dry_regions + plotitem.kwargs = {"color":'lightcoral', "linewidth":5} + # Gauge Location Plot - # def gauge_location_afteraxes(cd): plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97) surge_afteraxes(cd) diff --git a/examples/storm-surge/isaac/setplot.py b/examples/storm-surge/isaac/setplot.py index 5fb7aebc3..8fe715b15 100644 --- a/examples/storm-surge/isaac/setplot.py +++ b/examples/storm-surge/isaac/setplot.py @@ -1,20 +1,18 @@ - -from __future__ import absolute_import -from __future__ import print_function +#!/usr/bin/env python import os +import warnings +import datetime -import numpy +import numpy as np import matplotlib.pyplot as plt -import datetime import clawpack.visclaw.colormaps as colormap import clawpack.visclaw.gaugetools as gaugetools import clawpack.clawutil.data as clawutil import clawpack.amrclaw.data as amrclaw import clawpack.geoclaw.data as geodata - - +import clawpack.geoclaw.util as geoutil import clawpack.geoclaw.surge.plot as surgeplot try: @@ -88,9 +86,9 @@ def friction_after_axes(cd): regions = {"Gulf": {"xlimits": (clawdata.lower[0], clawdata.upper[0]), "ylimits": (clawdata.lower[1], clawdata.upper[1]), "figsize": (6.4, 4.8)}, - "Louisiana": {"xlimits": (-92, -83), - "ylimits": (27.5, 30.5), - "figsize": (8, 2.7)}} + "Louisiana": {"xlimits": (-92, -83), + "ylimits": (27.5, 30.5), + "figsize": (8, 2.7)}} for (name, region_dict) in regions.items(): @@ -175,40 +173,82 @@ def friction_after_axes(cd): # ======================================================================== # Figures for gauges # ======================================================================== + def plot_observed(current_data): + """Fetch and plot gauge data for gauges used.""" + + # Map GeoClaw gauge number to NOAA gauge number and location/name + gauge_mapping = {1: [8761724, "Grand Isle, LA"], + 2: [8760922, 'Pilots Station East, SW Pass, LA']} + + station_id, station_name = gauge_mapping[current_data.gaugesoln.id] + 
landfall_time = np.datetime64(datetime.datetime(2012, 8, 29, 0)) + begin_date = datetime.datetime(2012, 8, 27) + end_date = datetime.datetime(2012, 8, 31) + + # Fetch data if needed + date_time, water_level, tide = geoutil.fetch_noaa_tide_data(station_id, + begin_date, + end_date) + if water_level is None: + print("*** Could not fetch gauge {}.".format(station_id)) + else: + # Convert to seconds relative to landfall + t = (date_time - landfall_time) / np.timedelta64(1, 's') + t /= (24 * 60**2) + + # Detide + water_level -= tide + + # Plot data + ax = plt.gca() + ax.plot(t, water_level, color='lightgray', marker='x') + ax.set_title(station_name) + ax.legend(['Computed', "Observed"]) + + plotfigure = plotdata.new_plotfigure(name='Gauge Surfaces', figno=300, type='each_gauge') plotfigure.show = True plotfigure.clf_each_gauge = True - # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes() - plotaxes.xlimits = [-2, 1] - # plotaxes.xlabel = "Days from landfall" - # plotaxes.ylabel = "Surface (m)" - plotaxes.ylimits = [-1, 5] - plotaxes.title = 'Surface' - - def gauge_afteraxes(cd): - - axes = plt.gca() - landfall = 0. - surgeplot.plot_landfall_gauge(cd.gaugesoln, axes, landfall=landfall) - - # Fix up plot - in particular fix time labels - axes.set_title('Station %s' % cd.gaugeno) - axes.set_xlabel('Days relative to landfall') - axes.set_ylabel('Surface (m)') - axes.set_xlim([-2, 1]) - axes.set_ylim([-1, 5]) - axes.set_xticks([-2, -1, 0, 1]) - axes.set_xticklabels([r"$-2$", r"$-1$", r"$0$", r"$1$"]) - axes.grid(True) - plotaxes.afteraxes = gauge_afteraxes - - # Plot surface as blue curve: + plotaxes.time_scale = 1 / (24 * 60**2) + plotaxes.grid = True + plotaxes.xlimits = [-2, 1.5] + plotaxes.ylimits = [-0.25, 1] + plotaxes.title = "Surface" + plotaxes.ylabel = "Surface (m)" + plotaxes.time_label = "Days relative to landfall" + plotaxes.afteraxes = plot_observed + + plotitem = plotaxes.new_plotitem(plot_type='1d_plot') + plotitem.plot_var = surgeplot.gauge_surface + # Plot red area if gauge is dry plotitem = plotaxes.new_plotitem(plot_type='1d_plot') - plotitem.plot_var = 3 - plotitem.plotstyle = 'b-' + plotitem.plot_var = surgeplot.gauge_dry_regions + plotitem.kwargs = {"color":'lightcoral', "linewidth":5} + + # Gauge Location Plot + def gauge_location_afteraxes(cd): + plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97) + surge_afteraxes(cd) + gaugetools.plot_gauge_locations(cd.plotdata, gaugenos='all', + format_string='ko', add_labels=True) + + plotfigure = plotdata.new_plotfigure(name="Gauge Locations") + plotfigure.show = True + + # Set up for axes in this figure: + plotaxes = plotfigure.new_plotaxes() + plotaxes.title = 'Gauge Locations' + plotaxes.scaled = True + plotaxes.xlimits = regions['Louisiana']['xlimits'] + plotaxes.ylimits = regions['Louisiana']['ylimits'] + plotaxes.afteraxes = gauge_location_afteraxes + surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits) + surgeplot.add_land(plotaxes, bounds=[0.0, 20.0]) + plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10 + plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10 # ----------------------------------------- # Parameters used only when creating html and/or latex hardcopy @@ -217,7 +257,7 @@ def gauge_afteraxes(cd): plotdata.printfigs = True # print figures plotdata.print_format = 'png' # file format plotdata.print_framenos = 'all' # list of frames to print - plotdata.print_gaugenos = [1, 2, 3, 4] # list of gauges to print + plotdata.print_gaugenos = 'all' # list of 
gauges to print plotdata.print_fignos = 'all' # list of figures to print plotdata.html = True # create html files of plots? plotdata.latex = True # create latex file of plots? diff --git a/examples/storm-surge/isaac/setrun.py b/examples/storm-surge/isaac/setrun.py index d6d63eb93..4e8299286 100644 --- a/examples/storm-surge/isaac/setrun.py +++ b/examples/storm-surge/isaac/setrun.py @@ -313,17 +313,16 @@ def setrun(claw_pkg='geoclaw'): regions = rundata.regiondata.regions # to specify regions of refinement append lines of the form # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2] - # Gauges from Ike AWR paper (2011 Dawson et al) - rundata.gaugedata.gauges.append([1, -95.04, 29.07, - rundata.clawdata.t0, - rundata.clawdata.tfinal]) - rundata.gaugedata.gauges.append([2, -94.71, 29.28, - rundata.clawdata.t0, - rundata.clawdata.tfinal]) - rundata.gaugedata.gauges.append([3, -94.39, 29.49, + + # Pilots Station East, S.W. Pass, LA - 28°55.9429'N, 89°24.4445'W + # https://tidesandcurrents.noaa.gov/stationhome.html?id=8760922 + rundata.gaugedata.gauges.append([1, -89.40740833, 28.93238167, rundata.clawdata.t0, rundata.clawdata.tfinal]) - rundata.gaugedata.gauges.append([4, -94.13, 29.58, + + # Grand Isle, LA - 29°15.8761'N 89°57.4960'W + # https://tidesandcurrents.noaa.gov/stationhome.html?id=8761724 + rundata.gaugedata.gauges.append([2, -89.95826667, 29.26460167, rundata.clawdata.t0, rundata.clawdata.tfinal]) diff --git a/src/2d/bouss/Makefile.bouss b/src/2d/bouss/Makefile.bouss index 225e7a539..07a4e6c01 100644 --- a/src/2d/bouss/Makefile.bouss +++ b/src/2d/bouss/Makefile.bouss @@ -167,6 +167,6 @@ COMMON_SOURCES += \ $(BOUSSLIB)/resetBoussStuff.f \ $(BOUSSLIB)/simpleBound.f90 \ $(BOUSSLIB)/umfpack_support.f \ - $(BOUSSLIB)/rpn2_geoclaw.f \ - $(BOUSSLIB)/rpt2_geoclaw_sym.f \ - $(BOUSSLIB)/geoclaw_riemann_utils.f \ + $(CLAW)/riemann/src/rpn2_geoclaw.f \ + $(CLAW)/riemann/src/rpt2_geoclaw.f \ + $(CLAW)/riemann/src/geoclaw_riemann_utils.f \ diff --git a/src/2d/bouss/geoclaw_riemann_utils.f b/src/2d/bouss/geoclaw_riemann_utils.f deleted file mode 100644 index 4745cf43d..000000000 --- a/src/2d/bouss/geoclaw_riemann_utils.f +++ /dev/null @@ -1,658 +0,0 @@ -c----------------------------------------------------------------------- - subroutine riemann_aug_JCP(maxiter,meqn,mwaves,hL,hR,huL,huR, - & hvL,hvR,bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2,drytol,g,rho, - & sw,fw) - - ! solve shallow water equations given single left and right states - ! This solver is described in J. Comput. Phys. (6): 3089-3113, March 2008 - ! Augmented Riemann Solvers for the Shallow Equations with Steady States and Inundation - - ! To use the original solver call with maxiter=1. - - ! This solver allows iteration when maxiter > 1. The iteration seems to help with - ! instabilities that arise (with any solver) as flow becomes transcritical over variable topo - ! due to loss of hyperbolicity. 
- - implicit none - - !input - integer meqn,mwaves,maxiter - double precision fw(meqn,mwaves) - double precision sw(mwaves) - double precision hL,hR,huL,huR,bL,bR,uL,uR,phiL,phiR,sE1,sE2 - double precision hvL,hvR,vL,vR,pL,pR - double precision drytol,g,rho - - - !local - integer m,mw,k,iter - double precision A(3,3) - double precision r(3,3) - double precision lambda(3) - double precision del(3) - double precision beta(3) - - double precision delh,delhu,delphi,delb,delnorm - double precision rare1st,rare2st,sdelta,raremin,raremax - double precision criticaltol,convergencetol,raretol - double precision criticaltol_2, hustar_interface - double precision s1s2bar,s1s2tilde,hbar,hLstar,hRstar,hustar - double precision huRstar,huLstar,uRstar,uLstar,hstarHLL - double precision deldelh,deldelphi,delP - double precision s1m,s2m,hm - double precision det1,det2,det3,determinant - - logical rare1,rare2,rarecorrector,rarecorrectortest,sonic - - !determine del vectors - delh = hR-hL - delhu = huR-huL - delphi = phiR-phiL - delb = bR-bL - delP = pR - pL - delnorm = delh**2 + delphi**2 - - call riemanntype(hL,hR,uL,uR,hm,s1m,s2m,rare1,rare2, - & 1,drytol,g) - - - lambda(1)= min(sE1,s2m) !Modified Einfeldt speed - lambda(3)= max(sE2,s1m) !Modified Eindfeldt speed - sE1=lambda(1) - sE2=lambda(3) - lambda(2) = 0.d0 ! ### Fix to avoid uninitialized value in loop on mw -- Correct?? ### - - - hstarHLL = max((huL-huR+sE2*hR-sE1*hL)/(sE2-sE1),0.d0) ! middle state in an HLL solve - -c !determine the middle entropy corrector wave------------------------ - rarecorrectortest=.false. - rarecorrector=.false. - if (rarecorrectortest) then - sdelta=lambda(3)-lambda(1) - raremin = 0.5d0 - raremax = 0.9d0 - if (rare1.and.sE1*s1m.lt.0.d0) raremin=0.2d0 - if (rare2.and.sE2*s2m.lt.0.d0) raremin=0.2d0 - if (rare1.or.rare2) then - !see which rarefaction is larger - rare1st=3.d0*(sqrt(g*hL)-sqrt(g*hm)) - rare2st=3.d0*(sqrt(g*hR)-sqrt(g*hm)) - if (max(rare1st,rare2st).gt.raremin*sdelta.and. - & max(rare1st,rare2st).lt.raremax*sdelta) then - rarecorrector=.true. - if (rare1st.gt.rare2st) then - lambda(2)=s1m - elseif (rare2st.gt.rare1st) then - lambda(2)=s2m - else - lambda(2)=0.5d0*(s1m+s2m) - endif - endif - endif - if (hstarHLL.lt.min(hL,hR)/5.d0) rarecorrector=.false. - endif - -c ## Is this correct 2-wave when rarecorrector == .true. ?? - do mw=1,mwaves - r(1,mw)=1.d0 - r(2,mw)=lambda(mw) - r(3,mw)=(lambda(mw))**2 - enddo - if (.not.rarecorrector) then - lambda(2) = 0.5d0*(lambda(1)+lambda(3)) -c lambda(2) = max(min(0.5d0*(s1m+s2m),sE2),sE1) - r(1,2)=0.d0 - r(2,2)=0.d0 - r(3,2)=1.d0 - endif -c !--------------------------------------------------- - - -c !determine the steady state wave ------------------- - !criticaltol = 1.d-6 - ! MODIFIED: - criticaltol = max(drytol*g, 1d-6) - criticaltol_2 = sqrt(criticaltol) - deldelh = -delb - deldelphi = -0.5d0 * (hR + hL) * (g * delb + delp / rho) - -c !determine a few quanitites needed for steady state wave if iterated - hLstar=hL - hRstar=hR - uLstar=uL - uRstar=uR - huLstar=uLstar*hLstar - huRstar=uRstar*hRstar - - !iterate to better determine the steady state wave - convergencetol=1.d-6 - do iter=1,maxiter - !determine steady state wave (this will be subtracted from the delta vectors) - if (min(hLstar,hRstar).lt.drytol.and.rarecorrector) then - rarecorrector=.false. 
- hLstar=hL - hRstar=hR - uLstar=uL - uRstar=uR - huLstar=uLstar*hLstar - huRstar=uRstar*hRstar - lambda(2) = 0.5d0*(lambda(1)+lambda(3)) -c lambda(2) = max(min(0.5d0*(s1m+s2m),sE2),sE1) - r(1,2)=0.d0 - r(2,2)=0.d0 - r(3,2)=1.d0 - endif - - hbar = max(0.5d0*(hLstar+hRstar),0.d0) - s1s2bar = 0.25d0*(uLstar+uRstar)**2 - g*hbar - s1s2tilde= max(0.d0,uLstar*uRstar) - g*hbar - -c !find if sonic problem - ! MODIFIED from 5.3.1 version - sonic = .false. - if (abs(s1s2bar) <= criticaltol) then - sonic = .true. - else if (s1s2bar*s1s2tilde <= criticaltol**2) then - sonic = .true. - else if (s1s2bar*sE1*sE2 <= criticaltol**2) then - sonic = .true. - else if (min(abs(sE1),abs(sE2)) < criticaltol_2) then - sonic = .true. - else if (sE1 < criticaltol_2 .and. s1m > -criticaltol_2) then - sonic = .true. - else if (sE2 > -criticaltol_2 .and. s2m < criticaltol_2) then - sonic = .true. - else if ((uL+dsqrt(g*hL))*(uR+dsqrt(g*hR)) < 0.d0) then - sonic = .true. - else if ((uL- dsqrt(g*hL))*(uR- dsqrt(g*hR)) < 0.d0) then - sonic = .true. - end if - -c !find jump in h, deldelh - if (sonic) then - deldelh = -delb - else - deldelh = delb*g*hbar/s1s2bar - endif -c !find bounds in case of critical state resonance, or negative states - if (sE1.lt.-criticaltol.and.sE2.gt.criticaltol) then - deldelh = min(deldelh,hstarHLL*(sE2-sE1)/sE2) - deldelh = max(deldelh,hstarHLL*(sE2-sE1)/sE1) - elseif (sE1.ge.criticaltol) then - deldelh = min(deldelh,hstarHLL*(sE2-sE1)/sE1) - deldelh = max(deldelh,-hL) - elseif (sE2.le.-criticaltol) then - deldelh = min(deldelh,hR) - deldelh = max(deldelh,hstarHLL*(sE2-sE1)/sE2) - endif - -c ! adjust deldelh for well-balancing of atmospheric pressure difference - deldelh = deldelh - delP/(rho*g) - -c !find jump in phi, deldelphi - if (sonic) then - deldelphi = -g*hbar*delb - else - deldelphi = -delb*g*hbar*s1s2tilde/s1s2bar - endif -c !find bounds in case of critical state resonance, or negative states - deldelphi=min(deldelphi,g*max(-hLstar*delb,-hRstar*delb)) - deldelphi=max(deldelphi,g*min(-hLstar*delb,-hRstar*delb)) - deldelphi = deldelphi - hbar * delp / rho - - del(1)=delh-deldelh - del(2)=delhu - del(3)=delphi-deldelphi - -c !Determine determinant of eigenvector matrix======== - det1=r(1,1)*(r(2,2)*r(3,3)-r(2,3)*r(3,2)) - det2=r(1,2)*(r(2,1)*r(3,3)-r(2,3)*r(3,1)) - det3=r(1,3)*(r(2,1)*r(3,2)-r(2,2)*r(3,1)) - determinant=det1-det2+det3 - -c !solve for beta(k) using Cramers Rule================= - do k=1,3 - do mw=1,3 - A(1,mw)=r(1,mw) - A(2,mw)=r(2,mw) - A(3,mw)=r(3,mw) - enddo - A(1,k)=del(1) - A(2,k)=del(2) - A(3,k)=del(3) - det1=A(1,1)*(A(2,2)*A(3,3)-A(2,3)*A(3,2)) - det2=A(1,2)*(A(2,1)*A(3,3)-A(2,3)*A(3,1)) - det3=A(1,3)*(A(2,1)*A(3,2)-A(2,2)*A(3,1)) - beta(k)=(det1-det2+det3)/determinant - enddo - - !exit if things aren't changing - if (abs(del(1)**2+del(3)**2-delnorm).lt.convergencetol) exit - delnorm = del(1)**2+del(3)**2 - !find new states qLstar and qRstar on either side of interface - hLstar=hL - hRstar=hR - uLstar=uL - uRstar=uR - huLstar=uLstar*hLstar - huRstar=uRstar*hRstar - do mw=1,mwaves - if (lambda(mw).lt.0.d0) then - hLstar= hLstar + beta(mw)*r(1,mw) - huLstar= huLstar + beta(mw)*r(2,mw) - endif - enddo - do mw=mwaves,1,-1 - if (lambda(mw).gt.0.d0) then - hRstar= hRstar - beta(mw)*r(1,mw) - huRstar= huRstar - beta(mw)*r(2,mw) - endif - enddo - - if (hLstar.gt.drytol) then - uLstar=huLstar/hLstar - else - hLstar=max(hLstar,0.d0) - uLstar=0.d0 - endif - if (hRstar.gt.drytol) then - uRstar=huRstar/hRstar - else - hRstar=max(hRstar,0.d0) - uRstar=0.d0 - endif - - enddo ! 
end iteration on Riemann problem - - fw(:,:) = 0.d0 ! initialize before setting some waves - - do mw=1,mwaves - sw(mw)=lambda(mw) - fw(1,mw)=beta(mw)*r(2,mw) - fw(2,mw)=beta(mw)*r(3,mw) - fw(3,mw)=beta(mw)*r(2,mw) - enddo - !find transverse components (ie huv jumps). - ! MODIFIED from 5.3.1 version - fw(3,1)=fw(3,1)*vL - fw(3,3)=fw(3,3)*vR - fw(3,2)= 0.d0 - - hustar_interface = huL + fw(1,1) ! = huR - fw(1,3) - if (hustar_interface <= 0.0d0) then - fw(3,1) = fw(3,1) + (hR*uR*vR - hL*uL*vL - fw(3,1)- fw(3,3)) - else - fw(3,3) = fw(3,3) + (hR*uR*vR - hL*uL*vL - fw(3,1)- fw(3,3)) - end if - - - return - - end !subroutine riemann_aug_JCP------------------------------------------------- - - -c----------------------------------------------------------------------- - subroutine riemann_ssqfwave(maxiter,meqn,mwaves,hL,hR,huL,huR, - & hvL,hvR,bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2,drytol,g, - & rho,sw,fw) - - ! solve shallow water equations given single left and right states - ! steady state wave is subtracted from delta [q,f]^T before decomposition - - implicit none - - !input - integer meqn,mwaves,maxiter - - double precision hL,hR,huL,huR,bL,bR,uL,uR,phiL,phiR,sE1,sE2 - double precision vL,vR,hvL,hvR,pL,pR - double precision drytol,g,rho - - !local - integer iter - - logical sonic - - double precision delh,delhu,delphi,delb,delhdecomp,delphidecomp - double precision s1s2bar,s1s2tilde,hbar,hLstar,hRstar,hustar - double precision uRstar,uLstar,hstarHLL - double precision deldelh,deldelphi,delP - double precision alpha1,alpha2,beta1,beta2,delalpha1,delalpha2 - double precision criticaltol,convergencetol - double precision sL,sR - double precision uhat,chat,sRoe1,sRoe2 - - double precision sw(mwaves) - double precision fw(meqn,mwaves) - - !determine del vectors - delh = hR-hL - delhu = huR-huL - delphi = phiR-phiL - delb = bR-bL - delP = pR - pL - - convergencetol= 1.d-16 - criticaltol = 1.d-99 - - deldelh = -delb - deldelphi = -0.5d0 * (hR + hL) * (g * delb + delP / rho) - -! !if no source term, skip determining steady state wave - if (abs(delb).gt.0.d0) then -! - !determine a few quanitites needed for steady state wave if iterated - hLstar=hL - hRstar=hR - uLstar=uL - uRstar=uR - hstarHLL = max((huL-huR+sE2*hR-sE1*hL)/(sE2-sE1),0.d0) ! middle state in an HLL solve - - alpha1=0.d0 - alpha2=0.d0 - -! !iterate to better determine Riemann problem - do iter=1,maxiter - - !determine steady state wave (this will be subtracted from the delta vectors) - hbar = max(0.5d0*(hLstar+hRstar),0.d0) - s1s2bar = 0.25d0*(uLstar+uRstar)**2 - g*hbar - s1s2tilde= max(0.d0,uLstar*uRstar) - g*hbar - - -c !find if sonic problem - sonic=.false. - if (abs(s1s2bar).le.criticaltol) sonic=.true. - if (s1s2bar*s1s2tilde.le.criticaltol) sonic=.true. - if (s1s2bar*sE1*sE2.le.criticaltol) sonic = .true. - if (min(abs(sE1),abs(sE2)).lt.criticaltol) sonic=.true. - -c !find jump in h, deldelh - if (sonic) then - deldelh = -delb - else - deldelh = delb*g*hbar/s1s2bar - endif -! 
!bounds in case of critical state resonance, or negative states - if (sE1.lt.-criticaltol.and.sE2.gt.criticaltol) then - deldelh = min(deldelh,hstarHLL*(sE2-sE1)/sE2) - deldelh = max(deldelh,hstarHLL*(sE2-sE1)/sE1) - elseif (sE1.ge.criticaltol) then - deldelh = min(deldelh,hstarHLL*(sE2-sE1)/sE1) - deldelh = max(deldelh,-hL) - elseif (sE2.le.-criticaltol) then - deldelh = min(deldelh,hR) - deldelh = max(deldelh,hstarHLL*(sE2-sE1)/sE2) - endif - -c !find jump in phi, deldelphi - if (sonic) then - deldelphi = -g*hbar*delb - else - deldelphi = -delb*g*hbar*s1s2tilde/s1s2bar - endif -! !bounds in case of critical state resonance, or negative states - deldelphi=min(deldelphi,g*max(-hLstar*delb,-hRstar*delb)) - deldelphi=max(deldelphi,g*min(-hLstar*delb,-hRstar*delb)) - -!---------determine fwaves ------------------------------------------ - -! !first decomposition - delhdecomp = delh-deldelh - delalpha1 = (sE2*delhdecomp - delhu)/(sE2-sE1)-alpha1 - alpha1 = alpha1 + delalpha1 - delalpha2 = (delhu - sE1*delhdecomp)/(sE2-sE1)-alpha2 - alpha2 = alpha2 + delalpha2 - - !second decomposition - delphidecomp = delphi - deldelphi - beta1 = (sE2*delhu - delphidecomp)/(sE2-sE1) - beta2 = (delphidecomp - sE1*delhu)/(sE2-sE1) - - if ((delalpha2**2+delalpha1**2).lt.convergencetol**2) then - exit - endif -! - if (sE2.gt.0.d0.and.sE1.lt.0.d0) then - hLstar=hL+alpha1 - hRstar=hR-alpha2 -c hustar=huL+alpha1*sE1 - hustar = huL + beta1 - elseif (sE1.ge.0.d0) then - hLstar=hL - hustar=huL - hRstar=hR - alpha1 - alpha2 - elseif (sE2.le.0.d0) then - hRstar=hR - hustar=huR - hLstar=hL + alpha1 + alpha2 - endif -! - if (hLstar.gt.drytol) then - uLstar=hustar/hLstar - else - hLstar=max(hLstar,0.d0) - uLstar=0.d0 - endif -! - if (hRstar.gt.drytol) then - uRstar=hustar/hRstar - else - hRstar=max(hRstar,0.d0) - uRstar=0.d0 - endif - - enddo - endif - - delhdecomp = delh - deldelh - delphidecomp = delphi - deldelphi - - !first decomposition - alpha1 = (sE2*delhdecomp - delhu)/(sE2-sE1) - alpha2 = (delhu - sE1*delhdecomp)/(sE2-sE1) - - !second decomposition - beta1 = (sE2*delhu - delphidecomp)/(sE2-sE1) - beta2 = (delphidecomp - sE1*delhu)/(sE2-sE1) - - ! 1st nonlinear wave - fw(1,1) = alpha1*sE1 - fw(2,1) = beta1*sE1 - fw(3,1) = fw(1,1)*vL - ! 2nd nonlinear wave - fw(1,3) = alpha2*sE2 - fw(2,3) = beta2*sE2 - fw(3,3) = fw(1,3)*vR - ! advection of transverse wave - fw(1,2) = 0.d0 - fw(2,2) = 0.d0 - fw(3,2) = hR*uR*vR - hL*uL*vL -fw(3,1)-fw(3,3) - !speeds - sw(1)=sE1 - sw(2)=0.5d0*(sE1+sE2) - sw(3)=sE2 - - return - - end subroutine !------------------------------------------------- - - -c----------------------------------------------------------------------- - subroutine riemann_fwave(meqn,mwaves,hL,hR,huL,huR,hvL,hvR, - & bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,s1,s2,drytol,g,rho, - & sw,fw) - - ! solve shallow water equations given single left and right states - ! solution has two waves. - ! flux - source is decomposed. 
- - implicit none - - !input - integer meqn,mwaves - - double precision hL,hR,huL,huR,bL,bR,uL,uR,phiL,phiR,s1,s2 - double precision hvL,hvR,vL,vR,pL,pR - double precision drytol,g,rho - - double precision sw(mwaves) - double precision fw(meqn,mwaves) - - !local - double precision delh,delhu,delphi,delb,delhdecomp,delphidecomp - double precision deldelh,deldelphi,delP - double precision beta1,beta2 - - - !determine del vectors - delh = hR-hL - delhu = huR-huL - delphi = phiR-phiL - delb = bR-bL - delP = pR - pL - - deldelphi = -0.5d0 * (hR + hL) * (g * delb + delP / rho) - delphidecomp = delphi - deldelphi - - !flux decomposition - beta1 = (s2*delhu - delphidecomp)/(s2-s1) - beta2 = (delphidecomp - s1*delhu)/(s2-s1) - - sw(1)=s1 - sw(2)=0.5d0*(s1+s2) - sw(3)=s2 - ! 1st nonlinear wave - fw(1,1) = beta1 - fw(2,1) = beta1*s1 - fw(3,1) = beta1*vL - ! 2nd nonlinear wave - fw(1,3) = beta2 - fw(2,3) = beta2*s2 - fw(3,3) = beta2*vR - ! advection of transverse wave - fw(1,2) = 0.d0 - fw(2,2) = 0.d0 - fw(3,2) = hR*uR*vR - hL*uL*vL -fw(3,1)-fw(3,3) - return - - end !subroutine ------------------------------------------------- - - - - - -c============================================================================= - subroutine riemanntype(hL,hR,uL,uR,hm,s1m,s2m,rare1,rare2, - & maxiter,drytol,g) - - !determine the Riemann structure (wave-type in each family) - - - implicit none - - !input - double precision hL,hR,uL,uR,drytol,g - integer maxiter - - !output - double precision s1m,s2m - logical rare1,rare2 - - !local - double precision hm,u1m,u2m,um,delu - double precision h_max,h_min,h0,F_max,F_min,dfdh,F0,slope,gL,gR - integer iter - - - -c !Test for Riemann structure - - h_min=min(hR,hL) - h_max=max(hR,hL) - delu=uR-uL - - if (h_min.le.drytol) then - hm=0.d0 - um=0.d0 - s1m=uR+uL-2.d0*sqrt(g*hR)+2.d0*sqrt(g*hL) - s2m=uR+uL-2.d0*sqrt(g*hR)+2.d0*sqrt(g*hL) - if (hL.le.0.d0) then - rare2=.true. - rare1=.false. - else - rare1=.true. - rare2=.false. - endif - - else - F_min= delu+2.d0*(sqrt(g*h_min)-sqrt(g*h_max)) - F_max= delu + - & (h_max-h_min)*(sqrt(.5d0*g*(h_max+h_min)/(h_max*h_min))) - - if (F_min.gt.0.d0) then !2-rarefactions - - hm=(1.d0/(16.d0*g))* - & max(0.d0,-delu+2.d0*(sqrt(g*hL)+sqrt(g*hR)))**2 - um=sign(1.d0,hm)*(uL+2.d0*(sqrt(g*hL)-sqrt(g*hm))) - - s1m=uL+2.d0*sqrt(g*hL)-3.d0*sqrt(g*hm) - s2m=uR-2.d0*sqrt(g*hR)+3.d0*sqrt(g*hm) - - rare1=.true. - rare2=.true. - - elseif (F_max.le.0.d0) then !2 shocks - -c !root finding using a Newton iteration on sqrt(h)=== - h0=h_max - do iter=1,maxiter - gL=sqrt(.5d0*g*(1/h0 + 1/hL)) - gR=sqrt(.5d0*g*(1/h0 + 1/hR)) - F0=delu+(h0-hL)*gL + (h0-hR)*gR - dfdh=gL-g*(h0-hL)/(4.d0*(h0**2)*gL)+ - & gR-g*(h0-hR)/(4.d0*(h0**2)*gR) - slope=2.d0*sqrt(h0)*dfdh - h0=(sqrt(h0)-F0/slope)**2 - enddo - hm=h0 - u1m=uL-(hm-hL)*sqrt((.5d0*g)*(1/hm + 1/hL)) - u2m=uR+(hm-hR)*sqrt((.5d0*g)*(1/hm + 1/hR)) - um=.5d0*(u1m+u2m) - - s1m=u1m-sqrt(g*hm) - s2m=u2m+sqrt(g*hm) - rare1=.false. - rare2=.false. - - else !one shock one rarefaction - h0=h_min - - do iter=1,maxiter - F0=delu + 2.d0*(sqrt(g*h0)-sqrt(g*h_max)) - & + (h0-h_min)*sqrt(.5d0*g*(1/h0+1/h_min)) - slope=(F_max-F0)/(h_max-h_min) - h0=h0-F0/slope - enddo - - hm=h0 - if (hL.gt.hR) then - um=uL+2.d0*sqrt(g*hL)-2.d0*sqrt(g*hm) - s1m=uL+2.d0*sqrt(g*hL)-3.d0*sqrt(g*hm) - s2m=uL+2.d0*sqrt(g*hL)-sqrt(g*hm) - rare1=.true. - rare2=.false. - else - s2m=uR-2.d0*sqrt(g*hR)+3.d0*sqrt(g*hm) - s1m=uR-2.d0*sqrt(g*hR)+sqrt(g*hm) - um=uR-2.d0*sqrt(g*hR)+2.d0*sqrt(g*hm) - rare2=.true. - rare1=.false. 
- endif - endif - endif - - return - - end ! subroutine riemanntype---------------------------------------------------------------- diff --git a/src/2d/bouss/rpn2_geoclaw.f b/src/2d/bouss/rpn2_geoclaw.f deleted file mode 100644 index 076fcd79f..000000000 --- a/src/2d/bouss/rpn2_geoclaw.f +++ /dev/null @@ -1,305 +0,0 @@ -c====================================================================== - subroutine rpn2(ixy,maxm,meqn,mwaves,maux,mbc,mx, - & ql,qr,auxl,auxr,fwave,s,amdq,apdq) -c====================================================================== -c -c Solves normal Riemann problems for the 2D SHALLOW WATER equations -c with topography: -c # h_t + (hu)_x + (hv)_y = 0 # -c # (hu)_t + (hu^2 + 0.5gh^2)_x + (huv)_y = -ghb_x # -c # (hv)_t + (huv)_x + (hv^2 + 0.5gh^2)_y = -ghb_y # - -c On input, ql contains the state vector at the left edge of each cell -c qr contains the state vector at the right edge of each cell -c -c This data is along a slice in the x-direction if ixy=1 -c or the y-direction if ixy=2. - -c Note that the i'th Riemann problem has left state qr(i-1,:) -c and right state ql(i,:) -c From the basic clawpack routines, this routine is called with -c ql = qr -c -c -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -! ! -! # This Riemann solver is for the shallow water equations. ! -! ! -! It allows the user to easily select a Riemann solver in ! -! riemannsolvers_geo.f. this routine initializes all the variables ! -! for the shallow water equations, accounting for wet dry boundary ! -! dry cells, wave speeds etc. ! -! ! -! David George, Vancouver WA, Feb. 2009 ! -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - use geoclaw_module, only: g => grav, drytol => dry_tolerance, rho - use geoclaw_module, only: earth_radius, deg2rad - use amr_module, only: mcapa - - use storm_module, only: pressure_forcing, pressure_index - - implicit none - - !input - integer maxm,meqn,maux,mwaves,mbc,mx,ixy - - double precision fwave(meqn, mwaves, 1-mbc:maxm+mbc) - double precision s(mwaves, 1-mbc:maxm+mbc) - double precision ql(meqn, 1-mbc:maxm+mbc) - double precision qr(meqn, 1-mbc:maxm+mbc) - double precision apdq(meqn,1-mbc:maxm+mbc) - double precision amdq(meqn,1-mbc:maxm+mbc) - double precision auxl(maux,1-mbc:maxm+mbc) - double precision auxr(maux,1-mbc:maxm+mbc) - - !local only - integer m,i,mw,maxiter,mu,nv - double precision wall(3) - double precision fw(meqn,3) - double precision sw(3) - - double precision hR,hL,huR,huL,uR,uL,hvR,hvL,vR,vL,phiR,phiL,pL,pR - double precision bR,bL,sL,sR,sRoe1,sRoe2,sE1,sE2,uhat,chat - double precision s1m,s2m - double precision hstar,hstartest,hstarHLL,sLtest,sRtest - double precision tw,dxdc - - logical :: debug - - logical rare1,rare2 - - ! In case there is no pressure forcing - pL = 0.d0 - pR = 0.d0 - debug = .false. - - ! initialize all components to 0 - fw(:,:) = 0.d0 - fwave(:,:,:) = 0.d0 - s(:,:) = 0.d0 - amdq(:,:) = 0.d0 - apdq(:,:) = 0.d0 - - !loop through Riemann problems at each grid cell - do i=2-mbc,mx+mbc - -!-----------------------Initializing----------------------------------- - !inform of a bad riemann problem from the start - if((qr(1,i-1).lt.0.d0).or.(ql(1,i) .lt. 
0.d0)) then - write(*,*) 'Negative input: hl,hr,i=',qr(1,i-1),ql(1,i),i - endif - - -c !set normal direction - if (ixy.eq.1) then - mu=2 - nv=3 - else - mu=3 - nv=2 - endif - - !zero (small) negative values if they exist - if (qr(1,i-1).lt.0.d0) then - qr(1,i-1)=0.d0 - qr(2,i-1)=0.d0 - qr(3,i-1)=0.d0 - endif - - if (ql(1,i).lt.0.d0) then - ql(1,i)=0.d0 - ql(2,i)=0.d0 - ql(3,i)=0.d0 - endif - - !skip problem if in a completely dry area - if (qr(1,i-1) <= drytol .and. ql(1,i) <= drytol) then - go to 30 - endif - - !Riemann problem variables - hL = qr(1,i-1) - hR = ql(1,i) - huL = qr(mu,i-1) - huR = ql(mu,i) - bL = auxr(1,i-1) - bR = auxl(1,i) - if (pressure_forcing) then - pL = auxr(pressure_index, i-1) - pR = auxl(pressure_index, i) - end if - - hvL=qr(nv,i-1) - hvR=ql(nv,i) - - !check for wet/dry boundary - if (hR.gt.drytol) then - uR=huR/hR - vR=hvR/hR - phiR = 0.5d0*g*hR**2 + huR**2/hR - else - hR = 0.d0 - huR = 0.d0 - hvR = 0.d0 - uR = 0.d0 - vR = 0.d0 - phiR = 0.d0 - endif - - if (hL.gt.drytol) then - uL=huL/hL - vL=hvL/hL - phiL = 0.5d0*g*hL**2 + huL**2/hL - else - hL=0.d0 - huL=0.d0 - hvL=0.d0 - uL=0.d0 - vL=0.d0 - phiL = 0.d0 - endif - - wall(1) = 1.d0 - wall(2) = 1.d0 - wall(3) = 1.d0 - if (hR.le.drytol) then - call riemanntype(hL,hL,uL,-uL,hstar,s1m,s2m, - & rare1,rare2,1,drytol,g) - hstartest=max(hL,hstar) - if (hstartest+bL.lt.bR) then !right state should become ghost values that mirror left for wall problem -c bR=hstartest+bL - wall(2)=0.d0 - wall(3)=0.d0 - hR=hL - huR=-huL - bR=bL - phiR=phiL - uR=-uL - vR=vL - elseif (hL+bL.lt.bR) then - bR=hL+bL - endif - elseif (hL.le.drytol) then ! right surface is lower than left topo - call riemanntype(hR,hR,-uR,uR,hstar,s1m,s2m, - & rare1,rare2,1,drytol,g) - hstartest=max(hR,hstar) - if (hstartest+bR.lt.bL) then !left state should become ghost values that mirror right -c bL=hstartest+bR - wall(1)=0.d0 - wall(2)=0.d0 - hL=hR - huL=-huR - bL=bR - phiL=phiR - uL=-uR - vL=vR - elseif (hR+bR.lt.bL) then - bL=hR+bR - endif - endif - - !determine wave speeds - sL=uL-sqrt(g*hL) ! 1 wave speed of left state - sR=uR+sqrt(g*hR) ! 2 wave speed of right state - - uhat=(sqrt(g*hL)*uL + sqrt(g*hR)*uR)/(sqrt(g*hR)+sqrt(g*hL)) ! Roe average - chat=sqrt(g*0.5d0*(hR+hL)) ! Roe average - sRoe1=uhat-chat ! Roe wave speed 1 wave - sRoe2=uhat+chat ! Roe wave speed 2 wave - - sE1 = min(sL,sRoe1) ! Eindfeldt speed 1 wave - sE2 = max(sR,sRoe2) ! Eindfeldt speed 2 wave - - !--------------------end initializing...finally---------- - !solve Riemann problem. - - maxiter = 1 - - call riemann_aug_JCP(maxiter,meqn,mwaves,hL,hR,huL, - & huR,hvL,hvR,bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2, - & drytol,g,rho,sw,fw) - -C call riemann_ssqfwave(maxiter,meqn,mwaves,hL,hR,huL,huR, -C & hvL,hvR,bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2,drytol,g, -C & rho,sw,fw) - -C call riemann_fwave(meqn,mwaves,hL,hR,huL,huR,hvL,hvR, -C & bL,bR,uL,uR,vL,vR,phiL,phiR,pL,pR,sE1,sE2,drytol,g,rho,sw, -C & fw) - -c !eliminate ghost fluxes for wall - do mw=1,3 - sw(mw)=sw(mw)*wall(mw) - - fw(1,mw)=fw(1,mw)*wall(mw) - fw(2,mw)=fw(2,mw)*wall(mw) - fw(3,mw)=fw(3,mw)*wall(mw) - enddo - - do mw=1,mwaves - s(mw,i)=sw(mw) - fwave(1,mw,i)=fw(1,mw) - fwave(mu,mw,i)=fw(2,mw) - fwave(nv,mw,i)=fw(3,mw) -! 
write(51,515) sw(mw),fw(1,mw),fw(2,mw),fw(3,mw) -!515 format("++sw",4e25.16) - enddo - - 30 continue - enddo - - -c==========Capacity for mapping from latitude longitude to physical space==== - if (mcapa.gt.0) then - do i=2-mbc,mx+mbc - if (ixy.eq.1) then - dxdc=(earth_radius*deg2rad) - else - dxdc=earth_radius*cos(auxl(3,i))*deg2rad - endif - - do mw=1,mwaves -c if (s(mw,i) .gt. 316.d0) then -c # shouldn't happen unless h > 10 km! -c write(6,*) 'speed > 316: i,mw,s(mw,i): ',i,mw,s(mw,i) -c endif - s(mw,i)=dxdc*s(mw,i) - fwave(1,mw,i)=dxdc*fwave(1,mw,i) - fwave(2,mw,i)=dxdc*fwave(2,mw,i) - fwave(3,mw,i)=dxdc*fwave(3,mw,i) - enddo - enddo - endif - -c=============================================================================== - - -c============= compute fluctuations============================================= - - do i=2-mbc,mx+mbc - do mw=1,mwaves - if (s(mw,i) < 0.d0) then - amdq(1:3,i) = amdq(1:3,i) + fwave(1:3,mw,i) - else if (s(mw,i) > 0.d0) then - apdq(1:3,i) = apdq(1:3,i) + fwave(1:3,mw,i) - else - amdq(1:3,i) = amdq(1:3,i) + 0.5d0 * fwave(1:3,mw,i) - apdq(1:3,i) = apdq(1:3,i) + 0.5d0 * fwave(1:3,mw,i) - endif - enddo - enddo - - if (debug) then - do i=2-mbc,mx+mbc - do m=1,meqn - write(51,151) m,i,amdq(m,i),apdq(m,i) - write(51,152) fwave(m,1,i),fwave(m,2,i),fwave(m,3,i) - 151 format("+++ ampdq ",2i4,2e25.15) - 152 format("+++ fwave ",8x,3e25.15) - enddo - enddo - endif - - return - end subroutine diff --git a/src/2d/bouss/rpt2_geoclaw_sym.f b/src/2d/bouss/rpt2_geoclaw_sym.f deleted file mode 100644 index ce51c0d3c..000000000 --- a/src/2d/bouss/rpt2_geoclaw_sym.f +++ /dev/null @@ -1,260 +0,0 @@ -! ===================================================== - subroutine rpt2(ixy,imp,maxm,meqn,mwaves,maux,mbc,mx, - & ql,qr,aux1,aux2,aux3,asdq,bmasdq,bpasdq) -! ===================================================== - use geoclaw_module, only: g => grav, tol => dry_tolerance - use geoclaw_module, only: coordinate_system,earth_radius,deg2rad - - implicit none -! -! # Riemann solver in the transverse direction using an einfeldt -! Jacobian. - -!-----------------------last modified 1/10/05---------------------- - - integer ixy,maxm,meqn,maux,mwaves,mbc,mx,imp - - double precision ql(meqn,1-mbc:maxm+mbc) - double precision qr(meqn,1-mbc:maxm+mbc) - double precision asdq(meqn,1-mbc:maxm+mbc) - double precision bmasdq(meqn,1-mbc:maxm+mbc) - double precision bpasdq(meqn,1-mbc:maxm+mbc) - double precision aux1(maux,1-mbc:maxm+mbc) - double precision aux2(maux,1-mbc:maxm+mbc) - double precision aux3(maux,1-mbc:maxm+mbc) - - double precision s(mwaves) - double precision r(meqn,mwaves) - double precision beta(mwaves) - double precision abs_tol - double precision hl,hr,hul,hur,hvl,hvr,vl,vr,ul,ur,bl,br - double precision uhat,vhat,hhat,roe1,roe3,s1,s2,s3,s1down,s3up - double precision delf1,delf2,delf3,dxdcd,dxdcu - double precision dxdcm,dxdcp,topo1,topo3,eta - - integer i,m,mw,mu,mv - - !write(83,*) 'rpt2, imp = ',imp - - ! 
initialize all components to 0: - bmasdq(:,:) = 0.d0 - bpasdq(:,:) = 0.d0 - - abs_tol=tol - - if (ixy.eq.1) then - mu = 2 - mv = 3 - else - mu = 3 - mv = 2 - endif - - do i=2-mbc,mx+mbc - - hl=qr(1,i-1) - hr=ql(1,i) - hul=qr(mu,i-1) - hur=ql(mu,i) - hvl=qr(mv,i-1) - hvr=ql(mv,i) - -!===========determine velocity from momentum=========================== - if (hl.lt.abs_tol) then - hl=0.d0 - ul=0.d0 - vl=0.d0 - else - ul=hul/hl - vl=hvl/hl - endif - - if (hr.lt.abs_tol) then - hr=0.d0 - ur=0.d0 - vr=0.d0 - else - ur=hur/hr - vr=hvr/hr - endif - - do mw=1,mwaves - s(mw)=0.d0 - beta(mw)=0.d0 - do m=1,meqn - r(m,mw)=0.d0 - enddo - enddo - dxdcp = 1.d0 - dxdcm = 1.d0 - - if (hl <= tol .and. hr <= tol) go to 90 - -* !check and see if cell that transverse waves are going in is high and dry - if (imp.eq.1) then - eta = qr(1,i-1) + aux2(1,i-1) - topo1 = aux1(1,i-1) - topo3 = aux3(1,i-1) -c s1 = vl-sqrt(g*hl) -c s3 = vl+sqrt(g*hl) -c s2 = 0.5d0*(s1+s3) - else - eta = ql(1,i) + aux2(1,i) - topo1 = aux1(1,i) - topo3 = aux3(1,i) -c s1 = vr-sqrt(g*hr) -c s3 = vr+sqrt(g*hr) -c s2 = 0.5d0*(s1+s3) - endif - if (eta.lt.max(topo1,topo3)) go to 90 - - if (coordinate_system.eq.2) then - if (ixy.eq.2) then - dxdcp=(earth_radius*deg2rad) - dxdcm = dxdcp - else - if (imp.eq.1) then - dxdcp = earth_radius*cos(aux3(3,i-1))*deg2rad - dxdcm = earth_radius*cos(aux1(3,i-1))*deg2rad - else - dxdcp = earth_radius*cos(aux3(3,i))*deg2rad - dxdcm = earth_radius*cos(aux1(3,i))*deg2rad - endif - endif - endif - -c=====Determine some speeds necessary for the Jacobian================= -c vhat=(vr*dsqrt(hr))/(dsqrt(hr)+dsqrt(hl)) + -c & (vl*dsqrt(hl))/(dsqrt(hr)+dsqrt(hl)) -c -c uhat=(ur*dsqrt(hr))/(dsqrt(hr)+dsqrt(hl)) + -c & (ul*dsqrt(hl))/(dsqrt(hr)+dsqrt(hl)) -c hhat=(hr+hl)/2.d0 - - ! Note that we used left right states to define Roe averages, - ! which is consistent with those used in rpn2. - ! But now we are computing upgoing, downgoing waves either in - ! cell on left (if imp==1) or on right (if imp==2) so we - ! should perhaps use Roe averages based on values above or below, - ! but these aren't available. - - !roe1=vhat-dsqrt(g*hhat) - !roe3=vhat+dsqrt(g*hhat) - - ! modified to at least use hl,vl or hr,vr properly based on imp: - ! (since s1 and s3 are now vertical velocities, - ! it made no sense to use h,v in cell i-1 for downgoing - ! and cell i for upgoing) - - if (imp == 1) then - ! asdq is leftgoing, use q from cell i-1: - if (hl <= tol) go to 90 - s1 = vl-dsqrt(g*hl) - s3 = vl+dsqrt(g*hl) - s2 = vl - uhat = ul - hhat = hl - else - ! asdq is rightgoing, use q from cell i: - if (hr <= tol) go to 90 - s1 = vr-dsqrt(g*hr) - s3 = vr+dsqrt(g*hr) - s2 = vr - uhat = ur - hhat = hr - endif - - ! don't use Roe averages: - !s1=dmin1(roe1,s1down) - !s3=dmax1(roe3,s3up) - - !s2=0.5d0*(s1+s3) - - s(1)=s1 - s(2)=s2 - s(3)=s3 -c=======================Determine asdq decomposition (beta)============ - delf1=asdq(1,i) - delf2=asdq(mu,i) - delf3=asdq(mv, i) - - ! fixed bug in beta(2): uhat in place of s(2) - beta(1) = (s3*delf1/(s3-s1))-(delf3/(s3-s1)) - beta(2) = -uhat*delf1 + delf2 - beta(3) = (delf3/(s3-s1))-(s1*delf1/(s3-s1)) -c======================End ================================================= - -c=====================Set-up eigenvectors=================================== - r(1,1) = 1.d0 - r(2,1) = uhat ! fixed bug, uhat not s2 - r(3,1) = s1 - - r(1,2) = 0.d0 - r(2,2) = 1.d0 - r(3,2) = 0.d0 - - r(1,3) = 1.d0 - r(2,3) = uhat ! 
fixed bug, uhat not s2 - r(3,3) = s3 -c============================================================================ -90 continue -c============= compute fluctuations========================================== - - bmasdq(1,i)=0.0d0 - bpasdq(1,i)=0.0d0 - bmasdq(2,i)=0.0d0 - bpasdq(2,i)=0.0d0 - bmasdq(3,i)=0.0d0 - bpasdq(3,i)=0.0d0 - - do mw=1,3 - if ((abs(s(mw)) > 0.d0) .and. - & (abs(s(mw)) < 0.001d0*dsqrt(g*hhat))) then - ! split correction symmetrically if nearly zero - ! Note wave drops out if s(mw)==0 exactly, so don't split - bmasdq(1,i) =bmasdq(1,i) + - & 0.5d0*dxdcm*s(mw)*beta(mw)*r(1,mw) - bmasdq(mu,i)=bmasdq(mu,i)+ - & 0.5d0*dxdcm*s(mw)*beta(mw)*r(2,mw) - bmasdq(mv,i)=bmasdq(mv,i)+ - & 0.5d0*dxdcm*s(mw)*beta(mw)*r(3,mw) - bpasdq(1,i) =bpasdq(1,i) + - & 0.5d0*dxdcp*s(mw)*beta(mw)*r(1,mw) - bpasdq(mu,i)=bpasdq(mu,i)+ - & 0.5d0*dxdcp*s(mw)*beta(mw)*r(2,mw) - bpasdq(mv,i)=bpasdq(mv,i)+ - & 0.5d0*dxdcp*s(mw)*beta(mw)*r(3,mw) - elseif (s(mw).lt.0.d0) then - bmasdq(1,i) =bmasdq(1,i) + dxdcm*s(mw)*beta(mw)*r(1,mw) - bmasdq(mu,i)=bmasdq(mu,i)+ dxdcm*s(mw)*beta(mw)*r(2,mw) - bmasdq(mv,i)=bmasdq(mv,i)+ dxdcm*s(mw)*beta(mw)*r(3,mw) - elseif (s(mw).gt.0.d0) then - bpasdq(1,i) =bpasdq(1,i) + dxdcp*s(mw)*beta(mw)*r(1,mw) - bpasdq(mu,i)=bpasdq(mu,i)+ dxdcp*s(mw)*beta(mw)*r(2,mw) - bpasdq(mv,i)=bpasdq(mv,i)+ dxdcp*s(mw)*beta(mw)*r(3,mw) - endif - enddo - - !if ((i>3) .and. (i<6)) then - if (.false.) then - ! DEBUG - write(83,*) 'i = ',i - write(83,831) s(1),s(2),s(3) - write(83,831) beta(1),beta(2),beta(3) - do m=1,3 - write(83,831) r(m,1),r(m,2),r(m,3) - enddo - do m=1,3 - write(83,831) asdq(m,i), bmasdq(m,i), bpasdq(m,i) - 831 format(3d20.12) - enddo - endif -c======================================================================== - enddo ! do i loop -c - - -c - - return - end diff --git a/src/2d/shallow/gauges_module.f90 b/src/2d/shallow/gauges_module.f90 index 82b20e2a7..5c60833d2 100644 --- a/src/2d/shallow/gauges_module.f90 +++ b/src/2d/shallow/gauges_module.f90 @@ -58,8 +58,8 @@ module gauges_module integer :: gauge_num integer :: gdata_bytes - character(len=14) :: file_name ! for header (and data if 'ascii') - character(len=14) :: file_name_bin ! used if file_format='binary' + character(len=24) :: file_name ! for header (and data if 'ascii') + character(len=24) :: file_name_bin ! used if file_format='binary' ! Location in time and space real(kind=8) :: x, y, t_start, t_end @@ -116,6 +116,7 @@ subroutine set_gauges(restart, num_eqn, num_aux, fname) integer, parameter :: UNIT = 7 character(len=128) :: header_1 character(len=40) :: q_column, aux_column + character(len=15) :: numstr if (.not.module_setup) then @@ -207,17 +208,15 @@ subroutine set_gauges(restart, num_eqn, num_aux, fname) ! Create gauge output files do i = 1, num_gauges - gauges(i)%file_name = 'gaugexxxxx.txt' ! ascii num = gauges(i)%gauge_num - do pos = 10, 6, -1 - digit = mod(num,10) - gauges(i)%file_name(pos:pos) = char(ichar('0') + digit) - num = num / 10 - end do + + ! convert num to string numstr with zero padding if <5 digits + ! since we want format gauge00012.txt or gauge1234567.txt: + write (numstr,'(I0.5)') num + gauges(i)%file_name = 'gauge'//trim(numstr)//'.txt' if (gauges(i)%file_format >= 2) then - gauges(i)%file_name_bin = gauges(i)%file_name - gauges(i)%file_name_bin(12:14) = 'bin' + gauges(i)%file_name_bin = 'gauge'//trim(numstr)//'.bin' endif @@ -243,7 +242,7 @@ subroutine set_gauges(restart, num_eqn, num_aux, fname) if (.not. restart) then ! 
Write header to .txt file: - header_1 = "('# gauge_id= ',i5,' " // & + header_1 = "('# gauge_id= ',i0,' " // & "location=( ',1e17.10,' ',1e17.10,' ) " // & "num_var= ',i2)" write(OUTGAUGEUNIT, header_1) gauges(i)%gauge_num, & diff --git a/src/2d/shallow/src2.f90 b/src/2d/shallow/src2.f90 index dc603c36e..376daf7e0 100644 --- a/src/2d/shallow/src2.f90 +++ b/src/2d/shallow/src2.f90 @@ -6,7 +6,7 @@ subroutine src2(meqn,mbc,mx,my,xlower,ylower,dx,dy,q,maux,aux,t,dt) use geoclaw_module, only: manning_break, num_manning use geoclaw_module, only: spherical_distance, coordinate_system use geoclaw_module, only: RAD2DEG, pi, dry_tolerance, DEG2RAD - use geoclaw_module, only: rho_air + use geoclaw_module, only: rho_air, rho use geoclaw_module, only: earth_radius, sphere_source use storm_module, only: wind_forcing, pressure_forcing, wind_drag @@ -40,10 +40,6 @@ subroutine src2(meqn,mbc,mx,my,xlower,ylower,dx,dy,q,maux,aux,t,dt) ! friction source term real(kind=8), parameter :: depth_tolerance = 1.0d-30 - ! Physics - ! Nominal density of water - real(kind=8), parameter :: rho = 1025.d0 - ! ---------------------------------------------------------------- ! Spherical geometry source term(s) ! @@ -154,7 +150,7 @@ subroutine src2(meqn,mbc,mx,my,xlower,ylower,dx,dy,q,maux,aux,t,dt) endif wind_speed = sqrt(aux(wind_index,i,j)**2 & + aux(wind_index+1,i,j)**2) - tau = wind_drag(wind_speed, phi) * rho_air * wind_speed / rho + tau = wind_drag(wind_speed, phi) * rho_air * wind_speed / rho(1) q(2,i,j) = q(2,i,j) + dt * tau * aux(wind_index,i,j) q(3,i,j) = q(3,i,j) + dt * tau * aux(wind_index+1,i,j) endif @@ -195,8 +191,8 @@ subroutine src2(meqn,mbc,mx,my,xlower,ylower,dx,dy,q,maux,aux,t,dt) ! ! Modify momentum in each layer ! if (h > dry_tolerance) then - ! q(2, i, j) = q(2, i, j) - dt * h * P_gradient(1) / rho - ! q(3, i, j) = q(3, i, j) - dt * h * P_gradient(2) / rho + ! q(2, i, j) = q(2, i, j) - dt * h * P_gradient(1) / rho(1) + ! q(3, i, j) = q(3, i, j) - dt * h * P_gradient(2) / rho(1) ! end if ! enddo ! enddo diff --git a/src/2d/shallow/surge/model_storm_module.f90 b/src/2d/shallow/surge/model_storm_module.f90 index cce7445a9..b49e98120 100644 --- a/src/2d/shallow/surge/model_storm_module.f90 +++ b/src/2d/shallow/surge/model_storm_module.f90 @@ -51,6 +51,19 @@ module model_storm_module end type model_storm_type + ! How to deterimine which way a storm should be made to spin + ! The default defers simply to assuming y is a latitude + ! Here either an integer or bool can be returned but as implemented 1 + ! refers to the Northern Hemisphere and therefore causes the storm to spin + ! in a counter-clockwise direction. + abstract interface + logical pure function rotation_def(x, y) + implicit none + real(kind=8), intent(in) :: x, y + end function rotation_def + end interface + procedure(rotation_def), pointer :: rotation + ! Interal tracking variables for storm integer, private :: last_storm_index @@ -68,12 +81,6 @@ module model_storm_module ! track to be close to but not equal the start time of the simulation real(kind=8), parameter :: TRACKING_TOLERANCE = 1d-10 - ! 
Global constants #TODO: Some of these are in geoclaw already - real(kind=8) :: pi=3.1415927 - real(kind=8) :: omega=7.2921e-5 - real(kind=8) :: chi=1.0 - real(kind=8) :: alpha=1.0 - contains @@ -153,8 +160,8 @@ subroutine set_storm(storm_data_path, storm, storm_spec_type, log_unit) 0.5d0 * (x(1) + y(1)), y(2)) storm%velocity(2, i) = sign(ds / dt, y(2) - x(2)) else - storm%velocity(1, i) = abs(x(2) - x(1)) / dt - storm%velocity(2, i) = abs(y(2) - y(1)) / dt + storm%velocity(1, i) = (y(1) - x(1)) / dt + storm%velocity(2, i) = (y(2) - x(2)) / dt end if end do @@ -338,7 +345,7 @@ subroutine get_storm_data(t, storm, location, velocity, max_wind_radius, & max_wind_speed, central_pressure, & radius, central_pressure_change) - use geoclaw_module, only: deg2rad, latlon2xy, xy2latlon + use geoclaw_module, only: deg2rad, latlon2xy, xy2latlon, coordinate_system implicit none @@ -382,13 +389,20 @@ subroutine get_storm_data(t, storm, location, velocity, max_wind_radius, & ! Convert coordinates temporarily to meters so that we can use ! the pre-calculated m/s velocities from before - x = latlon2xy(storm%track(2:3,i),storm%track(2:3,i)) - x = x + (t - storm%track(1,i)) * storm%velocity(:,i) - - fn = [xy2latlon(x,storm%track(2:3,i)), & - storm%velocity(:,i), storm%max_wind_radius(i), & - storm%max_wind_speed(i), storm%central_pressure(i), & - storm%radius(i), storm%central_pressure_change(i)] + if (coordinate_system == 2) then + x = latlon2xy(storm%track(2:3,i),storm%track(2:3,i)) + x = x + (t - storm%track(1,i)) * storm%velocity(:,i) + fn = [xy2latlon(x,storm%track(2:3,i)), & + storm%velocity(:,i), storm%max_wind_radius(i), & + storm%max_wind_speed(i), storm%central_pressure(i), & + storm%radius(i), storm%central_pressure_change(i)] + else + x = x + (t - storm%track(1,i)) * storm%velocity(:,i) + fn = [x, & + storm%velocity(:,i), storm%max_wind_radius(i), & + storm%max_wind_speed(i), storm%central_pressure(i), & + storm%radius(i), storm%central_pressure_change(i)] + end if else ! Inbetween two forecast time points (the function storm_index ! ensures that we are not before the first data point, i.e. i > 1) @@ -430,11 +444,8 @@ end subroutine get_storm_data pure subroutine adjust_max_wind(tv, mws, mod_mws, convert_height) real (kind=8), intent(inout) :: tv(2) - real (kind=8), intent(in) :: mws - logical, intent(in) :: convert_height - real (kind=8), intent(out) :: mod_mws real (kind=8) :: trans_speed, trans_mod @@ -521,21 +532,21 @@ end subroutine post_process_wind_estimate ! ========================================================================== pure subroutine calculate_polar_coordinate(x, y, sloc, r, theta) - use geoclaw_module, only: deg2rad, coordinate_system - use geoclaw_module, only: spherical_distance + use geoclaw_module, only: deg2rad, coordinate_system + use geoclaw_module, only: spherical_distance - real(kind=8), intent(in) :: x, y, sloc(2) - real(kind=8), intent(out) :: r, theta + real(kind=8), intent(in) :: x, y, sloc(2) + real(kind=8), intent(out) :: r, theta - if (coordinate_system == 2) then - ! lat-long coordinates, uses Haversine formula - r = spherical_distance(x, y, sloc(1), sloc(2)) - theta = atan2((y - sloc(2)) * DEG2RAD,(x - sloc(1)) * DEG2RAD) - else - ! Strictly cartesian - r = sqrt( (x - sloc(1))**2 + (y - sloc(2))**2) - theta = atan2(y - sloc(2), x - sloc(1)) - end if + if (coordinate_system == 2) then + ! lat-long coordinates, uses Haversine formula + r = spherical_distance(x, y, sloc(1), sloc(2)) + theta = atan2((y - sloc(2)),(x - sloc(1))) + else + ! 
Strictly cartesian + r = sqrt( (x - sloc(1))**2 + (y - sloc(2))**2) + theta = atan2((y - sloc(2)), (x - sloc(1))) + end if end subroutine calculate_polar_coordinate @@ -653,7 +664,7 @@ subroutine set_holland_1980_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -725,7 +736,7 @@ subroutine set_holland_2008_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -817,7 +828,7 @@ subroutine set_holland_2010_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -974,6 +985,7 @@ real(kind=8) function evaluate_inner_derivative(f,r_m,v_m,r_a,v_a) result(dMa) ! Variables real(kind=8) :: denominator, M_a + real(kind=8), parameter :: alpha=1.0 ! Calculate necessary components of the derivative to make ! calculations easier @@ -992,6 +1004,7 @@ real(kind=8) function evaluate_v_a(f,r_m,v_m,r_a) result(v_a) ! Input real(kind=8), intent(in) :: f, r_m, v_m, r_a + real(kind=8), parameter :: alpha=1.0 v_a = ((2.0*(r_a/r_m)**2)/(2.0-alpha+alpha*(r_a/r_m)**2))**(1.0/(2.0-alpha)) v_a = v_a*(0.5*f*r_m**2 + r_m*v_m) @@ -1061,6 +1074,7 @@ subroutine integrate_m_out(f,r_0,r_a,res,m_out) ! Parameters and Other variables real(kind=8) :: V_m, dr, r_guess, r_p integer :: i + real(kind=8), parameter :: chi=1.0 ! Intialize m_out(res) = 0.5*f*r_0**2 @@ -1104,6 +1118,8 @@ subroutine solve_hurricane_wind_parameters(f, r_m, v_m, res, r_0, r_a) real(kind=8) :: dr, r real(kind=8) :: inner_res, outer_res real(kind=8), dimension(1:res) :: v_out, v_in + real(kind=8), parameter :: chi=1.0 + real(kind=8), parameter :: alpha=1.0 ! 
Initialize guesses for merge and r_0 r_a = 2.0*r_m @@ -1255,8 +1271,8 @@ subroutine set_SLOSH_fields(maux, mbc, mx, my, xlower, ylower, & trans_speed_x = tv(1) * mwr * r / (mwr**2.d0 + r**2.d0) trans_speed_y = tv(2) * mwr * r / (mwr**2.d0 + r**2.d0) - aux(wind_index,i,j) = wind * merge(-1, 1, y >= 0) * sin(theta) + trans_speed_x - aux(wind_index+1,i,j) = wind * merge(1, -1, y >= 0) * cos(theta) + trans_speed_y + aux(wind_index,i,j) = wind * merge(-1, 1, rotation(x, y)) * sin(theta) + trans_speed_x + aux(wind_index+1,i,j) = wind * merge(1, -1, rotation(x, y)) * cos(theta) + trans_speed_y enddo enddo @@ -1331,7 +1347,7 @@ subroutine set_rankine_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -1419,7 +1435,7 @@ subroutine set_modified_rankine_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -1492,7 +1508,7 @@ subroutine set_deMaria_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo @@ -1584,7 +1600,7 @@ subroutine set_willoughby_fields(maux, mbc, mx, my, xlower, ylower, & call post_process_wind_estimate(maux, mbc, mx, my, i, j, wind, aux, & wind_index, pressure_index, r, radius, tv, mod_mws, theta, & - convert_height, y >= 0) + convert_height, rotation(x, y)) enddo enddo diff --git a/src/2d/shallow/surge/storm_module.f90 b/src/2d/shallow/surge/storm_module.f90 index 7af8b46ae..632cbff90 100644 --- a/src/2d/shallow/surge/storm_module.f90 +++ b/src/2d/shallow/surge/storm_module.f90 @@ -11,7 +11,7 @@ module storm_module - use model_storm_module, only: model_storm_type + use model_storm_module, only: model_storm_type, rotation use data_storm_module, only: data_storm_type implicit none @@ -126,7 +126,7 @@ subroutine set_storm(data_file) ! Locals integer, parameter :: unit = 13 - integer :: i, drag_law + integer :: i, drag_law, rotation_override character(len=200) :: storm_file_path, line if (.not.module_setup) then @@ -153,6 +153,17 @@ subroutine set_storm(data_file) stop "*** ERROR *** Invalid wind drag law." end select read(unit,*) pressure_forcing + read(unit,*) rotation_override + select case(rotation_override) + case(0) + rotation => hemisphere_rotation + case(1) + rotation => N_rotation + case(2) + rotation => S_rotation + case default + stop " *** ERROR *** Roation override invalid." + end select read(unit,*) ! Set some parameters @@ -480,4 +491,26 @@ subroutine output_storm_location(t) end subroutine output_storm_location + ! ========================================================================== + ! Default to assuming y is a latitude and if y >= 0 we are want to spin + ! counter-clockwise + ! ========================================================================== + logical pure function hemisphere_rotation(x, y) result(rotation) + implicit none + real(kind=8), intent(in) :: x, y + rotation = (y >= 0.d0) + end function hemisphere_rotation + ! 
This version just returns the user defined direction + logical pure function N_rotation(x, y) result(rotation) + implicit none + real(kind=8), intent(in) :: x, y + rotation = .true. + end function N_rotation + ! This version just returns the user defined direction + logical pure function S_rotation(x, y) result(rotation) + implicit none + real(kind=8), intent(in) :: x, y + rotation = .false. + end function S_rotation + end module storm_module diff --git a/src/python/geoclaw/data.py b/src/python/geoclaw/data.py index e3d514ffc..83d9fa769 100755 --- a/src/python/geoclaw/data.py +++ b/src/python/geoclaw/data.py @@ -545,17 +545,19 @@ class SurgeData(clawpack.clawutil.data.ClawData): 'SLOSH': 4, 'rankine': 5, 'modified-rankine': 6, - 'DeMaria': 7 + 'DeMaria': 7, + 'willoughby': 9, } - storm_spec_not_implemented = ['CLE'] + storm_spec_not_implemented = ['CLE', 'willoughby'] def __init__(self): - super(SurgeData,self).__init__() + super(SurgeData, self).__init__() # Source term controls - self.add_attribute('wind_forcing',False) - self.add_attribute('drag_law',1) - self.add_attribute('pressure_forcing',False) + self.add_attribute('wind_forcing', False) + self.add_attribute('drag_law', 1) + self.add_attribute('pressure_forcing', False) + self.add_attribute('rotation_override', 0) # Algorithm parameters - Indexing is python based self.add_attribute("wind_index", 4) @@ -563,8 +565,8 @@ def __init__(self): self.add_attribute("display_landfall_time", False) # AMR parameters - self.add_attribute('wind_refine',[20.0,40.0,60.0]) - self.add_attribute('R_refine',[60.0e3,40e3,20e3]) + self.add_attribute('wind_refine', [20.0,40.0,60.0]) + self.add_attribute('R_refine', [60.0e3,40e3,20e3]) # Storm parameters self.add_attribute('storm_type', None) # Backwards compatibility @@ -572,7 +574,7 @@ def __init__(self): self.add_attribute("storm_file", None) # File(s) containing data - def write(self,out_file='surge.data',data_source="setrun.py"): + def write(self, out_file='surge.data', data_source="setrun.py"): """Write out the data file to the path given""" # print "Creating data file %s" % out_file @@ -582,6 +584,19 @@ def write(self,out_file='surge.data',data_source="setrun.py"): self.data_write('drag_law', description='(Type of drag law to use)') self.data_write('pressure_forcing', description="(Pressure source term used)") + if isinstance(self.rotation_override, str): + if self.rotation_override.lower() == "normal": + self.rotation_override = 0 + elif "n" in self.rotation_override.lower(): + self.rotation_override = 1 + elif "s" in self.rotation_override.lower(): + self.rotation_override = 2 + else: + raise ValueError("Unknown rotation_override specification.") + else: + self.rotation_override = int(self.rotation_override) + self.data_write('rotation_override', + description="(Override storm rotation)") self.data_write() self.data_write("wind_index", value=self.wind_index + 1, diff --git a/src/python/geoclaw/surge/plot.py b/src/python/geoclaw/surge/plot.py index e57d577e6..4186d2d72 100644 --- a/src/python/geoclaw/surge/plot.py +++ b/src/python/geoclaw/surge/plot.py @@ -41,6 +41,7 @@ surge_data = geodata.SurgeData() + class track_data(object): """Read in storm track data from run output""" @@ -84,12 +85,16 @@ def gauge_locations(current_data, gaugenos='all'): yoffset=0.02) -def gaugetopo(current_data): - q = current_data.q - h = q[0, :] - eta = q[3, :] - topo = eta - h - return topo +def gauge_dry_regions(cd, dry_tolerance=1e-16): + """Masked array of zeros where gauge is dry.""" + return 
np.ma.masked_where(np.abs(cd.gaugesoln.q[0, :]) > dry_tolerance, + np.zeros(cd.gaugesoln.q[0, :].shape)) + + +def gauge_surface(cd, dry_tolerance=1e-16): + """Sea surface at gauge masked when dry.""" + return np.ma.masked_where(np.abs(cd.gaugesoln.q[0, :]) < dry_tolerance, + cd.gaugesoln.q[3, :]) def plot_landfall_gauge(gauge, axes, landfall=0.0, style='b', kwargs={}): @@ -97,6 +102,9 @@ def plot_landfall_gauge(gauge, axes, landfall=0.0, style='b', kwargs={}): This will transform the plot so that it is relative to the landfall value provided. + + This can be done using `plotaxes.time_scale` instead, so this function will + be deprecated and removed in a future release. """ axes = plt.gca() @@ -492,19 +500,21 @@ def plot_track(t, x, y, wind_radius, wind_speed, Pc, name=None): # Returns axes # Storm with category plotting function # ======================================================================== -def add_track(Storm, axes, plot_package=None, category_color=None, legend_loc='best', + + +def add_track(Storm, axes, plot_package=None, category_color=None, legend_loc='best', intensity=False, categorization="NHC", limits=None, track_color='red'): if category_color is None: - category_color = {5: 'red', - 4: 'orange', - 3: 'yellow', - 2: 'blue', # edit color - 1: 'violet', - 0: 'black', - -1: 'gray'} + category_color = {5: 'red', + 4: 'orange', + 3: 'yellow', + 2: 'blue', # edit color + 1: 'violet', + 0: 'black', + -1: 'gray'} category = Storm.category(categorization=categorization) - + # make it if intensity = true # basic plotting @@ -520,52 +530,55 @@ def add_track(Storm, axes, plot_package=None, category_color=None, legend_loc='b axes.set_xlabel("Longitude") axes.set_ylabel("Latitude") - - categories_legend = [] - if intensity and categorization == "NHC": + if intensity and categorization == "NHC": categories_legend = [] - # plotitem = plotaxes.new_plotitem(name='category', plot_type='1d_plot') + # plotitem = plotaxes.new_plotitem(name='category', plot_type='1d_plot') if (-1 in category): - negativeone = mlines.Line2D([], [], color=category_color[-1], marker='s', ls='', label="Tropical Depression") + negativeone = mlines.Line2D( + [], [], color=category_color[-1], marker='s', ls='', label="Tropical Depression") categories_legend.append(negativeone) if (0 in category): - zero = mlines.Line2D([], [], color=category_color[0], marker='s', ls='', label="Tropical Storn") + zero = mlines.Line2D( + [], [], color=category_color[0], marker='s', ls='', label="Tropical Storm") categories_legend.append(zero) if (1 in category): - one = mlines.Line2D([], [], color=category_color[1], marker='s', ls='', label="Category 1") + one = mlines.Line2D([], [], color=category_color[1], + marker='s', ls='', label="Category 1") categories_legend.append(one) if (2 in category): - two = mlines.Line2D([], [], color=category_color[2], marker='s', ls='', label="Category 2") + two = mlines.Line2D([], [], color=category_color[2], + marker='s', ls='', label="Category 2") categories_legend.append(two) if (3 in category): - three = mlines.Line2D([], [], color=category_color[3], marker='s', ls='', label="Category 3") + three = mlines.Line2D( + [], [], color=category_color[3], marker='s', ls='', label="Category 3") categories_legend.append(three) if (4 in category): - four = mlines.Line2D([], [], color=category_color[4], marker='s', ls='', label="Category 4") + four = mlines.Line2D( + [], [], color=category_color[4], marker='s', ls='', label="Category 4") categories_legend.append(four) if (5 in category): - five =
mlines.Line2D([], [], color=category_color[5], marker='s', ls='', label="Category 5") + five = mlines.Line2D( + [], [], color=category_color[5], marker='s', ls='', label="Category 5") categories_legend.append(five) plt.legend(handles=categories_legend, loc=legend_loc) - + # if bounds is not None: # plotitem.pcolor_cmin = bounds[0] # plotitem.pcolor_cmax = bounds[1] return axes - - # if plot_type == 'pcolor' or plot_type == 'imshow': # plotitem = plotaxes.new_plotitem(name='wind', plot_type='2d_pcolor') # plotitem.plot_var = wind_speed diff --git a/src/python/geoclaw/surge/quality.py b/src/python/geoclaw/surge/quality.py index 6584efbdb..e62db2c98 100644 --- a/src/python/geoclaw/surge/quality.py +++ b/src/python/geoclaw/surge/quality.py @@ -9,13 +9,14 @@ class InstabilityError(Exception): pass + def quality_check(model_output_dir, - regions_to_check = [[-81,22,-40,55], # atlantic - [-100,15,-82,32], # gulf - [-88.5,13.25,-70,19.75]], # carribean - frames_to_check = 4, - mean_tol_abs = .01, - min_depth = 300): + regions_to_check=[[-81, 22, -40, 55], # atlantic + [-100, 15, -82, 32], # gulf + [-88.5, 13.25, -70, 19.75]], # carribean + frames_to_check=4, + mean_tol_abs=.01, + min_depth=300): """Run a simple check to flag runs that were potentially numerically unstable. This function looks through the final *frames_to_check* frames of a model output and checks all cells in AMR level 1 that are below *min_depth* and @@ -23,7 +24,7 @@ def quality_check(model_output_dir, to encompass individual basins). If the absolute value of the mean surface height for these cells, which should not really have much surge, is above/mean_tol_abs, it raises an error. - + :Input: - *model_output_dir* (str) Path to the output directory for a given model - *regions_to_check* (list of lists of floats) A list of 4-element lists @@ -33,17 +34,17 @@ def quality_check(model_output_dir, height within any region in *regions_to_check* is above this value (meters). - *min_depth* (float) Only cells that are below *min_depth* (meters) are checked, in order to avoid including cells that potentially should have large surge values. - + :Raises: - InstabilityError if the model is flagged as potentially unstable in one of the regions. 
""" - + # find which frame is last - output_files = glob(join(model_output_dir,'fort.b*')) + output_files = glob(join(model_output_dir, 'fort.b*')) last_frame = int(output_files[-1].split('/')[-1][-4:]) # get sl_init - with open(join(model_output_dir,'geoclaw.data'),'r') as f: + with open(join(model_output_dir, 'geoclaw.data'), 'r') as f: for l in f: if '=: sea_level' in l: sl_init = float(l.strip().split()[0]) @@ -51,44 +52,45 @@ def quality_check(model_output_dir, # bring in solution soln = Solution() for frame in range(last_frame-frames_to_check+1, last_frame+1): - soln.read(frame, path = model_output_dir, file_format='binary', read_aux=False) + soln.read(frame, path=model_output_dir, + file_format='binary', read_aux=False) for r in regions_to_check: all_vals = np.array([]) for s in soln.states: # only looking at lowest AMR level - if s.patch.level >1: + if s.patch.level > 1: continue x = s.grid.c_centers[0] y = s.grid.c_centers[1] - eta = s.q[3,:,:] - topo = eta - s.q[0,:,:] - - # only count - eta = np.where(topo<(-min_depth),eta,np.nan) - in_reg = eta[np.ix_((x[:,0]>=r[0]) & (x[:,0]<=r[2]), - (y[0,:]>=r[1]) & (y[0,:]<=r[3]))] - all_vals = np.concatenate([all_vals,in_reg.flatten()]) + eta = s.q[3, :, :] + topo = eta - s.q[0, :, :] + + # only count + eta = np.where(topo < (-min_depth), eta, np.nan) + in_reg = eta[np.ix_((x[:, 0] >= r[0]) & (x[:, 0] <= r[2]), + (y[0, :] >= r[1]) & (y[0, :] <= r[3]))] + all_vals = np.concatenate([all_vals, in_reg.flatten()]) # adjust for sl_init all_vals = all_vals - sl_init - if all_vals.shape[0]>0: + if all_vals.shape[0] > 0: if abs(np.nanmean(all_vals)) > mean_tol_abs: raise InstabilityError("Model possibly unstable due to large magnitude deep " "ocean surge at end of run ({:.1f} cm in region {})".format( - np.nanmean(all_vals)*100, r)) + np.nanmean(all_vals)*100, r)) - -def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check = None, - frames_to_check = 1): + +def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check=None, + frames_to_check=1): """A common cause of instability is a persistant normal flux at the boundary. This code checks the last frame(s) of a model output and returns the maximum magnitude normal fluxes and currents at each boundary, as well as the maximum magnitude fluxes and currents observed within the entire domain. 
- + :Input: - *model_output_dir* (str) Path to the output directory for a given model - *max_refinement_depth_to_check* (int or None, optional) How many refinement levels to @@ -100,20 +102,20 @@ def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check = No """ # find which frame is last - output_files = glob(join(model_output_dir,'fort.q*')) + output_files = glob(join(model_output_dir, 'fort.q*')) frames = [int(i.split('/')[-1][-4:]) for i in output_files] last_frame = max(frames) - + # get domain and file format - with open(join(model_output_dir,'claw.data'),'r') as f: + with open(join(model_output_dir, 'claw.data'), 'r') as f: for l in f: if '=: lower' in l: - xl,yl = [float(i) for i in l.strip().split()[:2]] + xl, yl = [float(i) for i in l.strip().split()[:2]] elif '=: upper' in l: - xu,yu = [float(i) for i in l.strip().split()[:2]] + xu, yu = [float(i) for i in l.strip().split()[:2]] elif '=: output_format' in l: of = int(l.strip().split()[0]) - 1 - file_format = ['ascii','netcdf','binary'][of] + file_format = ['ascii', 'netcdf', 'binary'][of] maxhxl = -np.inf maxhyl = -np.inf @@ -128,10 +130,11 @@ def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check = No maxcurrx_overall = -np.inf maxcurry_overall = -np.inf - for f in range(last_frame+1-frames_to_check,last_frame+1): + for f in range(last_frame+1-frames_to_check, last_frame+1): soln = Solution() - soln.read(f, path = model_output_dir, file_format=file_format, read_aux=False) + soln.read(f, path=model_output_dir, + file_format=file_format, read_aux=False) for s in soln.states: @@ -139,50 +142,56 @@ def get_max_boundary_fluxes(model_output_dir, max_refinement_depth_to_check = No if max_refinement_depth_to_check is not None: if s.patch.level > max_refinement_depth_to_check: continue - + # get rounding error tolerance delta = s.grid.dimensions[0].delta edge_tol = delta * .001 - + x = s.grid.c_centers[0] xedges = s.grid.c_nodes[0] y = s.grid.c_centers[1] yedges = s.grid.c_nodes[1] - eta = s.q[3,:,:] - h = s.q[0,:,:] - hx = s.q[1,:,:] + eta = s.q[3, :, :] + h = s.q[0, :, :] + hx = s.q[1, :, :] curr_x = hx / h - hy = s.q[2,:,:] + hy = s.q[2, :, :] curr_y = hy / h - topo = eta - s.q[0,:,:] - maxhx_overall = np.nanmax([np.abs(hx).max(),maxhx_overall]) - maxhy_overall = np.nanmax([np.abs(hy).max(),maxhy_overall]) - maxcurrx_overall = np.nanmax([np.nanmax(np.abs(curr_x)),maxcurrx_overall]) - maxcurry_overall = np.nanmax([np.nanmax(np.abs(curr_y)),maxcurry_overall]) - - if abs(xedges[0,0] - xl) < edge_tol: - maxhxl = np.nanmax([maxhxl,np.nanmax(np.abs(hx[0,:]))]) - maxcurrxl = np.nanmax([maxcurrxl,np.nanmax(np.abs(curr_x[0,:]))]) - if abs(xedges[-1,0] - xu) < edge_tol: - maxhxu = np.nanmax([maxhxu,np.nanmax(np.abs(hx[-1,:]))]) - maxcurrxu = np.nanmax([maxcurrxu,np.nanmax(np.abs(curr_x[-1,:]))]) - if abs(yedges[0,0] - yl) < edge_tol: - maxhyl = np.nanmax([maxhyl,np.nanmax(np.abs(hy[:,0]))]) - maxcurryl = np.nanmax([maxcurryl,np.nanmax(np.abs(curr_y[:,0]))]) - if abs(yedges[0,-1] - yu) < edge_tol: - maxhyu = np.nanmax([maxhyu,np.nanmax(np.abs(hy[:,-1]))]) - maxcurryu = np.nanmax([maxcurryu,np.nanmax(np.abs(curr_y[:,-1]))]) - - return ({'max_normal_fluxes':{'W': maxhxl, - 'E': maxhxu, - 'N': maxhyu, - 'S': maxhyl}, - 'max_normal_currents':{'W': maxcurrxl, - 'E': maxcurrxu, - 'N': maxcurryu, - 'S': maxcurryl}, + topo = eta - s.q[0, :, :] + maxhx_overall = np.nanmax([np.abs(hx).max(), maxhx_overall]) + maxhy_overall = np.nanmax([np.abs(hy).max(), maxhy_overall]) + maxcurrx_overall = np.nanmax( + 
[np.nanmax(np.abs(curr_x)), maxcurrx_overall]) + maxcurry_overall = np.nanmax( + [np.nanmax(np.abs(curr_y)), maxcurry_overall]) + + if abs(xedges[0, 0] - xl) < edge_tol: + maxhxl = np.nanmax([maxhxl, np.nanmax(np.abs(hx[0, :]))]) + maxcurrxl = np.nanmax( + [maxcurrxl, np.nanmax(np.abs(curr_x[0, :]))]) + if abs(xedges[-1, 0] - xu) < edge_tol: + maxhxu = np.nanmax([maxhxu, np.nanmax(np.abs(hx[-1, :]))]) + maxcurrxu = np.nanmax( + [maxcurrxu, np.nanmax(np.abs(curr_x[-1, :]))]) + if abs(yedges[0, 0] - yl) < edge_tol: + maxhyl = np.nanmax([maxhyl, np.nanmax(np.abs(hy[:, 0]))]) + maxcurryl = np.nanmax( + [maxcurryl, np.nanmax(np.abs(curr_y[:, 0]))]) + if abs(yedges[0, -1] - yu) < edge_tol: + maxhyu = np.nanmax([maxhyu, np.nanmax(np.abs(hy[:, -1]))]) + maxcurryu = np.nanmax( + [maxcurryu, np.nanmax(np.abs(curr_y[:, -1]))]) + + return ({'max_normal_fluxes': {'W': maxhxl, + 'E': maxhxu, + 'N': maxhyu, + 'S': maxhyl}, + 'max_normal_currents': {'W': maxcurrxl, + 'E': maxcurrxu, + 'N': maxcurryu, + 'S': maxcurryl}, 'domain_maxs': {'hu': maxhx_overall, 'hv': maxhy_overall, 'u': maxcurrx_overall, - 'v': maxcurry_overall}}) \ No newline at end of file + 'v': maxcurry_overall}}) diff --git a/src/python/geoclaw/surge/storm.py b/src/python/geoclaw/surge/storm.py index 64f8e868d..912d6a388 100644 --- a/src/python/geoclaw/surge/storm.py +++ b/src/python/geoclaw/surge/storm.py @@ -37,10 +37,10 @@ import argparse import datetime -import numpy +import numpy as np +import pandas as pd import clawpack.geoclaw.units as units -import clawpack.clawutil.data # ============================================================================= # Common acronyms across formats @@ -61,12 +61,12 @@ TCVitals_Basins = {"L": "North Atlantic", "E": "North East Pacific", "C": "North Central Pacific", - "W": "North West Pacific", - "B": "Bay of Bengal (North Indian Ocean)", - "A": "Arabian Sea (North Indian Ocean)", - "Q": "South Atlantic", - "P": "South Pacific", - "S": "South Indian Ocean"} + "W": "North West Pacific", + "B": "Bay of Bengal (North Indian Ocean)", + "A": "Arabian Sea (North Indian Ocean)", + "Q": "South Atlantic", + "P": "South Pacific", + "S": "South Indian Ocean"} # Tropical Cyclone Designations # see https://www.nrlmry.navy.mil/atcf_web/docs/database/new/abrdeck.html @@ -113,6 +113,7 @@ missing_necessary_data_warning_str = ("No storm points in the input file" + "had both a max wind speed and a central pressure observation.") + class NoDataError(ValueError): """Exception to raise when no valid data in input file""" pass @@ -136,25 +137,26 @@ class Storm(object): *TODO:* Add description of unit handling :Attributes: - - *t* (list(datetime.datetiem)) Contains the time at which each entry of - the other arrays are at. These are expected to be *datetime* objects. - Note that when written some formats require a *time_offset* to be set. - - *eye_location* (ndarray(:, :)) location of the eye of the storm. - Default units are in signed decimcal longitude and latitude. + - *t* (list(float) or list(datetime.datetiem)) Contains the time at which + each entry of the other arrays are at. These are expected to + be *datetime* objects. Note that when written some formats require + a *time_offset* to be set. + - *eye_location* (ndarray(:, :)) location of the eye of the storm. Default + units are in signed decimal longitude and latitude. - *max_wind_speed* (ndarray(:)) Maximum wind speed. Default units are meters/second. - *max_wind_radius* (ndarray(:)) Radius at which the maximum wind speed occurs. Default units are meters. 
- - *central_pressure* (ndarray(:)) Central pressure of storm. Default - units are Pascals. + - *central_pressure* (ndarray(:)) Central pressure of storm. Default units + are Pascals. - *storm_radius* (ndarray(:)) Radius of storm, often defined as the last closed iso-bar of pressure. Default units are meters. - *time_offset* (datetime.datetime) A date time that as an offset for the - simulation time. This will default to the beginning of the first of the - year that the first time point is found in. + simulation time. This will default to the beginning of the first of + the year that the first time point is found in. - *wind_speeds* (ndarray(:, :)) Wind speeds defined in every record, such - as 34kt, 50kt, 64kt, etc and their radii. Default units are meters/second - and meters. + as 34kt, 50kt, 64kt, etc and their radii. Default units are + meters/second and meters. :Initialization: 1. Read in existing file at *path*. @@ -194,7 +196,7 @@ def __init__(self, path=None, file_format="ATCF", **kwargs): self.wind_speeds = None # Storm descriptions - not all formats provide these - self.name = None + self.name = None # Possibly a list of a storm's names self.basin = None # Basin containing storm self.ID = None # ID code - depends on format self.classification = None # Classification of storm (e.g. HU) @@ -207,10 +209,12 @@ def __init__(self, path=None, file_format="ATCF", **kwargs): # Basic object support def __str__(self): r"""""" - output = "Name: %s" % self.name - output = "\n".join((output, "Dates: %s - %s" % (self.t[0].isoformat(), - self.t[-1].isoformat()) - )) + output = f"Name: {self.name}\n" + if isinstance(self.t[0], datetime.datetime): + output += f"Dates: {self.t[0].isoformat()}" + output += f" - {self.t[-1].isoformat()}" + else: + output += f"Dates: {self.t[0]} - {self.t[-1]}" return output def __repr__(self): @@ -270,15 +274,20 @@ def read_geoclaw(self, path, verbose=False): - *verbose* (bool) Output more info regarding reading. """ + # Attempt to get name from file if it follows the convention name.storm + if os.path.splitext(path)[-1] == ".storm": + self.name = os.path.splitext(os.path.basename(path))[0] + + # Read header with open(path, 'r') as data_file: num_casts = int(data_file.readline()) self.time_offset = datetime.datetime.strptime( - data_file.readline()[:19], - '%Y-%m-%dT%H:%M:%S') - - data = numpy.loadtxt(path, skiprows=3) + data_file.readline()[:19], + '%Y-%m-%dT%H:%M:%S') + # Read rest of data + data = np.loadtxt(path, skiprows=3) num_forecasts = data.shape[0] - self.eye_location = numpy.empty((2, num_forecasts)) + self.eye_location = np.empty((2, num_forecasts)) assert(num_casts == num_forecasts) self.t = [self.time_offset + datetime.timedelta(seconds=data[i, 0]) for i in range(num_forecasts)] @@ -299,70 +308,85 @@ def read_atcf(self, path, verbose=False): - *path* (string) Path to the file to be read. - *verbose* (bool) Output more info regarding reading.
""" - try: - import pandas as pd - except ImportError as e: - print("read_atcf currently requires pandas to work.") - raise e # See here for the ATCF format documentation: # https://www.nrlmry.navy.mil/atcf_web/docs/database/new/abdeck.txt + + # Slightly more robust converter for ATCF data fields that can be + # missing + def num_converter(x): + if isinstance(x, str): + if len(x.strip()) == 0: + # Only whitespace + return np.nan + else: + # Assume this is still a number + return float(x) + elif x is None: + return np.nan + return float(x) + df = pd.read_csv(path, engine="python", sep=",+", names=[ - "BASIN", "CY", "YYYYMMDDHH", "TECHNUM", "TECH", "TAU", - "LAT", "LON", "VMAX", "MSLP", "TY", - "RAD", "WINDCODE", "RAD1", "RAD2", "RAD3", "RAD4", - "POUTER", "ROUTER", "RMW", "GUSTS", "EYE", "SUBREGION", - "MAXSEAS", "INITIALS", "DIR", "SPEED", "STORMNAME", "DEPTH", - "SEAS", "SEASCODE", "SEAS1", "SEAS2", "SEAS3", "SEAS4", - "USERDEFINE1", "userdata1", - "USERDEFINE2", "userdata2", - "USERDEFINE3", "userdata3", - "USERDEFINE4", "userdata4", - "USERDEFINE5", "userdata5", - ], + "BASIN", "CY", "YYYYMMDDHH", "TECHNUM", "TECH", "TAU", + "LAT", "LON", "VMAX", "MSLP", "TY", + "RAD", "WINDCODE", "RAD1", "RAD2", "RAD3", "RAD4", + "POUTER", "ROUTER", "RMW", "GUSTS", "EYE", "SUBREGION", + "MAXSEAS", "INITIALS", "DIR", "SPEED", "STORMNAME", "DEPTH", + "SEAS", "SEASCODE", "SEAS1", "SEAS2", "SEAS3", "SEAS4", + "USERDEFINE1", "userdata1", + "USERDEFINE2", "userdata2", + "USERDEFINE3", "userdata3", + "USERDEFINE4", "userdata4", + "USERDEFINE5", "userdata5", + ], converters={ "YYYYMMDDHH": lambda d: datetime.datetime( int(d[1:5]), int(d[5:7]), int(d[7:9]), int(d[9:11])), "TAU": lambda d: datetime.timedelta(hours=int(d)), "LAT": lambda d: (-.1 if d[-1] == "S" else .1) * int(d.strip("NS ")), "LON": lambda d: (-.1 if d[-1] == "W" else .1) * int(d.strip("WE ")), - }, + "RAD": num_converter, + "RAD": num_converter, + "RAD1": num_converter, + "RAD2": num_converter, + "RAD3": num_converter, + "RAD4": num_converter, + "ROUTER": num_converter, + "RMW": num_converter, + "STORMNAME": lambda d: (d.strip() if isinstance(d, str) else d) + }, dtype={ "BASIN": str, "CY": int, "VMAX": float, "MSLP": float, - "TY": str, - "RAD": float, - "RAD1": float, - "RAD2": float, - "RAD3": float, - "RAD4": float, - "ROUTER": float, - "RMW": float, - }) + "TY": str + }) # Grab data regarding basin and cyclone number from first row self.basin = ATCF_basins[df["BASIN"][0]] self.ID = df["CY"][0] + # Keep around the name as an array + self.name = df["STORMNAME"].to_numpy() + # Take forecast period TAU into consideration df['DATE'] = df["YYYYMMDDHH"] + df["TAU"] df = df[["DATE", "TAU", "TY", "LAT", "LON", "VMAX", "MSLP", - "ROUTER", "RMW", "RAD", "RAD1", "RAD2", "RAD3", "RAD4",]] + "ROUTER", "RMW", "RAD", "RAD1", "RAD2", "RAD3", "RAD4", ]] df = df.sort_values(by=["DATE", "TAU"]).reset_index(drop=True) - # For each DATE, choose best (smallest TAU) available data for c in ["LAT", "LON", "VMAX", "MSLP", "ROUTER", "RMW", "RAD", "RAD1", "RAD2", "RAD3", "RAD4"]: - df[c] = df[c].where(df[c] != 0, numpy.nan) # value 0 means NaN + df[c] = df[c].where(df[c] != 0, np.nan) # value 0 means NaN df[c] = df.groupby("DATE")[c].bfill() df = df.groupby("DATE").first() # Wind profile (occasionally missing for older ATCF storms) # Wind speeds and their radii - df["RAD_MEAN"] = df[["RAD1", "RAD2", "RAD3", "RAD4"]].mean(axis=1, skipna=True) + df["RAD_MEAN"] = df[["RAD1", "RAD2", "RAD3", "RAD4"]].mean( + axis=1, skipna=True) df = df.drop(["TAU", "RAD1", "RAD2", 
"RAD3", "RAD4"], axis=1) df = df.dropna(how="any", subset=["LAT", "LON"]) @@ -380,25 +404,17 @@ def read_atcf(self, path, verbose=False): # max_wind_radius - convert from nm to m - 1.8520000031807990 * 1000.0 # central_pressure - convert from mbar to Pa - 100.0 # Radius of last isobar contour - convert from nm to m - 1.852000003180799d0 * 1000.0 - self.max_wind_speed = units.convert(df["VMAX"].to_numpy(), 'knots', 'm/s') - self.central_pressure = units.convert(df["MSLP"].to_numpy(), 'mbar', 'Pa') + self.max_wind_speed = units.convert( + df["VMAX"].to_numpy(), 'knots', 'm/s') + self.central_pressure = units.convert( + df["MSLP"].to_numpy(), 'mbar', 'Pa') self.max_wind_radius = units.convert(df["RMW"].to_numpy(), 'nmi', 'm') self.storm_radius = units.convert(df["ROUTER"].to_numpy(), 'nmi', 'm') - self.wind_speeds = df[["RAD","RAD_MEAN"]].to_numpy() - self.wind_speeds[:,0] = units.convert(self.wind_speeds[:,0], 'knots', 'm/s') - self.wind_speeds[:,1] = units.convert(self.wind_speeds[:,1], 'nmi', 'm') - - # Set NaNs to -1 to mark them as missing - for ar in [self.max_wind_speed, self.central_pressure, - self.max_wind_radius, self.storm_radius, self.wind_speeds]: - ar[numpy.isnan(ar)] = -1. - - if self.max_wind_speed.min() == -1: - warnings.warn('Some timesteps have missing max wind speed. These will not be written' - ' out to geoclaw format.') - if self.central_pressure.min() == -1: - warnings.warn('Some timesteps have missing central pressure. These will not be written' - ' out to geoclaw format.') + self.wind_speeds = df[["RAD", "RAD_MEAN"]].to_numpy() + self.wind_speeds[:, 0] = units.convert( + self.wind_speeds[:, 0], 'knots', 'm/s') + self.wind_speeds[:, 1] = units.convert( + self.wind_speeds[:, 1], 'nmi', 'm') # Set time offset to first time as a default self.time_offset = self.t[0] @@ -440,13 +456,13 @@ def read_hurdat(self, path, verbose=False): # Parse data block self.t = [] - self.event = numpy.empty(num_lines, dtype=str) - self.classification = numpy.empty(num_lines, dtype=str) - self.eye_location = numpy.empty((num_lines, 2)) - self.max_wind_speed = numpy.empty(num_lines) - self.central_pressure = numpy.empty(num_lines) - self.max_wind_radius = numpy.empty(num_lines) - self.storm_radius = numpy.empty(num_lines) + self.event = np.empty(num_lines, dtype=str) + self.classification = np.empty(num_lines, dtype=str) + self.eye_location = np.empty((num_lines, 2)) + self.max_wind_speed = np.empty(num_lines) + self.central_pressure = np.empty(num_lines) + self.max_wind_radius = np.empty(num_lines) + self.storm_radius = np.empty(num_lines) for (i, line) in enumerate(data_block): if len(line) == 0: @@ -483,28 +499,30 @@ def read_hurdat(self, path, verbose=False): # Intensity information - radii are not included directly in this # format and instead radii of winds above a threshold are included - self.max_wind_speed[i] = units.convert(float(data[6]), 'knots', 'm/s') - self.central_pressure[i] = units.convert(float(data[7]), 'mbar', 'Pa') + self.max_wind_speed[i] = units.convert( + float(data[6]), 'knots', 'm/s') + self.central_pressure[i] = units.convert( + float(data[7]), 'mbar', 'Pa') warnings.warn(missing_data_warning_str) self.max_wind_radius[i] = -1 self.storm_radius[i] = -1 def read_ibtracs(self, path, sid=None, storm_name=None, year=None, start_date=None, - agency_pref = ['wmo', - 'usa', - 'tokyo', - 'newdelhi', - 'reunion', - 'bom', - 'nadi', - 'wellington', - 'cma', - 'hko', - 'ds824', - 'td9636', - 'td9635', - 'neumann', - 'mlc']): + agency_pref=['wmo', + 'usa', + 'tokyo', + 
'newdelhi', + 'reunion', + 'bom', + 'nadi', + 'wellington', + 'cma', + 'hko', + 'ds824', + 'td9636', + 'td9635', + 'neumann', + 'mlc']): r"""Read in IBTrACS formatted storm file This reads in the netcdf-formatted IBTrACS v4 data. You must either pass @@ -546,7 +564,8 @@ def read_ibtracs(self, path, sid=None, storm_name=None, year=None, start_date=No # only allow one method for specifying storms if (sid is not None) and ((storm_name is not None) or (year is not None)): - raise ValueError('Cannot specify both *sid* and *storm_name* or *year*.') + raise ValueError( + 'Cannot specify both *sid* and *storm_name* or *year*.') with xr.open_dataset(path) as ds: @@ -557,7 +576,7 @@ def read_ibtracs(self, path, sid=None, storm_name=None, year=None, start_date=No else: storm_name = storm_name.upper() # in case storm is unnamed - if storm_name.upper() in ['UNNAMED','NO-NAME']: + if storm_name.upper() in ['UNNAMED', 'NO-NAME']: storm_name = 'NOT_NAMED' storm_match = (ds.name == storm_name.encode()) year_match = (ds.time.dt.year == year).any(dim='date_time') @@ -570,10 +589,11 @@ def read_ibtracs(self, path, sid=None, storm_name=None, year=None, start_date=No raise ValueError('Storm/year not found in provided file') else: # see if a date was provided for multiple unnamed storms - assert start_date is not None, ValueError('Multiple storms identified and no start_date specified.') + assert start_date is not None, ValueError( + 'Multiple storms identified and no start_date specified.') start_times = ds.time.isel(date_time=0) - start_date = numpy.datetime64(start_date) + start_date = np.datetime64(start_date) # find storm with start date closest to provided storm_ix = abs(start_times - start_date).argmin() @@ -586,36 +606,35 @@ def read_ibtracs(self, path, sid=None, storm_name=None, year=None, start_date=No raise ValueError('No valid wind speeds found for this storm.') ds = ds.sel(date_time=valid_t) - # list of the agencies that correspond to 'usa_*' variables usa_agencies = [b'atcf', b'hurdat_atl', b'hurdat_epa', b'jtwc_ep', - b'nhc_working_bt', b'tcvightals', b'tcvitals'] + b'nhc_working_bt', b'tcvightals', b'tcvitals'] - - ## Create mapping from wmo_ or usa_agency - ## to the appropriate variable - agency_map = {b'':agency_pref.index('wmo')} + # Create mapping from wmo_ or usa_agency + # to the appropriate variable + agency_map = {b'': agency_pref.index('wmo')} # account for multiple usa agencies for a in usa_agencies: agency_map[a] = agency_pref.index('usa') # map all other agencies to themselves - for i in [a for a in agency_pref if a not in ['wmo','usa']]: + for i in [a for a in agency_pref if a not in ['wmo', 'usa']]: agency_map[i.encode('utf-8')] = agency_pref.index(i) # fill in usa as provider if usa_agency is # non-null when wmo_agency is null - provider = ds.wmo_agency.where(ds.wmo_agency!=b'',ds.usa_agency) + provider = ds.wmo_agency.where(ds.wmo_agency != b'', ds.usa_agency) # get index into from agency that is wmo_provider def map_val_to_ix(a): - func = lambda x: agency_map[x] - return xr.apply_ufunc(func,a, vectorize=True) - pref_agency_ix=map_val_to_ix(provider) + def func(x): return agency_map[x] + return xr.apply_ufunc(func, a, vectorize=True) + pref_agency_ix = map_val_to_ix(provider) - ## GET MAX WIND SPEED and PRES + # GET MAX WIND SPEED and PRES pref_vals = {} - for v in ['wind','pres']: - all_vals = ds[['{}_{}'.format(i,v) for i in agency_pref]].to_array(dim='agency') + for v in ['wind', 'pres']: + all_vals = ds[['{}_{}'.format(i, v) for i in agency_pref]].to_array( + dim='agency') # 
get wmo value val_pref = ds['wmo_'+v] @@ -633,29 +652,26 @@ def map_val_to_ix(a): # add to dict pref_vals[v] = val_pref - - ## THESE CANNOT BE MISSING SO DROP - ## IF EITHER MISSING + # THESE CANNOT BE MISSING SO DROP + # IF EITHER MISSING valid = pref_vals['wind'].notnull() & pref_vals['pres'].notnull() if not valid.any(): raise NoDataError(missing_necessary_data_warning_str) ds = ds.sel(date_time=valid) - for i in ['wind','pres']: + for i in ['wind', 'pres']: pref_vals[i] = pref_vals[i].sel(date_time=valid) - - ## GET RMW and ROCI - ## (these can be missing) - for r in ['rmw','roci']: - order = ['{}_{}'.format(i,r) for i in agency_pref if - '{}_{}'.format(i,r) in ds.data_vars.keys()] + # GET RMW and ROCI + # (these can be missing) + for r in ['rmw', 'roci']: + order = ['{}_{}'.format(i, r) for i in agency_pref if + '{}_{}'.format(i, r) in ds.data_vars.keys()] vals = ds[order].to_array(dim='agency') best_ix = vals.notnull().argmax(dim='agency') val_pref = vals.isel(agency=best_ix) pref_vals[r] = val_pref - - ## CONVERT TO GEOCLAW FORMAT + # CONVERT TO GEOCLAW FORMAT # assign basin to be the basin where track originates # in case track moves across basins @@ -668,36 +684,40 @@ def map_val_to_ix(a): for d in ds.time: print(d) t = d.dt - self.t.append(datetime.datetime(t.year,t.month,t.day,t.hour,t.minute,t.second)) + self.t.append(datetime.datetime(t.year, t.month, + t.day, t.hour, t.minute, t.second)) - ## events + # events self.event = ds.usa_record.values.astype(str) - ## time offset - if (self.event=='L').any(): + # time offset + if (self.event == 'L').any(): # if landfall, use last landfall - self.time_offset = numpy.array(self.t)[self.event=='L'][-1] + self.time_offset = np.array(self.t)[self.event == 'L'][-1] else: - #if no landfall, use last time of storm + # if no landfall, use last time of storm self.time_offset = self.t[-1] # Classification, note that this is not the category of the storm self.classification = ds.usa_status.values - self.eye_location = numpy.array([ds.lon,ds.lat]).T + self.eye_location = np.array([ds.lon, ds.lat]).T # Intensity information - for now, including only common, basic intensity # info. 
# TODO: add more detailed info for storms that have it - self.max_wind_speed = units.convert(pref_vals['wind'],'knots','m/s').where(pref_vals['wind'].notnull(),-1).values - self.central_pressure = units.convert(pref_vals['pres'],'mbar','Pa').where(pref_vals['pres'].notnull(),-1).values - self.max_wind_radius = units.convert(pref_vals['rmw'],'nmi','m').where(pref_vals['rmw'].notnull(),-1).values - self.storm_radius = units.convert(pref_vals['roci'],'nmi','m').where(pref_vals['roci'].notnull(),-1).values + self.max_wind_speed = units.convert( + pref_vals['wind'], 'knots', 'm/s').where(pref_vals['wind'].notnull(), -1).values + self.central_pressure = units.convert(pref_vals['pres'], 'mbar', 'Pa').where( + pref_vals['pres'].notnull(), -1).values + self.max_wind_radius = units.convert(pref_vals['rmw'], 'nmi', 'm').where( + pref_vals['rmw'].notnull(), -1).values + self.storm_radius = units.convert(pref_vals['roci'], 'nmi', 'm').where( + pref_vals['roci'].notnull(), -1).values # warn if you have missing vals for RMW or ROCI if (self.max_wind_radius.max()) == -1 or (self.storm_radius.max() == -1): warnings.warn(missing_data_warning_str) - def read_jma(self, path, verbose=False): r"""Read in JMA formatted storm file @@ -730,13 +750,13 @@ def read_jma(self, path, verbose=False): # Parse data block self.t = [] - self.event = numpy.empty(num_lines, dtype=str) - self.classification = numpy.empty(num_lines, dtype=str) - self.eye_location = numpy.empty((num_lines, 2)) - self.max_wind_speed = numpy.empty(num_lines) - self.central_pressure = numpy.empty(num_lines) - self.max_wind_radius = numpy.empty(num_lines) - self.storm_radius = numpy.empty(num_lines) + self.event = np.empty(num_lines, dtype=str) + self.classification = np.empty(num_lines, dtype=str) + self.eye_location = np.empty((num_lines, 2)) + self.max_wind_speed = np.empty(num_lines) + self.central_pressure = np.empty(num_lines) + self.max_wind_radius = np.empty(num_lines) + self.storm_radius = np.empty(num_lines) for (i, line) in enumerate(data_block): if len(line) == 0: break @@ -758,13 +778,14 @@ def read_jma(self, path, verbose=False): # Intensity information - current the radii are not directly given # Available data includes max/min of radius of winds of 50 and # 30 kts instead - self.central_pressure[i] = units.convert(float(data[5]), 'hPa', 'Pa') - self.max_wind_speed[i] = units.convert(float(data[6]), 'knots', 'm/s') + self.central_pressure[i] = units.convert( + float(data[5]), 'hPa', 'Pa') + self.max_wind_speed[i] = units.convert( + float(data[6]), 'knots', 'm/s') warnings.warn(missing_data_warning_str) self.max_wind_radius[i] = -1 self.storm_radius[i] = -1 - def read_imd(self, path, verbose=False): r"""Extract relevant hurricane data from IMD file and update storm fields with proper values. @@ -778,7 +799,6 @@ def read_imd(self, path, verbose=False): "implemented yet but is planned for a ", "future release.")) - def read_tcvitals(self, path, verbose=False): r"""Extract relevant hurricane data from TCVITALS file and update storm fields with proper values. 
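As a usage sketch of the reworked reader/writer interface in this patch (the file names and the max_wind_radius fill function below are illustrative placeholders, not anything added by the patch):

# Illustrative sketch only -- "my_storm.atcf" and the fill value are placeholders.
import clawpack.geoclaw.surge.storm as storm

# Read an ATCF best-track file; missing fields are now kept as NaNs rather
# than being converted to -1.
atcf_storm = storm.Storm(path="my_storm.atcf", file_format="ATCF")

# Write in GeoClaw format; NaNs can be filled via fill_dict (storm_radius
# already has a 500 km default fill) or the affected times are skipped.
atcf_storm.write("my_storm.storm", file_format="geoclaw",
                 fill_dict={"max_wind_radius": lambda t, storm: 50e3})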
@@ -805,12 +825,12 @@ def read_tcvitals(self, path, verbose=False): # Central_pressure - convert from mbar to Pa - 100.0 # Radius of last isobar contour - convert from km to m - 1000.0 self.t = [] - self.classification = numpy.empty(num_lines, dtype=str) - self.eye_location = numpy.empty((num_lines, 2)) - self.max_wind_speed = numpy.empty(num_lines) - self.central_pressure = numpy.empty(num_lines) - self.max_wind_radius = numpy.empty(num_lines) - self.storm_radius = numpy.empty(num_lines) + self.classification = np.empty(num_lines, dtype=str) + self.eye_location = np.empty((num_lines, 2)) + self.max_wind_speed = np.empty(num_lines) + self.central_pressure = np.empty(num_lines) + self.max_wind_radius = np.empty(num_lines) + self.storm_radius = np.empty(num_lines) for (i, data) in enumerate(data_block): # End at an empty lines - skips lines at the bottom of a file @@ -840,13 +860,14 @@ def read_tcvitals(self, path, verbose=False): # Intensity Information self.max_wind_speed[i] = float(data[12]) - self.central_pressure[i] = units.convert(float(data[9]), 'mbar', 'Pa') + self.central_pressure[i] = units.convert( + float(data[9]), 'mbar', 'Pa') self.max_wind_radius[i] = units.convert(float(data[13]), 'km', 'm') self.storm_radius[i] = units.convert(float(data[11]), 'km', 'm') - # ========================================================================= # Write Routines + def write(self, path, file_format="geoclaw", **kwargs): r"""Write out the storm data to *path* in format *file_format* @@ -869,105 +890,120 @@ def write(self, path, file_format="geoclaw", **kwargs): getattr(self, 'write_%s' % file_format.lower())(path, **kwargs) - def write_geoclaw(self, path, verbose=False, max_wind_radius_fill=None, - storm_radius_fill=None): + def write_geoclaw(self, path, force=False, skip=True, verbose=False, + fill_dict={}, **kwargs): r"""Write out a GeoClaw formatted storm file GeoClaw storm files are read in by the GeoClaw Fortran code. :Input: - *path* (string) Path to the file to be written. + - *skip* (bool) Skip a time if NaNs are found and are not replaced. + Default is `True`. + - *force* (bool) Force output of storm even if there is missing data. + Default is `False`. - *verbose* (bool) Print out additional information when writing. - - *max_wind_radius_fill* (func) Function that can be used to fill in - missing data for `max_wind_radius` values. This defaults to simply - setting the value to -1. The function signature should be - `max_wind_radius(t, storm)` where t is the time of the forecast and - `storm` is the storm object. Note that if this or `storm_radius` - field remains -1 that this data line will be assumed to be redundant - and not be written out. - - *storm_radius_fill* (func) Function that can be used to fill in - missing data for `storm_radius` values. This defaults to simply - setting the value to -1. The function signature should be - `storm_radius(t, storm)` where t is the time of the forecast and - `storm` is the storm object. Note that if this or `max_wind_radius` - field remains -1 that this data line will be assumed to be redundant - and not be written + Default is `False`. + - *fill_dict* (dict) Dictionary of functions to use to fill in missing + data represented by NaNs. The keys are the fields to be filled and + the function signature should be `my_func(t, storm)` where t is the + time of the forecast and `storm` is the storm object. If the + field remains a NaN or a function is not provided these lines will + be assumed redundant and will be omitted.
Note that the older + keyword arguments are put in this dictionary. Currently the one + default function is for `storm_radius`, which sets the value to + 500 km. """ - if max_wind_radius_fill is None: - max_wind_radius_fill = lambda t, storm: -1 - if storm_radius_fill is None: - storm_radius_fill = lambda t, storm: -1 - - # Create list for output - # Leave this first line blank as we need to count the actual valid lines - # that will be left in the file below + # If a filling function is not provided we will provide some defaults + fill_dict.update({"storm_radius": lambda t, storm: 500e3}) + # Handle older interface that had specific fill functions + if "max_wind_radius_fill" in kwargs.keys(): + fill_dict.update( + {"max_wind_radius": kwargs['max_wind_radius_fill']}) + if "storm_radius_fill" in kwargs.keys(): + fill_dict.update({"storm_radius": kwargs['storm_radius_fill']}) + + # Loop through each line of data and if the line is valid, perform the + # necessary work to write it out. Otherwise either raise an exception + # or skip it num_casts = 0 - data_string = [""] - if self.time_offset is None: - data_string.append("None\n\n") - self.time_offset = self.t[0] - else: - data_string.append("%s\n\n" % self.time_offset.isoformat()) + data = [] for n in range(len(self.t)): - # Remove duplicate times - if n > 0: - if self.t[n] == self.t[n - 1]: - continue - - format_string = ("{:19,.8e} " * 7)[:-1] + "\n" - data = [] - if not isinstance(self.time_offset, float): - data.append((self.t[n] - self.time_offset).total_seconds()) - else: - data.append(self.t[n] - self.time_offset) - data.append(self.eye_location[n, 0]) - data.append(self.eye_location[n, 1]) - - if self.max_wind_speed[n] == -1: + if self.t[n] == self.t[n - 1]: + # Skip this time continue - data.append(self.max_wind_speed[n]) - - # Allow custom function to set max wind radius if not - # available - if self.max_wind_radius[n] == -1: - new_wind_radius = max_wind_radius_fill(self.t[n], self) - if new_wind_radius == -1: - continue - else: - data.append(new_wind_radius) - else: - data.append(self.max_wind_radius[n]) - if self.central_pressure[n] == -1: + # Check each value we need for this time to make sure it is valid + valid = True + for name in ["max_wind_speed", "central_pressure", + "max_wind_radius", "storm_radius"]: + if np.isnan(getattr(self, name)[n]): + if name in fill_dict.keys(): + # Fill value with function provided + getattr(self, name)[n] = fill_dict[name]( + self.t[n], self) + elif skip: + # Skip this line + valid = False + if verbose: + # Just warn that a NaN was found but continue + msg = ("*** WARNING: The value {} at {} is a " + + "NaN. Skipping this line.") + warnings.warn(msg.format(name, self.t[n])) + elif not force: + # If we are not asked to force to write raise an + # exception given the NaN + msg = ("The value {} at {} is a NaN and the storm " + + "will not be written in GeoClaw format. 
If " + + "you want to fill in the value provide a " + + "function or set `force=True`.") + raise ValueError(msg.format(name, self.t[n])) + if not valid: continue - data.append(self.central_pressure[n]) - # Allow custom function to set storm radius if not available - if self.storm_radius[n] == -1: - new_storm_radius = storm_radius_fill(self.t[n], self) - if new_storm_radius == -1: - continue - else: - data.append(new_storm_radius) - else: - data.append(self.storm_radius[n]) - - data_string.append(format_string.format(*data)) + # Succeeded, add this time to the output num_casts += 1 + data.append(np.empty(7)) + # If we do not have a time offset use the first valid row as the + # offset time + if self.time_offset is None: + self.time_offset = self.t[n] - # Write to actual file now that we know exactly how many lines it will - # contain + # Time + if not isinstance(self.time_offset, float): + data[-1][0] = (self.t[n] - self.time_offset).total_seconds() + else: + data[-1][0] = self.t[n] - self.time_offset + # Eye-location + data[-1][1:3] = self.eye_location[n, :] + # Max wind speed + data[-1][3] = self.max_wind_speed[n] + # Max wind radius + data[-1][4] = self.max_wind_radius[n] + # Central pressure + data[-1][5] = self.central_pressure[n] + # Outer storm radius + data[-1][6] = self.storm_radius[n] + + # Write out file + format_string = ("{:19,.8e} " * 7)[:-1] + "\n" try: - # Update number of forecasts here - data_string[0] = "%s\n" % num_casts with open(path, "w") as data_file: - for data_line in data_string: - data_file.write(data_line) + # Write header + data_file.write(f"{num_casts}\n") + if isinstance(self.time_offset, datetime.datetime): + data_file.write(f"{self.time_offset.isoformat()}\n\n") + else: + data_file.write(f"{str(self.time_offset)}\n\n") + + # Write data lines + for line in data: + data_file.write(format_string.format(*line)) except Exception as e: - # Remove possibly partially generated file if not successful + # If an exception occurs clean up a partially generated file if os.path.exists(path): os.remove(path) raise e @@ -1073,6 +1109,7 @@ def write_jma(self, path, verbose=False): """ raise NotImplementedError(("Writing out JMA files is not implemented ", "yet but is planned for a future release.")) + # try: # with open(path, 'w') as data_file: # for n in range(self.t.shape[0]): @@ -1124,7 +1161,7 @@ def write_tcvitals(self, path, verbose=False): # ========================================================================= # Other Useful Routines - + def category(self, categorization="NHC", cat_names=False): r"""Categorizes storm based on relevant storm data @@ -1149,7 +1186,7 @@ def category(self, categorization="NHC", cat_names=False): if categorization.upper() == "BEAUFORT": # Beaufort scale below uses knots speeds = units.convert(self.max_wind_speed, "m/s", "knots") - category = (numpy.zeros(speeds.shape) + + category = (np.zeros(speeds.shape) + (speeds >= 1) * (speeds < 4) * 1 + (speeds >= 4) * (speeds < 7) * 2 + (speeds >= 7) * (speeds < 11) * 3 + @@ -1162,16 +1199,16 @@ def category(self, categorization="NHC", cat_names=False): (speeds >= 48) * (speeds < 56) * 10 + (speeds >= 56) * (speeds < 64) * 11 + (speeds >= 64) * 12) - cat_map = { 0: "Calm", - 1: "Light air", - 2: "Light breeze", - 3: "Gentle breeze", - 4: "Moderate breeze", - 5: "Fresh breeze", - 6: "Strong breeze", - 7: "High wind", - 8: "Gale", - 9: "Strong gale", + cat_map = {0: "Calm", + 1: "Light air", + 2: "Light breeze", + 3: "Gentle breeze", + 4: "Moderate breeze", + 5: "Fresh breeze", + 6: "Strong 
breeze", + 7: "High wind", + 8: "Gale", + 9: "Strong gale", 10: "Whole gale", 11: "Violent storm", 12: "Hurricane"} @@ -1181,7 +1218,7 @@ def category(self, categorization="NHC", cat_names=False): # Definitely not in the correct format now # TODO: Add TD and TS designations speeds = units.convert(self.max_wind_speed, "m/s", "knots") - category = (numpy.zeros(speeds.shape) + + category = (np.zeros(speeds.shape) + (speeds < 30) * -1 + (speeds >= 64) * (speeds < 83) * 1 + (speeds >= 83) * (speeds < 96) * 2 + @@ -1189,12 +1226,12 @@ def category(self, categorization="NHC", cat_names=False): (speeds >= 113) * (speeds < 135) * 4 + (speeds >= 135) * 5) cat_map = {-1: "Tropical Depression", - 0: "Tropical Storm", - 1: "Category 1 Hurricane", - 2: "Category 2 Hurricane", - 3: "Category 3 Hurricane", - 4: "Category 4 Hurricane", - 5: "Category 5 Hurricane"} + 0: "Tropical Storm", + 1: "Category 1 Hurricane", + 2: "Category 2 Hurricane", + 3: "Category 3 Hurricane", + 4: "Category 4 Hurricane", + 5: "Category 5 Hurricane"} elif categorization.upper() == "JTWC": raise NotImplementedError("JTWC categorization not implemented.") @@ -1318,17 +1355,17 @@ def fill_rad_w_other_source(t, storm_targ, storm_fill, var, interp_kwargs={}): print("fill_rad_w_other_source currently requires xarray to work.") raise e - fill_da = xr.DataArray(getattr(storm_fill,var), - coords = {'t': getattr(storm_fill,'t')}, - dims = ('t',)) + fill_da = xr.DataArray(getattr(storm_fill, var), + coords={'t': getattr(storm_fill, 't')}, + dims=('t',)) # convert -1 to nan - fill_da = fill_da.where(fill_da>0,numpy.nan) + fill_da = fill_da.where(fill_da > 0, np.nan) # if not all missing, try using storm_fill to fill if fill_da.notnull().any(): - #remove duplicates + # remove duplicates fill_da = fill_da.groupby('t').first() # remove NaNs @@ -1339,19 +1376,19 @@ def fill_rad_w_other_source(t, storm_targ, storm_fill, var, interp_kwargs={}): # try replacing with storm_fill # (assuming atcf has more data points than ibtracs) - if not numpy.isnan(fill_interp): + if not np.isnan(fill_interp): return fill_interp # next, try just interpolating other ibtracs values - targ_da = xr.DataArray(getattr(storm_targ,var), - coords = {'t': getattr(storm_targ,'t')}, - dims = ('t',)) - targ_da = targ_da.where(targ_da>0,numpy.nan) + targ_da = xr.DataArray(getattr(storm_targ, var), + coords={'t': getattr(storm_targ, 't')}, + dims=('t',)) + targ_da = targ_da.where(targ_da > 0, np.nan) if targ_da.notnull().any(): targ_da = targ_da.groupby('t').first() targ_da = targ_da.dropna('t') targ_interp = targ_da.interp(t=[t], kwargs=interp_kwargs).item() - if not numpy.isnan(targ_interp): + if not np.isnan(targ_interp): return targ_interp # if nothing worked, return the missing value (-1) @@ -1399,19 +1436,23 @@ def make_multi_structure(path): fileWrite.writelines(line) else: fileWrite.close() - stormDict[curTime].update({curTrack: Storm(path=os.path.join(os.path.expandvars(os.getcwd()), "Clipped_ATCFs", curTime, curTrack), file_format="ATCF")}) + stormDict[curTime].update({curTrack: Storm(path=os.path.join(os.path.expandvars( + os.getcwd()), "Clipped_ATCFs", curTime, curTrack), file_format="ATCF")}) curTrack = lineArr[4] - fileWrite = open("Clipped_ATCFs/" + curTime + "/" + curTrack, 'w') + fileWrite = open("Clipped_ATCFs/" + + curTime + "/" + curTrack, 'w') fileWrite.writelines(line) else: if curTime != "test": fileWrite.close() - stormDict[curTime].update({curTrack: Storm(path=os.path.join(os.path.expandvars(os.getcwd()), "Clipped_ATCFs", curTime, curTrack), 
file_format="ATCF")}) + stormDict[curTime].update({curTrack: Storm(path=os.path.join(os.path.expandvars( + os.getcwd()), "Clipped_ATCFs", curTime, curTrack), file_format="ATCF")}) curTime = lineArr[2] curTrack = lineArr[4] stormDict[curTime] = {} os.mkdir("Clipped_ATCFs/" + curTime) - fileWrite = open("Clipped_ATCFs/" + curTime + "/" + curTrack, 'w') + fileWrite = open("Clipped_ATCFs/" + + curTime + "/" + curTrack, 'w') fileWrite.writelines(line) return stormDict diff --git a/src/python/geoclaw/util.py b/src/python/geoclaw/util.py index c9be0c0de..cbb41b802 100644 --- a/src/python/geoclaw/util.py +++ b/src/python/geoclaw/util.py @@ -361,7 +361,8 @@ def get_cache_path(product): end_date.strftime(cache_date_fmt)) filename = '{}_{}_{}'.format(time_zone, datum, units) abs_cache_dir = os.path.abspath(cache_dir) - return os.path.join(abs_cache_dir, product, station, dates, filename) + return os.path.join(abs_cache_dir, product, str(station), dates, + filename) def save_to_cache(cache_path, data): # make parent directories if they do not exist