New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Fri Oct 20 16:15:23 2023 |
| | | |
| | | @author: ZMK |
| | | """ |
| | | |
| | | import numpy as np |
| | | import shutil |
| | | import os |
| | | |
| | | |
| | | prefix ='C:\\Users\\ZMK\\Desktop\\xsModel2\\' |
| | | |
| | | baseModel = prefix + 'verifyModel\\' |
| | | |
| | | predictModel= prefix + 'predictModel\\' |
| | | |
| | | predictParamModel= prefix + 'predictParamModel\\' |
| | | |
| | | model_dir = prefix + '0612Model\\' |
| | | |
| | | obswellpath = prefix + 'çæµäº.ini' |
| | | |
| | | obs_well = np.loadtxt(obswellpath, dtype=str,encoding='utf-8') |
| | | |
| | | district_path = prefix +"åºå¿.ini" |
| | | |
| | | district= np.loadtxt(district_path, dtype=str,encoding='utf-8') |
| | | |
| | | pumpwellpath = prefix +'æ½æ°´äº.ini' |
| | | |
| | | pump_well = np.loadtxt(pumpwellpath, dtype=str,encoding='utf-8') |
| | | |
| | | period_path = prefix +"period.json" |
| | | |
| | | areapath = prefix + 'ååº.ini' |
| | | area_array = np.loadtxt(areapath, dtype=str,encoding='utf-8') |
| | | |
| | | # water balance file path |
| | | water_equ_path = prefix + 'water_equ.ini' |
| | | water_equ = np.loadtxt(water_equ_path, dtype=str,encoding='utf-8') |
| | | |
| | | water_equ_path2022 = prefix + 'water_equ2022.ini' |
| | | water_equ2022 = np.loadtxt(water_equ_path2022, dtype=str,encoding='utf-8') |
| | | |
| | | |
| | | model_config ='C:\\Users\\ZMK\\Desktop\\objclipdig\\ModelFlow_xishan\\config.ini' |
| | | |
| | | model3d_path='D:/javaCode/xishan/xishan/xishan/output2/' |
| | | |
| | | modeldata_csv_path ="C:/Users/ZMK/Desktop/xsModel2/0612Model/" |
| | | |
| | | exe_path = 'C:/Users/ZMK/Desktop/objclipdig/ModelFlow_xishan/ModelFlow_xishan.exe' |
| | | |
| | | # call the external model exe |
| | | def callModelexe(): |
| | | os.system(exe_path) |
| | | |
| | | |
| | | # update the exe configuration for the given model |
| | | def updateModelConfig(model_name): |
| | | conf = np.loadtxt(model_config, dtype=str,encoding='utf-8') |
| | | outpath = "outpath=" + model3d_path + model_name |
| | | csvpath = "csvpath=" + modeldata_csv_path + model_name +"/output" |
| | | conf[1]=outpath |
| | | conf[2]=csvpath |
| | | np.savetxt(model_config,conf, newline='\n', fmt='%s' , encoding='utf-8') |
| | | |
| | | |
| | | |
| | | def getPumpWellName(row,column): |
| | | |
| | | for index, r, c,ids, qu ,name in pump_well: |
| | | if r==row and c == column: |
| | | return name |
| | | |
| | | return "NONE" |
| | | |
| | | |
| | | # build a dict of the zone grouping of the matrix: zone id -> list of (row, col) cells |
| | | def getAreas(): |
| | | arr = np.loadtxt(areapath, dtype=int) |
| | | dict ={} |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | zb = str(arr[i][j]) |
| | | if arr[i][j] == -9999: |
| | | continue |
| | | if zb not in dict: |
| | | dict[zb] = [(i,j)] |
| | | else: |
| | | dict[zb].append((i,j)) |
| | | return dict |
| | | |
| | | |
| | | def getAreaDictFirstIndex(): |
| | | arr = np.loadtxt(areapath, dtype=int) |
| | | dict ={} |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | if arr[i][j] == -9999: |
| | | continue |
| | | if arr[i][j] not in dict: |
| | | dict[arr[i][j]] = [(i,j)] |
| | | |
| | | return dict |
| | | |
| | | |
| | | # build a dict of the zone grouping: zone id -> list of cell ids |
| | | def getAreaDictIndexArray(): |
| | | arr = np.loadtxt(areapath, dtype=int) |
| | | dict_array={} |
| | | for i in range(len(arr)): |
| | | for j in range(len(arr[i])): |
| | | zb= str(arr[i][j]) |
| | | if arr[i][j] == -9999: |
| | | continue |
| | | if zb not in dict_array: |
| | | array= [] |
| | | index = getCellIdByRC(i+1,j+1) |
| | | array.append(index) |
| | | dict_array[zb] = array |
| | | else: |
| | | index = getCellIdByRC(i+1,j+1) |
| | | dict_array[zb].append(index) |
| | | |
| | | return dict_array |
| | | |
| | | |
| | | def getCellIdByRC(rowVal, columnVal): |
| | |     return (rowVal - 1) * 114 + columnVal - 1 |
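| | | |
| | | # A small usage sketch (not part of the original module), assuming the 104-row by |
| | | # 114-column grid used elsewhere in this project; cell ids count row by row from 0. |
| | | if __name__ == '__main__': |
| | |     print(getCellIdByRC(1, 1))   # 0 |
| | |     print(getCellIdByRC(2, 1))   # 114 (one full row of 114 columns later) |
| | |     zones = getAreas()           # {"zone id": [(row, col), ...], ...} |
| | |     print({key: len(cells) for key, cells in zones.items()}) |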
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Tue Oct 31 16:12:55 2023 |
| | | |
| | | @author: ZMK |
| | | """ |
| | | |
| | | import flopy |
| | | import flopy.utils.binaryfile as bf |
| | | import csv |
| | | import Base as base |
| | | import os |
| | | import json |
| | | import ModelPeriod |
| | | import numpy as np |
| | | |
| | | |
| | | |
| | | def get_model_json(model_name): |
| | | period_json="" |
| | | prediction_path = base.model_dir + model_name +"\\prediction.json" |
| | | with open(prediction_path,encoding='utf-8') as f: |
| | | period_json = json.load(f) |
| | | |
| | |     return period_json |
| | | |
| | | def get_model_period(model_name): |
| | | period_json="" |
| | | prediction_path = base.model_dir + model_name +"\\prediction.json" |
| | | with open(prediction_path,encoding='utf-8') as f: |
| | | period_json = json.load(f) |
| | | |
| | | start_time = period_json["start_time"] |
| | | end_time = period_json["end_time"] |
| | | |
| | | months = ModelPeriod.get_months_in_range_ym(start_time, end_time) |
| | |     return months |
| | | |
| | | |
| | | # observation well chart data |
| | | def obsChartdata(model_name, row, column): |
| | | |
| | | row = int(row)-1 |
| | | column = int(column)-1 |
| | | dir = base.model_dir + model_name + "\\modflow.head" |
| | | |
| | | head = bf.HeadFile(dir) |
| | | alldata = head.get_alldata() |
| | | period = len(alldata) |
| | | |
| | | layer = 3 |
| | | |
| | |     ydata = [] |
| | |     result = {} |
| | |     for per in range(period): |
| | |         for lay in range(layer): |
| | |             # keep layer 0 of every third head record |
| | |             if per % 3 == 0 and lay == 0: |
| | |                 per_array = alldata[per][lay] |
| | |                 cell_data = float(per_array[row][column]) |
| | |                 ydata.append(cell_data) |
| | | |
| | | period_json= get_model_json(model_name) |
| | | |
| | | start_time = period_json["start_time"] |
| | | end_time = period_json["end_time"] |
| | | |
| | | months = ModelPeriod.get_months_in_range_ym(start_time, end_time) |
| | | |
| | | result = {"y_data": ydata, "x_data": months} |
| | | return result |
| | | |
| | | def getRowCloumnById(index_id): |
| | |     # grid of 104 rows by 114 columns; cell ids are numbered row by row from 0 |
| | |     row = 104 |
| | |     column = 114 |
| | |     if 0 <= index_id < row * column: |
| | |         return divmod(index_id, column) |
| | |     return "" |
| | | |
| | | |
| | | |
| | | # groundwater information (head time series for one cell) |
| | | def earthWaterChart(model_name, index_id): |
| | | |
| | | row_column = getRowCloumnById(index_id) |
| | | |
| | | row = row_column[0] |
| | | column = row_column[1] |
| | | dir = base.model_dir + model_name + "\\modflow.head" |
| | | |
| | | head = bf.HeadFile(dir) |
| | | alldata = head.get_alldata() |
| | | period = len(alldata) |
| | | |
| | | layer = 3 |
| | | |
| | | ydata = [] |
| | | result = {} |
| | | for per in range(period): |
| | | for lay in range(layer): |
| | | if per % 3 == 0 and lay == 0: |
| | | |
| | | per_array = alldata[per][lay] |
| | | |
| | | cell_data = (float)(per_array[row][column]) |
| | | ydata.append(cell_data) |
| | | |
| | | period_json= get_model_json(model_name) |
| | | |
| | | start_time = period_json["start_time"] |
| | | end_time = period_json["end_time"] |
| | | |
| | | months = ModelPeriod.get_months_in_range_ym(start_time, end_time) |
| | | |
| | | result = {"y_data": ydata, "x_data": months} |
| | | return result |
| | | |
| | | def heatmapdata(model_name,period): |
| | | dir = base.model_dir + model_name + "\\modflow.head" |
| | | |
| | | head = bf.HeadFile(dir) |
| | | |
| | | alldata = head.get_alldata() |
| | | |
| | | index = int(period)*3 |
| | | return alldata[index][0] |
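| | | |
| | | # Usage sketch: heatmapdata("202001_202212", "4") returns the layer-0 head array of |
| | | # record index 12 (4 * 3), assuming three head records are written per stress period; |
| | | # the model name and period here are illustrative. |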
| | | |
| | | |
| | | # water balance calculation |
| | | def waterEqu(model_name): |
| | | if model_name == '202001_202212': |
| | | water_equ_path = base.prefix + "\\water_equ.json" |
| | | with open(water_equ_path,encoding='utf-8') as f: |
| | | data = json.load(f) |
| | | return data |
| | | else: |
| | | year = model_name[0:4] |
| | | title =[year] |
| | | dict ={"title":title} |
| | | |
| | | celldata = np.array(base.water_equ2022).tolist() |
| | | |
| | | predict_json= get_model_json(model_name) |
| | | |
| | | a1=float(celldata[0]) |
| | | a2=float(celldata[1]) |
| | | a3=float(celldata[2]) |
| | | a4=float(celldata[3]) |
| | | |
| | | b1=float(celldata[4]) |
| | | b2=float(celldata[5]) |
| | | b3=float(celldata[6]) |
| | | |
| | | if predict_json["rain"]: |
| | | a1= float(predict_json["rain"]["ratio"]) * float(celldata[0]) |
| | | a3= float(predict_json["rain"]["ratio"]) * float(celldata[2]) |
| | | a4= float(predict_json["rain"]["ratio"]) * float(celldata[3]) |
| | | b2= float(predict_json["rain"]["ratio"]) * float(celldata[5]) |
| | | b3= float(predict_json["rain"]["ratio"]) * float(celldata[6]) |
| | | if predict_json["river"]: |
| | | a2= float(predict_json["river"]["ratio"]) * float(celldata[1]) |
| | | |
| | | if predict_json["mine"]: |
| | | b1= b1 |
| | | |
| | | in_data= a1+a2+a3+a4 |
| | | out_data= b1 +b2 + b3 |
| | | float_data=[a1,a2,a3,a4,in_data,b1,b2,b3,out_data,in_data-out_data] |
| | | |
| | |         inarray = [] |
| | |         inarray.append({"name": "precipitation infiltration", "value": a1}) |
| | |         inarray.append({"name": "river infiltration", "value": a2}) |
| | |         inarray.append({"name": "L1 lateral recharge", "value": a3}) |
| | |         inarray.append({"name": "L3 lateral recharge", "value": a4}) |
| | |         outarray = [] |
| | |         outarray.append({"name": "artificial extraction", "value": b1}) |
| | |         outarray.append({"name": "L1 lateral outflow", "value": b2}) |
| | |         outarray.append({"name": "L3 lateral outflow", "value": b3}) |
| | | pie1={str(year):inarray} |
| | | pie2={str(year):outarray} |
| | | |
| | | dict["pie1"]=pie1 |
| | | dict["pie2"]=pie2 |
| | | |
| | | array2d=[] |
| | | array2d.append([str(year)]) |
| | | for i in range(len(float_data)): |
| | | tmp=[] |
| | | tmp.append(str(float_data[i])) |
| | | array2d.append(tmp) |
| | | dict["data"]=array2d |
| | | return dict |
| | | |
| | | |
| | | # export head results to csv files |
| | | def exportCsV(model_name): |
| | | |
| | | dir = base.model_dir + model_name + "\\modflow.head" |
| | | out_path = base.model_dir + model_name + "\\output\\" |
| | | if not os.path.exists(out_path): |
| | | os.mkdir(out_path) |
| | | |
| | | head = bf.HeadFile(dir) |
| | | |
| | | alldata = head.get_alldata() |
| | | month = len(alldata) |
| | | layer = 3 |
| | | |
| | | for i in range(month): |
| | | for j in range(layer): |
| | | if i % 3 == 0: |
| | | md = (int)(i / 3 + 1) |
| | | filename = out_path + str(md) + '-' + str(j+1) + '.csv' |
| | | f = open(filename, 'w', newline='') |
| | | writer = csv.writer(f) |
| | | for p in alldata[i][j]: |
| | | writer.writerow(p) |
| | | f.close() |
| | | |
| | | return out_path |
| | | |
| | | |
| | | |
| | | |
| | | |
New file |
| | |
| | | |
| | | # import the Flask class and related modules |
| | | from flask import Flask |
| | | from flask import jsonify |
| | | from flask import request |
| | | from flask_cors import CORS |
| | | import sys |
| | | import numpy as np |
| | | import pandas as pd |
| | | import flopy |
| | | import flopy.utils.binaryfile as bf |
| | | import csv |
| | | import time |
| | | from openpyxl import load_workbook |
| | | import os |
| | | import shutil |
| | | import Base as base |
| | | import CalHead |
| | | import Predict |
| | | import json |
| | | import ModelPeriod |
| | | |
| | | # Flask takes __name__ as an argument; it points to the package the application lives in |
| | | app = Flask(__name__) |
| | | CORS(app, supports_credentials=True, resources=r'/*') |
| | | |
| | | |
| | | # number of boundary cells |
| | | iboundCellSize = 240 |
| | | iboundCellSize2= 213 |
| | | |
| | | iboundCellSizeTotle= 453 |
| | | # number of river cells |
| | | riverCellSize = 109 |
| | | |
| | | iboundGroupSize = 5 |
| | | iboundGroup={1:[1,86],2:[87,111],3:[112,142],4:[143,170],5:[171,240]} |
| | | |
| | | iboundGroup3Size = 5 |
| | | iboundGroup3={1:[241,282],2:[283,354],3:[355,393],4:[394,436],5:[437,453]} |
| | | |
| | | riverGroupSize = 4 |
| | | riverGroup={1:[454,479],2:[480,505],3:[506,527],4:[528,562]} |
| | | |
| | | riverName=['éç¿-è½å¡å²','è½å¡å²-é驾åº','é驾åº-ä¸å®¶åº','ä¸å®¶åº-墿²æ¡¥'] |
| | | |
| | | # get row, column, layer, and period parameters |
| | | |
| | | def getModel(model_name): |
| | |     model_ws = "" |
| | |     if not model_name: |
| | |         # default to the baseline model directory when no name is given |
| | |         model_ws = base.model_dir + "202001_202212" |
| | |     else: |
| | |         model_ws = base.model_dir + model_name |
| | | |
| | | m = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws, exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | return m |
| | | |
| | | |
| | | @app.route('/baseparam/', methods=['GET']) |
| | | def baseparam(): |
| | | |
| | | model_name = request.args.get('model_name') |
| | | ml= getModel(model_name) |
| | | nrclp = ml.get_nrow_ncol_nlay_nper() |
| | | dict = {"Row": nrclp[0], "Column": nrclp[1], |
| | | "Layer": nrclp[2], "period": nrclp[3]} |
| | | jsondata= CalHead.get_model_json(model_name) |
| | | start_time = jsondata["start_time"] |
| | | end_time = jsondata["end_time"] |
| | | |
| | | months = ModelPeriod.get_months_in_range_ym(start_time, end_time) |
| | | dict["months"]=months |
| | | return jsonify(dict) |
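| | | |
| | | # Example request (model name illustrative; host/port from app.run at the bottom): |
| | | #   GET http://192.168.0.122:5000/baseparam/?model_name=202001_202212 |
| | | #   -> {"Row": ..., "Column": ..., "Layer": ..., "period": ..., "months": [...]} |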
| | | |
| | | |
| | | # read the wel file; parameter: stress period |
| | | @app.route('/welList/', methods=['GET']) |
| | | def welList(): |
| | | |
| | | period = request.args.get('period') |
| | | model_name = request.args.get('model_name') |
| | | layerparam = request.args.get('layer') |
| | | ml= getModel(model_name) |
| | | |
| | |     # kper must be an integer; request args arrive as strings |
| | |     wel = ml.wel.stress_period_data[int(period)] |
| | | result = [] |
| | | welarray = [] |
| | | riverarray = [] |
| | | iboundarray = [] |
| | | |
| | | for Layer, Row, Column, Q in wel: |
| | | dict = {"Layer": str(Layer), "Row": str( |
| | | Row), "Column": str(Column), "Q": str(Q)} |
| | | result.append(dict) |
| | | |
| | | result_len = len(result) |
| | | |
| | | if layerparam == '1': |
| | | # boundary cells |
| | | for i in range(0, 240): |
| | | iboundarray.append(result[i]) |
| | | # river cells |
| | | for i in range(453, 562): |
| | | riverarray.append(result[i]) |
| | | |
| | | for i in range(562, result_len): |
| | | r = int (result[i]['Row'])+1 |
| | | c =int (result[i]['Column'])+1 |
| | | name = base.getPumpWellName(str(r), str(c)) |
| | | |
| | | result[i]['name']=name |
| | | welarray.append(result[i]) |
| | | |
| | | elif layerparam == '3': |
| | | for i in range(240, 453): |
| | | iboundarray.append(result[i]) |
| | | |
| | | |
| | | ibounddict = {"name": "ibound", "data": iboundarray} |
| | | riverdict = {"name": "river", "data": riverarray} |
| | | |
| | | weldict = {"name": "wel", "data": welarray} |
| | | |
| | | data = [] |
| | | data.append(riverdict) |
| | | data.append(ibounddict) |
| | | data.append(weldict) |
| | | return jsonify(data) |
| | | |
| | | # read the data of a single well |
| | | @app.route('/wel/', methods=['GET']) |
| | | def wel(): |
| | | row_param = request.args.get('Row') |
| | | column_param = request.args.get('Column') |
| | | model_name = request.args.get('model_name') |
| | | |
| | | ml= getModel(model_name) |
| | | result = [] |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | |
| | | for i in range(periods_len): |
| | | wel = [] |
| | | wel = ml.wel.stress_period_data.__getitem__(kper=i) |
| | | for Layer, Row, Column, Q in wel: |
| | | if str(Row) == row_param and str(Column) == column_param: |
| | | |
| | | start_month = periods[i] + "-01" |
| | | end_month = ModelPeriod.last_day_of_month_start(periods[i]) |
| | | |
| | | dict = {"StartTime": start_month, "EndTime": end_month, |
| | | "Layer": str(Layer+1), "Row": str(Row), "Column": str(Column), "Q": str(Q)} |
| | | result.append(dict) |
| | | |
| | | return jsonify(result) |
| | | |
| | | |
| | | # modify the wel file |
| | | @app.route('/welInput', methods=['POST']) |
| | | def welInput(): |
| | | |
| | | json = request.get_json() |
| | | row_param = str(json['Row']) |
| | | column_param = str(json['Column']) |
| | | |
| | | # model_name = request.args.get('model_name') |
| | | model_name = str(json['model_name']) |
| | | |
| | | |
| | | ml= getModel(model_name) |
| | | |
| | | # submitted list of per-period data (json) |
| | | data = json['data'] |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | # loop over the stress periods and rebuild the wel data |
| | | # lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]} |
| | | lrcq = {} |
| | | for per in range(periods_len): |
| | | wel = [] |
| | | wel = ml.wel.stress_period_data.__getitem__(kper=per) |
| | | |
| | | # data for this stress period |
| | | array2d = [] |
| | | |
| | | for Layer, Row, Column, Q in wel: |
| | | array = [] |
| | | if str(Row) == row_param and str(Column) == column_param: |
| | | |
| | | array = [Layer, Row, Column, data[per]['Q']] |
| | | else: |
| | | array = [Layer, Row, Column, Q] |
| | | |
| | | array2d.append(array) |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(ml,stress_period_data=lrcq) |
| | | ml.write_input() |
| | | |
| | | return jsonify("Data update complete!") |
| | | |
| | | |
| | | # read a spreadsheet (cell) file and generate the wel file |
| | | @app.route('/cellFileInput', methods=['POST']) |
| | | def cellFileInput(): |
| | | |
| | | path ='C:\\Users\\ZMK\\Desktop\\å¾åé\\cellæä»¶.xlsx' |
| | | |
| | | data = get_cell_data(path) |
| | | |
| | | lrcq= get_cell_struct(data["excel1"],data["excel2"],data["excel3"]) |
| | | |
| | | model_name = request.args.get('model_name') |
| | | |
| | | ml= getModel(model_name) |
| | | |
| | | flopy.modflow.ModflowWel(ml,stress_period_data=lrcq) |
| | | ml.write_input() |
| | | |
| | | return jsonify("success") |
| | | |
| | | |
| | | def get_cell_struct(excel1,excel2,excel3): |
| | | lrcq={} |
| | | |
| | | # number of stress periods |
| | | period = 7 |
| | | start_row_index = 1 |
| | | |
| | | # boundary data sheet |
| | | for col in range (0,period): |
| | | array =[] |
| | | for row in range(start_row_index, len(excel1)): |
| | | |
| | | arr = [excel1[row][2]-1,excel1[row][3]-1,excel1[row][4]-1,excel1[row][6+col]] |
| | | array.append(arr) |
| | | lrcq[col]= array |
| | | |
| | | # river data sheet |
| | | for col in range (0,period): |
| | | array =[] |
| | | for row in range(start_row_index, len(excel2)): |
| | | |
| | | arr = [excel2[row][2]-1,excel2[row][3]-1,excel2[row][4]-1,excel2[row][6+col]] |
| | | array.append(arr) |
| | | |
| | | lrcq[col].extend(array) |
| | | |
| | | # pumping data sheet |
| | | for col in range (0,period): |
| | | |
| | | array =[] |
| | | for row in range(start_row_index, len(excel3)): |
| | | |
| | | arr = [excel3[row][1]-1,excel3[row][2]-1,excel3[row][3]-1,excel3[row][8+col]] |
| | | array.append(arr) |
| | | |
| | | lrcq[col].extend(array) |
| | | |
| | | return lrcq |
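| | | |
| | | # Shape of the dict built above, as consumed by flopy.modflow.ModflowWel |
| | | # (the numbers below are illustrative, not project data): |
| | | #   lrcq = { |
| | | #       0: [[0, 10, 20, -150.0], [2, 33, 41, -80.0]],   # stress period 0 |
| | | #       1: [[0, 10, 20, -120.0], [2, 33, 41, -80.0]],   # stress period 1 |
| | | #   } |
| | | #   flopy.modflow.ModflowWel(ml, stress_period_data=lrcq) |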
| | | |
| | | |
| | | |
| | | # read the cell workbook |
| | | # file_path: path to the xlsx file |
| | | def get_cell_data(file_path): |
| | | |
| | | workbook = load_workbook(file_path) |
| | |     sheetnames = workbook.sheetnames |
| | | #read first sheet |
| | | sheet1 = workbook[sheetnames[0]] |
| | | sheet2 = workbook[sheetnames[1]] |
| | | sheet3 = workbook[sheetnames[2]] |
| | | |
| | | excel1 =[] |
| | | excel2=[] |
| | | excel3=[] |
| | | # iterate over the whole worksheet |
| | | for row in sheet1.iter_rows(values_only=True): |
| | | array=[] |
| | | for cell in row: |
| | | array.append(cell) |
| | | excel1.append(array) |
| | | |
| | | for row in sheet2.iter_rows(values_only=True): |
| | | array=[] |
| | | for cell in row: |
| | | array.append(cell) |
| | | excel2.append(array) |
| | | |
| | | for row in sheet3.iter_rows(values_only=True): |
| | | array=[] |
| | | for cell in row: |
| | | array.append(cell) |
| | | excel3.append(array) |
| | | |
| | | # close the Excel file |
| | | workbook.close() |
| | | data={"excel1":excel1,"excel2":excel2,"excel3":excel3} |
| | | |
| | | return data |
| | | |
| | | |
| | | |
| | | # number of boundary (ibound) groups |
| | | @app.route('/iboundList/', methods=['GET']) |
| | | def iboundList(): |
| | | |
| | | return jsonify(iboundGroupSize) |
| | | |
| | | |
| | | # grouped boundary data |
| | | @app.route('/iboundData/', methods=['GET']) |
| | | def iboundData(): |
| | | |
| | | group_id = int(request.args.get('groupId')) |
| | | |
| | | model_name = request.args.get('model_name') |
| | | ml= getModel(model_name) |
| | | data=[] |
| | | index = iboundGroup[group_id] |
| | | start_index = index[0] |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | |
| | | for per in range(periods_len): |
| | | wel = [] |
| | | wel = ml.wel.stress_period_data.__getitem__(kper = per) |
| | | |
| | | result = [] |
| | | |
| | | for Layer, Row, Column, Q in wel: |
| | | dict = {"Layer": str(Layer+1), "Row": str(Row), "Column": str(Column), "Q": str(Q)} |
| | | result.append(dict) |
| | | |
| | | start_month = periods[per] +"-01" |
| | | end_month = ModelPeriod.last_day_of_month_start(periods[per]) |
| | | |
| | | dict = {"StartTime": start_month, "EndTime": end_month, |
| | | "Layer": str(result[start_index]['Layer']), |
| | | "Q": str(result[start_index]['Q'])} |
| | | data.append(dict) |
| | | |
| | | return jsonify(data) |
| | | |
| | | |
| | | # modify boundary data |
| | | @app.route('/iboundInput', methods=['POST']) |
| | | def iboundInput(): |
| | | |
| | | json = request.get_json() |
| | | no = int(json['No']) |
| | | # submitted list of per-period data (json) |
| | | data = json['data'] |
| | | |
| | | model_name = json['model_name'] |
| | | ml= getModel(model_name) |
| | | |
| | | index = iboundGroup[no] |
| | | start_index = index[0] |
| | | end_index = index[1] |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | # loop over the stress periods and rebuild the wel data |
| | | # lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]} |
| | | lrcq = {} |
| | | for per in range(periods_len): |
| | | wel = [] |
| | | wel = ml.wel.stress_period_data.__getitem__(kper=per) |
| | | |
| | | # data for this stress period |
| | | array2d = [] |
| | | |
| | | count = 1 |
| | | for Layer, Row, Column, Q in wel: |
| | | array = [] |
| | | |
| | | if count>= start_index and count <= end_index: |
| | | array = [Layer, Row, Column, data[per]['Q']] |
| | | else: |
| | | array = [Layer, Row, Column, Q] |
| | | |
| | | array2d.append(array) |
| | | count +=1 |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(ml,stress_period_data=lrcq) |
| | | ml.write_input() |
| | | return jsonify("Data update complete!") |
| | | |
| | | |
| | | # list the river reaches |
| | | @app.route('/riverList/', methods=['GET']) |
| | | def riverList(): |
| | | |
| | | riverResult=[] |
| | | for i in range(len(riverName)): |
| | | item ={"id":i+1,"name":riverName[i]} |
| | | riverResult.append(item) |
| | | return jsonify(riverResult) |
| | | |
| | | |
| | | # river data |
| | | @app.route('/riverData/', methods=['GET']) |
| | | def riverData(): |
| | | group_id = int(request.args.get('groupId')) |
| | | data=[] |
| | | index = riverGroup[group_id] |
| | | start_index = index[0] |
| | | |
| | | model_name = request.args.get('model_name') |
| | | ml= getModel(model_name) |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | |
| | | for per in range(periods_len): |
| | | wel = [] |
| | | wel = ml.wel.stress_period_data.__getitem__(kper = per) |
| | | result = [] |
| | | for Layer, Row, Column, Q in wel: |
| | | dict = {"Layer": str(Layer+1), "Row": str( |
| | | Row), "Column": str(Column), "Q": str(Q)} |
| | | result.append(dict) |
| | | |
| | | |
| | | start_month = periods[per] +"-01" |
| | | end_month = ModelPeriod.last_day_of_month_start(periods[per]) |
| | | |
| | | dict = {"StartTime": start_month, "EndTime": end_month, |
| | | "Layer": str(result[start_index]['Layer']), |
| | | "Q": str(result[start_index]['Q'])} |
| | | data.append(dict) |
| | | |
| | | return jsonify(data) |
| | | |
| | | |
| | | |
| | | # modify river data |
| | | @app.route('/riverInput', methods=['POST']) |
| | | def riverInput(): |
| | | |
| | | json = request.get_json() |
| | | no = int(json['No']) |
| | | # submitted list of per-period data (json) |
| | | data = json['data'] |
| | | |
| | | index = riverGroup[no] |
| | | start_index = index[0] |
| | | end_index = index[1] |
| | | model_name = json['model_name'] |
| | | |
| | | ml= getModel(model_name) |
| | | |
| | | periods =CalHead.get_model_period(model_name) |
| | | periods_len= len(periods) |
| | | |
| | | # loop over the stress periods and rebuild the wel data |
| | | # lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]} |
| | | lrcq = {} |
| | | for per in range(periods_len): |
| | | wel = [] |
| | | wel = ml.wel.stress_period_data.__getitem__(kper=per) |
| | | |
| | | # data for this stress period |
| | | array2d = [] |
| | | |
| | | count = 1 |
| | | for Layer, Row, Column, Q in wel: |
| | | array = [] |
| | | |
| | | if count>= start_index and count <= end_index: |
| | | array = [Layer, Row, Column, data[per]['Q']] |
| | | else: |
| | | array = [Layer, Row, Column, Q] |
| | | |
| | | array2d.append(array) |
| | | count +=1 |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(ml,stress_period_data=lrcq) |
| | | ml.write_input() |
| | | return jsonify("Data update complete!") |
| | | |
| | | |
| | | # read precipitation (recharge) by zone |
| | | @app.route('/precipitation/', methods=['GET']) |
| | | def precipitation(): |
| | | model_name = request.args.get('model_name') |
| | | |
| | | ml= getModel(model_name) |
| | | period = request.args.get('period') |
| | | |
| | | per = int(period) |
| | | item = ml.rch.rech.__getitem__(kper=per) |
| | | value = item.get_value() |
| | | item_data = np.array(value).tolist() |
| | | |
| | | # zone dict keyed by id, e.g. {"1": [(i, j)]} (first cell of each zone) |
| | | areadict1= base.getAreaDictFirstIndex() |
| | | |
| | | # zone dict keyed by id, e.g. {"1": [a, b, c, d]} (cell ids) |
| | | areadict = base.getAreaDictIndexArray() |
| | | |
| | | # zone dict keyed by id, e.g. {"1": data} (recharge value) |
| | | areadatadict={} |
| | | |
| | | for key in areadict1: |
| | | index1 = areadict1[key] |
| | | i = index1[0][0] |
| | | j= index1[0][1] |
| | | |
| | | data= item_data[i][j] |
| | | |
| | | areadatadict[str(key)]= format(data,'.8f') |
| | | |
| | | result =[] |
| | | result.append(areadatadict) |
| | | result.append(areadict) |
| | | return jsonify(result) |
| | | |
| | | |
| | | # modify precipitation data |
| | | # @app.route('/precipitationInput', methods=['POST']) |
| | | # def precipitationInput(): |
| | | |
| | | # json = request.get_json() |
| | | # model_name= str(json['model_name']) |
| | | # period = int(json['period']) |
| | | # #æåºçåºå卿å表 json |
| | | # data = json['data'] |
| | | # dict = {} |
| | | # for i in range(len(data)): |
| | | # q1 = data[i]['Q1'] |
| | | # q2 = data[i]['Q2'] |
| | | # dict[q1] = q2 |
| | | |
| | | # ml= getModel(model_name) |
| | | |
| | | # item = ml.rch.rech.__getitem__(kper=period) |
| | | # array2d = item.get_value() |
| | | |
| | | # count = 0 |
| | | |
| | | # array2d_len = len(array2d) |
| | | |
| | | # for i in range(array2d_len): |
| | | |
| | | # array_len = len(array2d[i]) |
| | | |
| | | # for j in range(array_len): |
| | | |
| | | # va = str(array2d[i][j]) |
| | | # if va in dict: |
| | | # count += 1 |
| | | # array2d[i][j] = float(dict[va]) |
| | | |
| | | # ml.rch.rech.__setitem__(key=period, value=array2d) |
| | | |
| | | # rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech) |
| | | # rch.write_file(check=False) |
| | | # #ml.write_input() |
| | | |
| | | # return jsonify("éæ°´åæ°ä¿®æ¹å®æ¯ï¼") |
| | | |
| | | |
| | | @app.route('/precipitationInput', methods=['POST']) |
| | | def precipitationInput(): |
| | | |
| | | json = request.get_json() |
| | | model_name= str(json['model_name']) |
| | | period = int(json['period']) |
| | | # submitted list of per-zone data (json) |
| | | data = json['data'] |
| | | dict = {} |
| | | for i in range(len(data)): |
| | | q1 = data[i]['Q1'] |
| | | No = data[i]['No'] |
| | | dict[No] = q1 |
| | | |
| | | ml= getModel(model_name) |
| | | |
| | | item = ml.rch.rech.__getitem__(kper=period) |
| | | array2d = item.get_value() |
| | | areas= base.getAreas() |
| | | |
| | | for key in areas: |
| | | |
| | | tuples= areas[key] |
| | | zblen= len(tuples) |
| | | values = float(dict[key]) |
| | | for i in range(zblen): |
| | | x = tuples[i][0] |
| | | y = tuples[i][1] |
| | | array2d[x][y]= values |
| | | |
| | | ml.rch.rech.__setitem__(key=period, value=array2d) |
| | | |
| | | rch = flopy.modflow.ModflowRch(ml, rech = ml.rch.rech) |
| | | rch.write_file(check=False) |
| | | # ml.write_input() |
| | | |
| | | return jsonify("Precipitation parameters updated!") |
| | | |
| | | # import precipitation data from an uploaded spreadsheet file |
| | | @app.route('/precipitationInputFile', methods=['POST']) |
| | | def precipitationInputFile(): |
| | | |
| | | model_name = request.args.get('model_name') |
| | | ml= getModel(model_name) |
| | | save_path = 'C:/Users/ZMK/Desktop/test1/' + "1111.xlsx" |
| | | file = request.files.get('file') |
| | | |
| | | if file: |
| | | file.save(save_path) |
| | | |
| | | # parse the uploaded excel table data |
| | | stations = get_station_struct(save_path) |
| | | |
| | | # loop over the stress periods |
| | | # perd: stress-period index |
| | | # array2d: the 2-D recharge array of one period |
| | | for perd in range(0,36): |
| | | period = perd |
| | | item = ml.rch.rech.__getitem__(kper=period) |
| | | array2d = item.get_value() |
| | | |
| | | array2d_len = len(array2d) |
| | | count = 0 |
| | | # for this period, map each station's old cell value to its new value |
| | | dict = {} |
| | | for k in range(0,len(stations)): |
| | | row = stations[k]["row"] |
| | | column = stations[k]["column"] |
| | | |
| | | data_old = array2d[row][column] |
| | | data_new = stations[k]["data"][perd] |
| | | dict[data_old]= data_new |
| | | |
| | | |
| | | # apply the new values for this period |
| | | for i in range(array2d_len): |
| | | |
| | | array_len = len(array2d[i]) |
| | | |
| | | for j in range(array_len): |
| | | |
| | | va = str(array2d[i][j]) |
| | | if va in dict: |
| | | array2d[i][j] = float(dict[va]) |
| | | |
| | | # write array2d back into the item for this stress period |
| | | ml.rch.rech.__setitem__(key=period, value=array2d) |
| | | |
| | | rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech) |
| | | rch.write_file(check=False) |
| | | # ml.write_input() |
| | | return 'File uploaded successfully' |
| | | else: |
| | | return 'Upload failed: no file was selected' |
| | | |
| | | |
| | | # read the station data and build the data structure |
| | | # file_path: path to the xlsx file |
| | | def get_station_struct(file_path): |
| | | |
| | | workbook = load_workbook(file_path) |
| | |     sheetnames = workbook.sheetnames |
| | | #read first sheet |
| | | sheet = workbook[sheetnames[0]] |
| | | |
| | | array2d_excel=[] |
| | | # iterate over the whole worksheet |
| | | for row in sheet.iter_rows(values_only=True): |
| | | array=[] |
| | | for cell in row: |
| | | array.append(cell) |
| | | array2d_excel.append(array) |
| | | # close the Excel file |
| | | workbook.close() |
| | | |
| | | # column index where the data starts |
| | | data_start_index=6 |
| | | # first data row (skip the header) |
| | | start_row_index = 1 |
| | | # collected station info |
| | | stations = [] |
| | | for i in range (start_row_index,len(array2d_excel)): |
| | | st={"name":array2d_excel[i][1],"row":array2d_excel[i][4],"column":array2d_excel[i][5]} |
| | | data=[] |
| | | for j in range(data_start_index,len(array2d_excel[i])): |
| | | cell_data = array2d_excel[i][j] |
| | | cell_data= cell_data/100/30*0.15 |
| | | data.append(round(cell_data, 6)) |
| | | st["data"]= data |
| | | stations.append(st) |
| | | |
| | | return stations |
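| | | |
| | | # Illustrative shape of the list returned above (names and numbers are made up): |
| | | #   [{"name": "station A", "row": 52, "column": 67, "data": [0.0012, 0.0009, ...]}, |
| | | #    {"name": "station B", "row": 18, "column": 90, "data": [0.0015, 0.0011, ...]}] |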
| | | |
| | | |
| | | # run the model |
| | | @app.route('/runModel/', methods=['GET']) |
| | | def runModel(): |
| | | model_name = request.args.get('model_name') |
| | | |
| | | msg= Predict.run_model_predict(model_name) |
| | | # export csv files |
| | | csvpath = CalHead.exportCsV(model_name) |
| | | |
| | | # update the model's 3D grid configuration |
| | | base.updateModelConfig(model_name) |
| | | |
| | | # create the model's 3D grid output directory |
| | | filedir = base.model3d_path + model_name |
| | | |
| | | print(filedir) |
| | | if not os.path.exists(filedir): |
| | | os.makedirs(filedir, exist_ok=True) |
| | | base.callModelexe() |
| | | |
| | | return jsonify(msg) |
| | | |
| | | # generate the model csv files |
| | | @app.route('/runModelCsv/', methods=['GET']) |
| | | def runModelCsv(): |
| | | |
| | | model_name = request.args.get('model_name') |
| | | outpath = CalHead.exportCsV(model_name) |
| | | result = {"code": 200, "msg": "CSV export of the model results is complete!", "output_path": outpath} |
| | | return jsonify(result) |
| | | |
| | | |
| | | |
| | | # initial water level information |
| | | @app.route('/initWater/', methods=['GET']) |
| | | def initWater(): |
| | | |
| | | period = request.args.get('period') |
| | | |
| | | per = int(period) |
| | | |
| | | model_name = request.args.get('model_name') |
| | | |
| | | ml= getModel(model_name) |
| | | item = ml.rch.rech.__getitem__(kper=per) |
| | | value = item.get_value() |
| | | t = np.array(value).tolist() |
| | | return jsonify(t) |
| | | |
| | | # create a new model |
| | | @app.route('/saveModel/', methods=['GET']) |
| | | def saveModel(): |
| | | |
| | | modelname = request.args.get('name') |
| | | startTime = request.args.get('startTime') |
| | | endTime = request.args.get('endTime') |
| | | file_list = os.listdir(base.model_dir) |
| | | for name in file_list: |
| | | if name == modelname: |
| | | return jsonify("A model with this name already exists; it cannot be created again!") |
| | | |
| | | dir = base.model_dir + modelname |
| | | shutil.copytree(base.predictModel,dir) |
| | | |
| | | jsondata={"model_name":modelname,"start_time":startTime,"end_time":endTime} |
| | | predictionJson = base.model_dir + modelname +"\\prediction.json" |
| | | with open(predictionJson, "w",encoding='utf-8') as outfile: |
| | | json.dump(jsondata, outfile,ensure_ascii=False) |
| | | |
| | | return jsonify("New model created!") |
| | | |
| | | |
| | | # list existing models |
| | | @app.route('/ModelList/', methods=['GET']) |
| | | def ModelList(): |
| | | |
| | | file_list = os.listdir(base.model_dir) |
| | | return jsonify(file_list) |
| | | |
| | | |
| | | # save prediction scenario parameters |
| | | @app.route('/prediction', methods=['POST']) |
| | | def prediction(): |
| | | |
| | | jsondata = request.get_json() |
| | | model_name = str(jsondata['model_name']) |
| | | file_list = os.listdir(base.model_dir) |
| | | if model_name not in file_list: |
| | | return jsonify("The model does not exist; save failed!") |
| | | |
| | | predictionJson = base.model_dir + model_name +"\\prediction.json" |
| | | with open(predictionJson, "w",encoding='utf-8') as outfile: |
| | | json.dump(jsondata, outfile,ensure_ascii=False) |
| | | |
| | | return jsonify("Prediction scenario parameters saved!") |
| | | |
| | | |
| | | # get prediction scenario parameters |
| | | @app.route('/predictionparam', methods=['GET']) |
| | | def predictionparam(): |
| | | |
| | | model_name = request.args.get('model_name') |
| | | file_list = os.listdir(base.model_dir) |
| | | if model_name not in file_list: |
| | | return jsonify("The model does not exist!") |
| | | |
| | | predictiondata="" |
| | | prediction_path = base.model_dir + model_name +"\\prediction.json" |
| | | if os.path.exists(prediction_path): |
| | | with open(prediction_path,encoding='utf-8') as f: |
| | | predictiondata = json.load(f) |
| | | |
| | | welldata="" |
| | | well_path = base.model_dir + model_name +"\\pump_well.json" |
| | | |
| | | if os.path.exists(well_path): |
| | | with open(well_path,encoding='utf-8') as f: |
| | | welldata = json.load(f) |
| | | |
| | | if not welldata and not predictiondata: |
| | | return jsonify([]) |
| | | |
| | | if not predictiondata: |
| | | return jsonify(welldata) |
| | | |
| | | if not welldata: |
| | | return jsonify(predictiondata) |
| | | |
| | | merged_dict = {**predictiondata, **welldata} |
| | | |
| | | return jsonify(merged_dict) |
| | | |
| | | |
| | | |
| | | # prediction scenario: save pumping wells |
| | | @app.route('/pumpsavewell', methods=['POST']) |
| | | def pump_savewell(): |
| | | |
| | | jsondata = request.get_json() |
| | | model_name = str(jsondata['model_name']) |
| | | file_list = os.listdir(base.model_dir) |
| | | if model_name not in file_list: |
| | | return jsonify("The model does not exist; save failed!") |
| | | |
| | | pump_json = base.model_dir + model_name +"\\pump_well.json" |
| | | with open(pump_json, "w") as outfile: |
| | | json.dump(jsondata, outfile) |
| | | |
| | | return jsonify("Well parameters saved!") |
| | | |
| | | # prediction scenario: import well data |
| | | @app.route('/pumpimportdata', methods=['POST']) |
| | | def pump_importdata(): |
| | | |
| | | model_name = request.form.get('model_name') |
| | | |
| | | # ml= getModel(model_name) |
| | | file = request.files.get('file') |
| | | |
| | | save_path = base.model_dir + model_name +"\\extra_cell.xlsx" |
| | | |
| | | if file: |
| | | file.save(save_path) |
| | | |
| | | resultDict = {"code": 200, "msg": "Data saved!"} |
| | | return jsonify(resultDict) |
| | | |
| | | # observation well list |
| | | @app.route('/obsWellList', methods=['GET']) |
| | | def obsWellList(): |
| | | obswell= base.obs_well |
| | | dict =[] |
| | | for name , row ,column in obswell: |
| | | obj ={"name":name,"row":row,"column":column,"Layer":1} |
| | | dict.append(obj) |
| | | |
| | | return jsonify(dict) |
| | | |
| | | |
| | | # observation well chart data API |
| | | @app.route('/obsWellChart', methods=['GET']) |
| | | def obsWellChart(): |
| | | model_name = request.args.get('model_name') |
| | | row = request.args.get('row') |
| | | column = request.args.get('column') |
| | | |
| | | result = CalHead.obsChartdata(model_name, row, column) |
| | | |
| | | return jsonify(result) |
| | | |
| | | |
| | | # prediction page: Yongding River chart |
| | | @app.route('/predictRiverChart', methods=['GET']) |
| | | def predictRiverChart(): |
| | | base_year = request.args.get('base_year') |
| | | start_time = request.args.get('start_time') |
| | | end_time = request.args.get('end_time') |
| | | |
| | | return jsonify(Predict.predict_river_chart(base_year, start_time, end_time)) |
| | | |
| | | # prediction page: precipitation chart |
| | | @app.route('/predictWaterChart', methods=['GET']) |
| | | def predictWaterChart(): |
| | | base_year = request.args.get('base_year') |
| | | start_time = request.args.get('start_time') |
| | | end_time = request.args.get('end_time') |
| | | return jsonify(Predict.predict_water_chart(base_year, start_time, end_time)) |
| | | |
| | | @app.route('/heatmap', methods=['GET']) |
| | | def heatmap(): |
| | | model_name = request.args.get('model_name') |
| | | period = request.args.get('period') |
| | | data = CalHead.heatmapdata(model_name,period) |
| | |     return jsonify(np.array(data).tolist()) |
| | | |
| | | |
| | | # water balance |
| | | @app.route('/waterEqu', methods=['GET']) |
| | | def waterEqu(): |
| | | model_name = request.args.get('model_name') |
| | | data = CalHead.waterEqu(model_name) |
| | | return jsonify(data) |
| | | |
| | | |
| | | # earth/groundwater page data |
| | | @app.route('/earthWaterChart', methods=['GET']) |
| | | def earthWaterChart(): |
| | | |
| | | indexId = int(request.args.get('index_id')) |
| | | data = CalHead.earthWaterChart("202001_202212",indexId) |
| | | return jsonify(data) |
| | | |
| | | if __name__ == '__main__': |
| | | #app.run()  # host IP, port, and debug mode can be specified here |
| | | app.run(host="192.168.0.122", port=5000) |
| | | |
| | | |
| | | |
| | | |
New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Wed Nov 1 11:34:08 2023 |
| | | |
| | | @author: ZMK |
| | | """ |
| | | from datetime import datetime |
| | | import calendar |
| | | from dateutil import rrule |
| | | import json |
| | | import Base as base |
| | | import os |
| | | |
| | | |
| | | |
| | | # get the last day of the month for a given year/month/day |
| | | def last_day_of_month(year, month, day): |
| | | |
| | | d = calendar.monthrange(year, month) |
| | | last_day = str(year) +"-" +str(month) + "-" + str(d[1]) |
| | | return last_day |
| | | |
| | | def last_day_of_month_start(month): |
| | | |
| | | start_date = month.split('-') |
| | | last_day= last_day_of_month(int(start_date[0]),int(start_date[1]),1) |
| | | return last_day |
| | | |
| | | |
| | | |
| | | def get_months_in_range_ym(start_time, end_time): |
| | | |
| | | start=datetime.strptime(start_time,'%Y-%m') |
| | | end=datetime.strptime(end_time,'%Y-%m') |
| | | |
| | | count =rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).count() |
| | | |
| | | months=[] |
| | | for i in range(count) : |
| | | m = rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).__getitem__(i) |
| | | formatted_date = m.strftime("%Y-%m") |
| | | months.append(formatted_date) |
| | | |
| | | return months |
| | | |
| | | |
| | | def get_months_in_range_count(start_time, end_time): |
| | | |
| | | start=datetime.strptime(start_time,'%Y-%m') |
| | | end=datetime.strptime(end_time,'%Y-%m') |
| | | count =rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).count() |
| | | return count |
| | | |
| | | |
| | | def get_months_in_range(start_time, end_time): |
| | | |
| | | start=datetime.strptime(start_time,'%Y-%m') |
| | | end=datetime.strptime(end_time,'%Y-%m') |
| | | |
| | | count =rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).count() |
| | | |
| | | months=[] |
| | | for i in range(count) : |
| | | m = rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).__getitem__(i) |
| | | formatted_date = m.strftime("%Y-%m") |
| | | start_date = formatted_date.split('-') |
| | | last_day= last_day_of_month(int(start_date[0]),int(start_date[1]),1) |
| | | |
| | | months.append(formatted_date+"-01" +"," + last_day) |
| | | |
| | | return months |
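| | | |
| | | # A quick self-check sketch (not part of the original module); the expected values |
| | | # follow directly from the helpers above. |
| | | if __name__ == '__main__': |
| | |     print(get_months_in_range_ym("2022-01", "2022-04"))     # ['2022-01', '2022-02', '2022-03', '2022-04'] |
| | |     print(get_months_in_range_count("2022-01", "2022-04"))  # 4 |
| | |     print(last_day_of_month_start("2022-02"))               # '2022-2-28' |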
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
| | | |
New file |
| | |
| | | |
| | | # import the Flask class and related modules |
| | | from flask import Flask |
| | | from flask import jsonify |
| | | from flask import request |
| | | from flask_cors import CORS |
| | | import sys |
| | | import numpy as np |
| | | import pandas as pd |
| | | import flopy |
| | | import flopy.utils.binaryfile as bf |
| | | import csv |
| | | import time |
| | | from openpyxl import load_workbook |
| | | import os |
| | | import shutil |
| | | import json |
| | | import Base as base |
| | | import CalHead |
| | | import ModelPeriod |
| | | |
| | | |
| | | # strt = ml.bas6.strt |
| | | # # strs = ml.bas6.strt.__getitem__(1) |
| | | # # print(strs.get_value()) |
| | | |
| | | # mdBase = flopy.modflow.ModflowBas(ml,strt=1.0,ibound=ml.bas6.ibound) |
| | | # mdBase.write_file(check=False) |
| | | |
| | | |
| | | base_init_year=["2020","2021","2022"] |
| | | river_start_index = 454 |
| | | river_end_index =562 |
| | | |
| | | # number of prediction stress periods |
| | | predict_per = 12 |
| | | |
| | | # precipitation |
| | | # def predict_water_chart(base_year,start_time ,end_time): |
| | | # model_ws = base.baseModel |
| | | # baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | # exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | # index = 0 |
| | | # if base_year in base_init_year: |
| | | # index = base_init_year.index(str(base_year)) |
| | | |
| | | # y_data=[] |
| | | # x_data=[] |
| | | # satrt_index = index*12 |
| | | # end_index = satrt_index+12 |
| | | # for per in range(satrt_index,end_index): |
| | | # item = baseMdoel.rch.rech.__getitem__(kper=per) |
| | | # value = item.get_value() |
| | | # value_float = np.array(value) |
| | | # avg = value_float.mean() |
| | | # y_data.append(float (avg)) |
| | | |
| | | # start_month = str(base_year) +"-01" |
| | | # end_month = str(base_year) +"-12" |
| | | # x_data= ModelPeriod.get_months_in_range_ym(start_month,end_month) |
| | | # result = {"y_data": y_data, "x_data": x_data} |
| | | # return result |
| | | |
| | | base_water = base.prefix + 'base_water.ini' |
| | | def predict_water_chart(base_year,start_time ,end_time): |
| | | |
| | | |
| | | water_array = np.loadtxt(base_water, dtype=str,encoding='utf-8') |
| | | print(water_array) |
| | | y_data=[] |
| | | x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12") |
| | | water= water_array[0] |
| | | for e in water: |
| | | y_data.append(e) |
| | | |
| | | result = {"y_data": y_data, "x_data": x_data} |
| | | return result |
| | | |
| | | # river line chart |
| | | # def predict_river_chart(base_year,start_time ,end_time): |
| | | # model_ws = base.baseModel |
| | | # baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | # exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | # index = 0 |
| | | # if base_year in base_init_year: |
| | | # index = base_init_year.index(str(base_year)) |
| | | |
| | | # y_data=[] |
| | | # x_data=[] |
| | | # satrt_index = index*12 |
| | | # end_index = satrt_index+12 |
| | | # for per in range(satrt_index,end_index): |
| | | # wel = baseMdoel.wel.stress_period_data.__getitem__(kper=per) |
| | | # arr=[] |
| | | # for i in range(river_start_index, river_end_index): |
| | | # Q = wel[i][3] |
| | | # arr.append(float(Q)) |
| | | # avg = np.array(arr).mean() |
| | | # y_data.append(float(avg)) |
| | | # start_month = str(base_year) +"-01" |
| | | # end_month = str(base_year) +"-12" |
| | | # x_data= ModelPeriod.get_months_in_range_ym(start_month,end_month) |
| | | # result = {"y_data": y_data, "x_data": x_data} |
| | | # return result |
| | | |
| | | base_river = base.prefix + 'base_river.ini' |
| | | def predict_river_chart(base_year,start_time ,end_time): |
| | | |
| | | |
| | | river_array = np.loadtxt(base_river, dtype=str,encoding='utf-8') |
| | | print(river_array) |
| | | y_data=[] |
| | | x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12") |
| | | for e in river_array: |
| | | y_data.append(e) |
| | | |
| | | result = {"y_data": y_data, "x_data": x_data} |
| | | return result |
| | | |
| | | |
| | | def run_model_predict(model_name): |
| | | |
| | | predictiondata="" |
| | | prediction_path = base.model_dir + model_name +"\\prediction.json" |
| | | if os.path.exists(prediction_path): |
| | | with open(prediction_path,encoding='utf-8') as f: |
| | | predictiondata = json.load(f) |
| | | |
| | | |
| | | if predictiondata: |
| | | |
| | | per = ModelPeriod.get_months_in_range_count( |
| | | predictiondata["start_time"], predictiondata["end_time"]) |
| | | |
| | | # updateDisFile(model_name,per) |
| | | |
| | | # updateBase6File(model_name,predictiondata) |
| | | |
| | | #updateRchFile(model_name,predictiondata) |
| | | |
| | | updateRiverFile(model_name,predictiondata) |
| | | |
| | | #updateMineFile(model_name,predictiondata) |
| | | else: |
| | | print("prediction.json scenario file is empty; no files need to be updated") |
| | | |
| | | |
| | | # model_ws = base.model_dir + model_name |
| | | |
| | | # ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | # exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | # ml.run_model(report = True) |
| | | return jsonify("Run succeeded!") |
| | | |
| | | |
| | | |
| | | |
| | | # update the pumping (mine) data, either 1. by district or 2. for all districts |
| | | def updateMineFile(model_name,predictiondata): |
| | | |
| | | start_time =predictiondata["start_time"] |
| | | end_time = predictiondata["end_time"] |
| | | base_year = predictiondata["mine"]["base_year"] |
| | | |
| | | base_start= str(base_year) + "-" + str(start_time.split("-")[1]) |
| | | base_end= str(base_year) + "-" + str(end_time.split("-")[1]) |
| | | |
| | | start_index = (int)(base.times_month_per_dict[base_start]) |
| | | end_index = (int)(base.times_month_per_dict[base_end]) |
| | | |
| | | pers= end_index-start_index + 1 |
| | | |
| | | area= predictiondata["mine"]["area"] |
| | | flag = check_mine_param(predictiondata) |
| | | |
| | | if flag == 'true': |
| | | baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base.baseModel, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | update_model_ws = base.model_dir + model_name |
| | | updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | district_dict = get_distric_dict() |
| | | |
| | | area_dict = get_area_dict(area) |
| | | |
| | | lrcq = {} |
| | | for per in range(pers): |
| | | wel = [] |
| | | wel = baseMdoel.wel.stress_period_data.__getitem__(kper = (per + start_index )) |
| | | array2d = [] |
| | | count = 1 |
| | | for Layer, Row, Column, Q in wel: |
| | | array = [] |
| | | # indices beyond the river range are pumping wells |
| | | if count > river_end_index : |
| | | |
| | | r = (float) (get_row_column_ratio(Row, Column, district_dict, area_dict)) |
| | | |
| | | array = [Layer, Row, Column, Q * r] |
| | | |
| | | else: |
| | | array = [Layer, Row, Column, Q] |
| | | |
| | | array2d.append(array) |
| | | count +=1 |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq) |
| | | updateMdoel.write_input() |
| | | else: |
| | | print("Well/Mine data does not need modification!") |
| | | |
| | | |
| | | # build a name -> ratio mapping from the area list |
| | | def get_area_dict(area): |
| | | result ={} |
| | | |
| | | for i in range(len(area)): |
| | | name = area[i]["name"] |
| | | rt = area[i]["ratio"] |
| | | result[name]= rt |
| | | return result |
| | | |
| | | |
| | | # build a "row,column" -> district name mapping |
| | | def get_distric_dict(): |
| | | data = base.district |
| | | result = {} |
| | | for row ,column ,id ,name in data: |
| | | key = str(row)+","+str(column) |
| | | result[key]= name |
| | | return result |
| | | |
| | | |
| | | # get the ratio for a given row/column |
| | | def get_row_column_ratio(row, column ,district_dict, area_dict ): |
| | | key = str(row) +"," + str(column) |
| | |     if "全部区域" in area_dict: |
| | |         return area_dict["全部区域"] |
| | | |
| | | if district_dict.__contains__(key): |
| | | name = district_dict[key] |
| | | ratio = area_dict[name] |
| | | return float(ratio) |
| | | |
| | | return float(1.0) |
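| | | |
| | | # Illustrative lookup (district names and ratios are made up): |
| | | #   district_dict = {"12,34": "District A"}   # from get_distric_dict() |
| | | #   area_dict = {"District A": "0.8"}          # from get_area_dict(area) |
| | | #   get_row_column_ratio(12, 34, district_dict, area_dict)  -> 0.8 |
| | | #   get_row_column_ratio(1, 1, district_dict, area_dict)    -> 1.0 (no match) |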
| | | |
| | | |
| | | |
| | | def check_mine_param(predictiondata): |
| | | |
| | | mine = predictiondata["mine"] |
| | | if not mine: |
| | | print("Pumping well (mine) prediction parameters are empty; no modification needed") |
| | | return "false" |
| | | |
| | | base_year = predictiondata["mine"]["base_year"] |
| | | if not base_year : |
| | | print(" Mine: base_year prediction parameter is empty; no modification needed") |
| | | return "false" |
| | | |
| | | area= predictiondata["mine"]["area"] |
| | | if not area : |
| | | print(" Mine: area prediction parameter is empty; no modification needed") |
| | | return "false" |
| | | |
| | | return "true" |
| | | |
| | | |
| | | # update the river parameters |
| | | # def updateRiverFile(model_name,predictiondata): |
| | | |
| | | # start_time =predictiondata["start_time"] |
| | | # end_time = predictiondata["end_time"] |
| | | # base_year = predictiondata["river"]["base_year"] |
| | | |
| | | # ratio= float(predictiondata["river"]["ratio"]) |
| | | |
| | | # base_start= str(base_year) + "-" + str(start_time.split("-")[1]) |
| | | # base_end= str(base_year) + "-" + str(end_time.split("-")[1]) |
| | | |
| | | # start_index = (int)(base.times_month_per_dict[base_start]) |
| | | # end_index = (int)(base.times_month_per_dict[base_end]) |
| | | |
| | | # pers= end_index-start_index + 1 |
| | | |
| | | |
| | | # flag = check_river_param(predictiondata) |
| | | |
| | | # if flag == "true": |
| | | |
| | | # baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base.baseModel, |
| | | # exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | # update_model_ws = base.model_dir + model_name |
| | | # updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws, |
| | | # exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | # lrcq = {} |
| | | # for per in range(pers): |
| | | # wel = [] |
| | | |
| | | # wel = baseMdoel.wel.stress_period_data.__getitem__(kper = (per + start_index )) |
| | | # array2d = [] |
| | | |
| | | # count = 1 |
| | | |
| | | # for Layer, Row, Column, Q in wel: |
| | | # array = [] |
| | | # # å¦ææ¯æ²³æµçæ°æ®èå´ |
| | | # if count > river_start_index and count <= river_end_index: |
| | | # array = [Layer, Row, Column, Q * ratio] |
| | | # else: |
| | | # array = [Layer, Row, Column, Q] |
| | | |
| | | # array2d.append(array) |
| | | # count +=1 |
| | | |
| | | # lrcq[per] = array2d |
| | | |
| | | # flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq) |
| | | # updateMdoel.write_input() |
| | | |
| | | # else: |
| | | # print("Well--Riveræä»¶æ éä¿®æ¹ï¼") |
| | | |
| | | |
| | | def updateRiverFile(model_name,predictiondata): |
| | | |
| | | start_time =predictiondata["start_time"] |
| | | end_time = predictiondata["end_time"] |
| | | |
| | | |
| | | river_ratio= float(predictiondata["river"]["ratio"]) |
| | | |
| | | rain_ratio = float(predictiondata["rain"]["ratio"]) |
| | | rain_base_year = predictiondata["rain"]["base_year"] |
| | | |
| | | area= predictiondata["mine"]["area"] |
| | | |
| | | flag = check_river_param(predictiondata) |
| | | |
| | | if flag == "true": |
| | | |
| | | ws = base.predictParamModel + rain_base_year |
| | | |
| | | baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | update_model_ws = base.model_dir + model_name |
| | | updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | district_dict = get_distric_dict() |
| | | |
| | | area_dict = get_area_dict(area) |
| | | |
| | | lrcq = {} |
| | | |
| | | for per in range(predict_per): |
| | | wel = [] |
| | | |
| | | wel = baseMdoel.wel.stress_period_data.__getitem__(kper = per) |
| | | wel_len = len(wel) |
| | | |
| | | # lateral boundary cells |
| | | for i in range (0,453): |
| | | wel[i][3] = wel[i][3] * rain_ratio |
| | | |
| | | # river cells |
| | | for i in range(453, 562): |
| | | wel[i][3] = wel[i][3] * river_ratio |
| | | |
| | | # pumping wells |
| | | for i in range(562,wel_len): |
| | | |
| | | r = (float) (get_row_column_ratio(wel[i][1], wel[i][2], district_dict, area_dict)) |
| | | wel[i][3] = wel[i][3] * r |
| | | |
| | | lrcq[per] = wel |
| | | |
| | | flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq) |
| | | updateMdoel.write_input() |
| | | |
| | | else: |
| | | print("Well/River data does not need modification!") |
| | | |
| | | def check_river_param(predictiondata): |
| | | |
| | | river = predictiondata["river"] |
| | | if not river: |
| | | print("River prediction parameters are empty; no modification needed") |
| | | return "false" |
| | | |
| | | base_year = predictiondata["river"]["base_year"] |
| | | if not base_year : |
| | | print(" River: base_year prediction parameter is empty; no modification needed") |
| | | return "false" |
| | | |
| | | ratio= predictiondata["river"]["ratio"] |
| | | if not ratio or ratio == "1" : |
| | | print(" River: ratio prediction parameter is empty or 1; no modification needed") |
| | | return "false" |
| | | |
| | | return "true" |
| | | |
| | | |
| | | # def updateRchFile(model_name,predictiondata): |
| | | |
| | | # start_time =predictiondata["start_time"] |
| | | # end_time = predictiondata["end_time"] |
| | | |
| | | # base_year = predictiondata["rain"]["base_year"] |
| | | # ratio= float(predictiondata["rain"]["ratio"]) |
| | | |
| | | # base_start= str(base_year) + "-" + str(start_time.split("-")[1]) |
| | | # base_end= str(base_year) + "-" + str(end_time.split("-")[1]) |
| | | |
| | | # start_index = (int)(base.times_month_per_dict[base_start]) |
| | | # end_index = (int)(base.times_month_per_dict[base_end]) |
| | | # pers= end_index-start_index + 1 |
| | | |
| | | |
| | | # flag = check_rain_param(predictiondata) |
| | | |
| | | # if flag == "true": |
| | | |
| | | # baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base.baseModel, |
| | | # exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | # update_model_ws = base.model_dir + model_name |
| | | # updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws, |
| | | # exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | # for per in range(pers): |
| | | |
| | | # item = baseMdoel.rch.rech.__getitem__(kper = (per + start_index)) |
| | | # array2d = item.get_value() |
| | | # array2d_len = len(array2d) |
| | | |
| | | # for i in range(array2d_len): |
| | | |
| | | # array_len = len(array2d[i]) |
| | | # for j in range(array_len): |
| | | |
| | | # if str(base.area_array[i][j]) != '-9999': |
| | | |
| | | # array2d[i][j] = array2d[i][j] * ratio |
| | | |
| | | # updateMdoel.rch.rech.__setitem__(key = per, value=array2d) |
| | | |
| | | # rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech) |
| | | # rch.write_file(check=False) |
| | | |
| | | # else: |
| | | |
| | | # print("Rchæä»¶æ éä¿®æ¹ï¼") |
| | | |
| | | |
| | | def updateRchFile(model_name,predictiondata): |
| | | |
| | | start_time =predictiondata["start_time"] |
| | | end_time = predictiondata["end_time"] |
| | | |
| | | # base year type: wet year / dry year |
| | | base_year = predictiondata["rain"]["base_year"] |
| | | ratio= float(predictiondata["rain"]["ratio"]) |
| | | |
| | | |
| | | flag = check_rain_param(predictiondata) |
| | | |
| | | # model folder the source data comes from |
| | | base_ws= base.predictParamModel + base_year |
| | | |
| | | if flag == "true": |
| | | |
| | | baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | update_model_ws = base.model_dir + model_name |
| | | updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | for per in range(predict_per): |
| | | |
| | | item = baseMdoel.rch.rech.__getitem__(kper = per) |
| | | array2d = item.get_value() |
| | | array2d_len = len(array2d) |
| | | |
| | | for i in range(array2d_len): |
| | | |
| | | array_len = len(array2d[i]) |
| | | for j in range(array_len): |
| | | |
| | | if str(base.area_array[i][j]) != '-9999': |
| | | |
| | | array2d[i][j] = array2d[i][j] * ratio |
| | | |
| | | updateMdoel.rch.rech.__setitem__(key = per, value=array2d) |
| | | |
| | | rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech) |
| | | rch.write_file(check=False) |
| | | |
| | | else: |
| | | |
| | | print("Rchæä»¶æ éä¿®æ¹ï¼") |
| | | |
| | | def check_rain_param(predictiondata): |
| | | |
| | | rain = predictiondata["rain"] |
| | | if not rain: |
| | | print("Rch颿µåæ°ä¸ºç©ºï¼æ éè¦ä¿®æ¹") |
| | | return "false" |
| | | |
| | | base_year = predictiondata["rain"]["base_year"] |
| | | if not base_year: |
| | | print(" Rch : base_year prediction parameter is empty; no modification needed") |
| | | return "false" |
| | | |
| | | ratio= predictiondata["rain"]["ratio"] |
| | | if not ratio or ratio == "1": |
| | | print(" Rch : ratio prediction parameter is empty or equals 1; no modification needed") |
| | | return "false" |
| | | |
| | | return "true" |
| | | |
| | | |
| | | # Update the bas6 file with the initial head information |
| | | def updateBase6File(model_name,predictdata): |
| | | model_ws = base.model_dir + model_name |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | |
| | | # initial head |
| | | init_header = predictdata["initHeader"] |
| | | |
| | | dir = base.model_dir + init_header + "\\modflow.head" |
| | | head = bf.HeadFile(dir) |
| | | alldata = head.get_alldata() |
| | | |
| | | lens = len(alldata) |
| | | last_index = lens-3 |
| | | |
| | | last_array3= alldata[last_index] |
| | | |
| | | strt = ml.bas6.strt |
| | | # strs = ml.bas6.strt.__getitem__(2) |
| | | # print(strs.get_value()) |
| | | strt.__setitem__(0,last_array3[0]) |
| | | strt.__setitem__(1,last_array3[1]) |
| | | strt.__setitem__(2,last_array3[2]) |
| | | |
| | | |
| | | mfBase6 = flopy.modflow.ModflowBas( |
| | | ml, |
| | | strt= strt, |
| | | ibound=ml.bas6.ibound, |
| | | hnoflo=ml.bas6.hnoflo, |
| | | extension="bas6",) |
| | | |
| | | mfBase6.write_file(check=False) |
| | | |
| | | |
| | | # Modify the dis file |
| | | def updateDisFile(model_name, per): |
| | | |
| | | model_ws = base.model_dir + model_name |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | mfDis = flopy.modflow.ModflowDis( |
| | | ml, |
| | | nlay=ml.dis.nlay, |
| | | nrow=ml.dis.nrow, |
| | | ncol=ml.dis.ncol, |
| | | nper=per, |
| | | delr=ml.dis.delr, |
| | | delc=ml.dis.delc, |
| | | top=ml.dis.top, |
| | | botm=ml.dis.botm, |
| | | perlen=ml.dis.perlen, |
| | | nstp=ml.dis.nstp, |
| | | tsmult=ml.dis.tsmult, |
| | | steady=ml.dis.steady, |
| | | itmuni=ml.dis.itmuni, |
| | | lenuni=ml.dis.lenuni, |
| | | extension="dis") |
| | | |
| | | mfDis.write_file(check=False) |
New file |
| | |
| | | |
| | | # Import Flask and the other dependencies |
| | | from flask import Flask |
| | | from flask import jsonify |
| | | from flask import request |
| | | from flask_cors import CORS |
| | | import sys |
| | | import numpy as np |
| | | import pandas as pd |
| | | import flopy |
| | | import flopy.utils.binaryfile as bf |
| | | import csv |
| | | import time |
| | | from openpyxl import load_workbook |
| | | import os |
| | | import shutil |
| | | import json |
| | | import Base as base |
| | | import CalHead |
| | | import ModelPeriod |
| | | |
| | | |
| | | base_init_year=["2020","2021","2022"] |
| | | river_start_index = 454 |
| | | river_end_index =562 |
| | | |
| | | # number of prediction stress periods |
| | | predict_per = 12 |
| | | |
| | | # precipitation (base series) |
| | | |
| | | base_water = base.prefix + 'base_water.ini' |
| | | def predict_water_chart(base_year,start_time ,end_time): |
| | | |
| | | |
| | | water_array = np.loadtxt(base_water, dtype=str,encoding='utf-8') |
| | | |
| | | y_data=[] |
| | | x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12") |
| | | water= water_array[0] |
| | | for e in water: |
| | | y_data.append(e) |
| | | |
| | | result = {"y_data": y_data, "x_data": x_data} |
| | | return result |
| | | |
| | | # river line-chart data |
| | | |
| | | base_river = base.prefix + 'base_river.ini' |
| | | def predict_river_chart(base_year,start_time ,end_time): |
| | | |
| | | |
| | | river_array = np.loadtxt(base_river, dtype=str,encoding='utf-8') |
| | | |
| | | y_data=[] |
| | | x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12") |
| | | for e in river_array: |
| | | y_data.append(e) |
| | | |
| | | result = {"y_data": y_data, "x_data": x_data} |
| | | return result |
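| | | |
| | | # Both chart helpers above return the same payload shape: parallel month labels |
| | | # and values. Illustrative output only (values are placeholders): |
| | | # {"y_data": ["12.1", "11.8", ...], "x_data": ["2022-01", "2022-02", ..., "2022-12"]} |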
| | | |
| | | |
| | | def run_model_predict(model_name): |
| | | |
| | | predictiondata="" |
| | | prediction_path = base.model_dir + model_name +"\\prediction.json" |
| | | if os.path.exists(prediction_path): |
| | | with open(prediction_path,encoding='utf-8') as f: |
| | | predictiondata = json.load(f) |
| | | |
| | | if predictiondata: |
| | | |
| | | try: |
| | | updateDisFile(model_name,predict_per) |
| | | |
| | | updateBase6File(model_name,predictiondata) |
| | | |
| | | updateRchFile(model_name,predictiondata) |
| | | |
| | | updateRiverFile(model_name,predictiondata) |
| | | except: |
| | | |
| | | return "è¯·æ£æ¥åå§æ°´å¤´ãéæ°´éãæ°¸å®æ²³å
¥æ¸éãå¼ééçåæ°æ¯å¦å¡«å宿´ï¼" |
| | | |
| | | |
| | | else: |
| | | print("prediction.json 颿µåºæ¯æä»¶ä¸ºç©ºï¼æ éæ´æ¹ç¸åºæä»¶") |
| | | |
| | | |
| | | model_ws = base.model_dir + model_name |
| | | |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | ml.run_model(report = True) |
| | | return "颿µæ¨¡åè¿è¡æåï¼" |
| | | |
| | | |
| | | |
| | | # Build the name --> ratio mapping from the area list |
| | | def get_area_dict(area): |
| | | result ={} |
| | | |
| | | for i in range(len(area)): |
| | | name = area[i]["name"] |
| | | rt = area[i]["ratio"] |
| | | result[name]= rt |
| | | return result |
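| | | |
| | | # Hypothetical example: get_area_dict([{"name": "XX", "ratio": "0.9"}, |
| | | # {"name": "YY", "ratio": "1.1"}]) returns {"XX": "0.9", "YY": "1.1"}; the |
| | | # ratios stay as loaded from JSON and are converted to float by the caller. |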
| | | |
| | | |
| | | # Build the district "row,column" --> name mapping |
| | | def get_distric_dict(): |
| | | data = base.district |
| | | result = {} |
| | | for row ,column ,id ,name in data: |
| | | key = str(row)+","+str(column) |
| | | result[key]= name |
| | | return result |
| | | |
| | | |
| | | # Look up the ratio for a given row and column |
| | | def get_row_column_ratio(row, column ,district_dict, area_dict ): |
| | | key = str(row) +"," + str(column) |
| | | if area_dict.__contains__("全部区域"):   # "全部区域" = "all regions" |
| | | return area_dict["全部区域"] |
| | | |
| | | if district_dict.__contains__(key): |
| | | name = district_dict[key] |
| | | ratio = area_dict[name] |
| | | return float(ratio) |
| | | |
| | | return float(1.0) |
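| | | |
| | | # Minimal usage sketch (row/column values are placeholders): build both lookup |
| | | # tables once per call to updateRiverFile, then resolve the pumping ratio cell |
| | | # by cell; cells not found in any district fall back to a ratio of 1.0. |
| | | # district_dict = get_distric_dict()                          # "row,column" -> district name |
| | | # area_dict = get_area_dict(predictiondata["mine"]["area"])   # name -> ratio |
| | | # r = get_row_column_ratio(120, 85, district_dict, area_dict) |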
| | | |
| | | |
| | | |
| | | |
| | | def updateRiverFile(model_name,predictiondata): |
| | | |
| | | flag = check_rain_param(predictiondata) |
| | | |
| | | if flag == "true": |
| | | |
| | | rain_ratio = float(predictiondata["rain"]["ratio"]) |
| | | rain_base_year = predictiondata["rain"]["base_year"] |
| | | |
| | | river_ratio= float(predictiondata["river"]["ratio"]) |
| | | area= predictiondata["mine"]["area"] |
| | | |
| | | ws = base.predictParamModel + rain_base_year |
| | | |
| | | baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | update_model_ws = base.model_dir + model_name |
| | | updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | district_dict = get_distric_dict() |
| | | |
| | | area_dict = get_area_dict(area) |
| | | |
| | | lrcq = {} |
| | | |
| | | for per in range(predict_per): |
| | | wel = [] |
| | | array2d = [] |
| | | |
| | | wel = baseMdoel.wel.stress_period_data.__getitem__(kper = per) |
| | | wel_len = len(wel) |
| | | |
| | | # lateral boundary inflow |
| | | for i in range (0,453): |
| | | wel[i][3] = wel[i][3] * rain_ratio |
| | | |
| | | # river |
| | | for i in range(453, 562): |
| | | wel[i][3] = wel[i][3] * river_ratio |
| | | |
| | | # pumping wells |
| | | for i in range(562,wel_len): |
| | | |
| | | r = (float) (get_row_column_ratio(wel[i][1], wel[i][2], district_dict, area_dict)) |
| | | wel[i][3] = wel[i][3] * r |
| | | |
| | | |
| | | # rebuild the stress-period array |
| | | for Layer, Row, Column, Q in wel: |
| | | array = [Layer, Row, Column, Q] |
| | | array2d.append(array) |
| | | |
| | | flex_data= getFlexdata(model_name) |
| | | |
| | | for i in range(len(flex_data)): |
| | | array2d.append(flex_data[i]) |
| | | |
| | | lrcq[per] = array2d |
| | | |
| | | flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq) |
| | | updateMdoel.write_input() |
| | | |
| | | else: |
| | | print("Well--Riveræä»¶æ éä¿®æ¹ï¼") |
| | | |
| | | # Append the additional pumping entries defined in pump_well.json |
| | | def getFlexdata(model_name): |
| | | welldata="" |
| | | well_path = base.model_dir + model_name +"\\pump_well.json" |
| | | data=[] |
| | | if os.path.exists(well_path): |
| | | with open(well_path,encoding='utf-8') as f: |
| | | welldata = json.load(f) |
| | | wel= welldata["well"] |
| | | |
| | | for i in range (len(wel)): |
| | | layer = int (wel[i]['layer'])-1 |
| | | row= int(wel[i]['row'])-1 |
| | | column = int(wel[i]['column'])-1 |
| | | v = float(wel[i]['value']) |
| | | arr = [layer,row, column, v] |
| | | data.append(arr) |
| | | |
| | | return data |
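| | | |
| | | # For reference, getFlexdata expects pump_well.json to look roughly like the |
| | | # sketch below (field names inferred from the code above; values are |
| | | # placeholders). layer/row/column are 1-based in the file and converted to |
| | | # 0-based indices here; value becomes the well flux Q. |
| | | # { |
| | | #   "well": [ |
| | | #     {"layer": "1", "row": "120", "column": "85", "value": "-500.0"} |
| | | #   ] |
| | | # } |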
| | | |
| | | |
| | | def updateRchFile(model_name,predictiondata): |
| | | flag = check_rain_param(predictiondata) |
| | | if flag == "true": |
| | | # wet-year / dry-year (base-year scenario) |
| | | base_year = predictiondata["rain"]["base_year"] |
| | | ratio= float(predictiondata["rain"]["ratio"]) |
| | | |
| | | # model folder that provides the source data |
| | | base_ws= base.predictParamModel + base_year |
| | | |
| | | baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | update_model_ws = base.model_dir + model_name |
| | | updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | for per in range(predict_per): |
| | | |
| | | item = baseMdoel.rch.rech.__getitem__(kper = per) |
| | | array2d = item.get_value() |
| | | array2d_len = len(array2d) |
| | | |
| | | for i in range(array2d_len): |
| | | |
| | | array_len = len(array2d[i]) |
| | | for j in range(array_len): |
| | | |
| | | if str(base.area_array[i][j]) != '-9999': |
| | | |
| | | array2d[i][j] = array2d[i][j] * ratio |
| | | |
| | | updateMdoel.rch.rech.__setitem__(key = per, value=array2d) |
| | | |
| | | rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech) |
| | | rch.write_file(check=False) |
| | | |
| | | else: |
| | | print("Rchæä»¶æ éä¿®æ¹ï¼") |
| | | |
| | | def check_rain_param(predictiondata): |
| | | |
| | | rain = predictiondata["rain"] |
| | | if not rain: |
| | | print("Rch颿µåæ°ä¸ºç©ºï¼æ éè¦ä¿®æ¹") |
| | | return "false" |
| | | |
| | | base_year = predictiondata["rain"]["base_year"] |
| | | if not base_year: |
| | | print(" Rch : base_year prediction parameter is empty; no modification needed") |
| | | return "false" |
| | | |
| | | ratio= predictiondata["rain"]["ratio"] |
| | | if not ratio: |
| | | print(" Rch : ratio prediction parameter is empty; no modification needed") |
| | | return "false" |
| | | |
| | | return "true" |
| | | |
| | | |
| | | # Update the bas6 file with the initial head information |
| | | def updateBase6File(model_name,predictdata): |
| | | model_ws = base.model_dir + model_name |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | |
| | | # initial head |
| | | init_header = predictdata["initHeader"] |
| | | |
| | | dir = base.model_dir + init_header + "\\modflow.head" |
| | | head = bf.HeadFile(dir) |
| | | alldata = head.get_alldata() |
| | | |
| | | lens = len(alldata) |
| | | last_index = lens-3 |
| | | |
| | | last_array3= alldata[last_index] |
| | | |
| | | strt = ml.bas6.strt |
| | | # strs = ml.bas6.strt.__getitem__(2) |
| | | # print(strs.get_value()) |
| | | strt.__setitem__(0,last_array3[0]) |
| | | strt.__setitem__(1,last_array3[1]) |
| | | strt.__setitem__(2,last_array3[2]) |
| | | |
| | | |
| | | mfBase6 = flopy.modflow.ModflowBas( |
| | | ml, |
| | | strt= strt, |
| | | ibound=ml.bas6.ibound, |
| | | hnoflo=ml.bas6.hnoflo, |
| | | extension="bas6",) |
| | | |
| | | mfBase6.write_file(check=False) |
| | | |
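| | | # Hedged usage sketch: the initHeader field of prediction.json must name an |
| | | # existing model folder under base.model_dir that already contains a |
| | | # modflow.head file; updateBase6File copies the head arrays from the |
| | | # third-from-last saved timestep of that run as the new starting heads. |
| | | # Model names below are placeholders: |
| | | # updateBase6File("202401_202412", {"initHeader": "202301_202312"}) |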
| | | |
| | | # Modify the dis file |
| | | def updateDisFile(model_name, per): |
| | | |
| | | model_ws = base.model_dir + model_name |
| | | ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws, |
| | | exe_name="mf2005", verbose=True, version="mf2005", check=False) |
| | | |
| | | mfDis = flopy.modflow.ModflowDis( |
| | | ml, |
| | | nlay=ml.dis.nlay, |
| | | nrow=ml.dis.nrow, |
| | | ncol=ml.dis.ncol, |
| | | nper=per, |
| | | delr=ml.dis.delr, |
| | | delc=ml.dis.delc, |
| | | top=ml.dis.top, |
| | | botm=ml.dis.botm, |
| | | perlen=ml.dis.perlen, |
| | | nstp=ml.dis.nstp, |
| | | tsmult=ml.dis.tsmult, |
| | | steady=ml.dis.steady, |
| | | itmuni=ml.dis.itmuni, |
| | | lenuni=ml.dis.lenuni, |
| | | extension="dis") |
| | | |
| | | mfDis.write_file(check=False) |
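| | | |
| | | # Usage sketch, mirroring the call made in run_model_predict above: before a |
| | | # prediction run the stress-period count is reset to the 12 monthly prediction |
| | | # periods (the model name below is a placeholder): |
| | | # updateDisFile("202401_202412", predict_per) |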
New file |
| | |
| | | # -*- coding: utf-8 -*- |
| | | """ |
| | | Created on Thu Dec 21 12:02:24 2023 |
| | | |
| | | @author: ZMK |
| | | """ |
| | | |
| | | import Base |
| | | |
| | | import numpy as np |
| | | |
| | | # model_config ='C:\\Users\\ZMK\\Desktop\\objclipdig\\ModelFlow_xishan\\config.ini' |
| | | |
| | | # conf = np.loadtxt(model_config, dtype=str,encoding='utf-8') |
| | | |
| | | |
| | | # conf[1]='1' |
| | | |
| | | # np.savetxt(model_config,conf,fmt='%100s',encoding='utf-8') |
| | | |
| | | # print(conf) |
| | | Base.updateModelConfig("202301_202312") |