From 736a7162bc5e5da76cafacecd821a46efc09f143 Mon Sep 17 00:00:00 2001
From: zmk <496160012@qq.com>
Date: Tue, 26 Dec 2023 18:15:03 +0800
Subject: [PATCH] Commit code
---
Base.py | 140 +++
test.py | 22
MainAPI.py | 949 +++++++++++++++++++++++
Predict - 副本.py | 592 ++++++++++++++
Predict.py | 347 ++++++++
ModelPeriod.py | 89 ++
CalHead.py | 232 +++++
 7 files changed, 2371 insertions(+), 0 deletions(-)
diff --git a/Base.py b/Base.py
new file mode 100644
index 0000000..f4542fb
--- /dev/null
+++ b/Base.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Oct 20 16:15:23 2023
+
+@author: ZMK
+"""
+
+import numpy as np
+import shutil
+import os
+
+
+prefix ='C:\\Users\\ZMK\\Desktop\\xsModel2\\'
+
+baseModel = prefix + 'verifyModel\\'
+
+predictModel= prefix + 'predictModel\\'
+
+predictParamModel= prefix + 'predictParamModel\\'
+
+model_dir = prefix + '0612Model\\'
+
+obswellpath = prefix + '监测井.ini'   # monitoring wells
+
+obs_well = np.loadtxt(obswellpath, dtype=str,encoding='utf-8')
+
+district_path = prefix +"区县.ini"   # districts
+
+district= np.loadtxt(district_path, dtype=str,encoding='utf-8')
+
+pumpwellpath = prefix +'抽水井.ini'   # pumping wells
+
+pump_well = np.loadtxt(pumpwellpath, dtype=str,encoding='utf-8')
+
+period_path = prefix +"period.json"
+
+areapath = prefix + '分区.ini'   # zoning grid
+area_array = np.loadtxt(areapath, dtype=str,encoding='utf-8')
+
+# water balance file paths
+water_equ_path = prefix + 'water_equ.ini'
+water_equ = np.loadtxt(water_equ_path, dtype=str,encoding='utf-8')
+
+water_equ_path2022 = prefix + 'water_equ2022.ini'
+water_equ2022 = np.loadtxt(water_equ_path2022, dtype=str,encoding='utf-8')
+
+
+model_config ='C:\\Users\\ZMK\\Desktop\\objclipdig\\ModelFlow_xishan\\config.ini'
+
+model3d_path='D:/javaCode/xishan/xishan/xishan/output2/'
+
+modeldata_csv_path ="C:/Users/ZMK/Desktop/xsModel2/0612Model/"
+
+exe_path = 'C:/Users/ZMK/Desktop/objclipdig/ModelFlow_xishan/ModelFlow_xishan.exe'
+
+# invoke the external model exe
+def callModelexe():
+ os.system(exe_path)
+
+
+# update the exe config for the given model
+def updateModelConfig(model_name):
+ conf = np.loadtxt(model_config, dtype=str,encoding='utf-8')
+ outpath = "outpath=" + model3d_path + model_name
+ csvpath = "csvpath=" + modeldata_csv_path + model_name +"/output"
+ conf[1]=outpath
+ conf[2]=csvpath
+ np.savetxt(model_config,conf, newline='\n', fmt='%s' , encoding='utf-8')
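+# Hedged sketch of the config.ini layout updateModelConfig assumes: a plain
+# text file where line index 1 holds "outpath=..." and line index 2 holds
+# "csvpath=..." (any other lines are rewritten unchanged). Key names other
+# than these two are illustrative, not taken from the real config:
+#
+#   exe=ModelFlow_xishan.exe
+#   outpath=D:/javaCode/xishan/xishan/xishan/output2/202001_202212
+#   csvpath=C:/Users/ZMK/Desktop/xsModel2/0612Model/202001_202212/output
+#
+# Usage sketch:
+#   updateModelConfig("202001_202212")   # rewrites lines 1 and 2 of config.ini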
+
+
+
+def getPumpWellName(row,column):
+
+ for index, r, c,ids, qu ,name in pump_well:
+ if r==row and c == column:
+ return name
+
+ return "NONE"
+
+
+# build a dict that groups grid cells by zone id: {zone: [(row, col), ...]}
+def getAreas():
+ arr = np.loadtxt(areapath, dtype=int)
+ dict ={}
+ for i in range(len(arr)):
+ for j in range(len(arr[i])):
+ zb = str(arr[i][j])
+ if arr[i][j] == -9999:
+ continue
+ if zb not in dict:
+ dict[zb] = [(i,j)]
+ else:
+ dict[zb].append((i,j))
+ return dict
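+# Usage sketch for getAreas(): it maps each zone id found in 分区.ini to the
+# list of (row, col) cells in that zone, skipping -9999 cells. Counts below
+# are illustrative:
+#   areas = getAreas()
+#   for zone_id, cells in areas.items():
+#       print(zone_id, len(cells))    # e.g. "1 1250"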
+
+
+def getAreaDictFirstIndex():
+ arr = np.loadtxt(areapath, dtype=int)
+ dict ={}
+ for i in range(len(arr)):
+ for j in range(len(arr[i])):
+ if arr[i][j] == -9999:
+ continue
+ if arr[i][j] not in dict:
+ dict[arr[i][j]] = [(i,j)]
+
+ return dict
+
+
+# build a dict that groups flattened cell ids by zone id
+def getAreaDictIndexArray():
+ arr = np.loadtxt(areapath, dtype=int)
+ dict_array={}
+ for i in range(len(arr)):
+ for j in range(len(arr[i])):
+ zb= str(arr[i][j])
+ if arr[i][j] == -9999:
+ continue
+ if zb not in dict_array:
+ array= []
+ index = getCellIdByRC(i+1,j+1)
+ array.append(index)
+ dict_array[zb] = array
+ else:
+ index = getCellIdByRC(i+1,j+1)
+ dict_array[zb].append(index)
+
+ return dict_array
+
+
+def getCellIdByRC(rowVal, columnVal):
+    # flatten a 1-based (row, column) pair to a 0-based cell id on the 114-column grid
+    return (rowVal - 1) * 114 + columnVal - 1
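+# Worked example of the mapping above (the grid is assumed to be 114 columns
+# wide, as hard-coded):
+#   getCellIdByRC(1, 1)   -> 0
+#   getCellIdByRC(1, 114) -> 113
+#   getCellIdByRC(2, 5)   -> 118   # (2 - 1) * 114 + 5 - 1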
+
+
+
+
+
+
+
+
diff --git a/CalHead.py b/CalHead.py
new file mode 100644
index 0000000..55ecad8
--- /dev/null
+++ b/CalHead.py
@@ -0,0 +1,232 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Oct 31 16:12:55 2023
+
+@author: ZMK
+"""
+
+import flopy
+import flopy.utils.binaryfile as bf
+import csv
+import Base as base
+import os
+import json
+import ModelPeriod
+import numpy as np
+
+
+
+def get_model_json(model_name):
+ period_json=""
+ prediction_path = base.model_dir + model_name +"\\prediction.json"
+ with open(prediction_path,encoding='utf-8') as f:
+ period_json = json.load(f)
+
+    return period_json
+
+def get_model_period(model_name):
+ period_json=""
+ prediction_path = base.model_dir + model_name +"\\prediction.json"
+ with open(prediction_path,encoding='utf-8') as f:
+ period_json = json.load(f)
+
+ start_time = period_json["start_time"]
+ end_time = period_json["end_time"]
+
+ months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
+    return months
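+# Usage sketch: with a prediction.json such as
+#   {"start_time": "2023-01", "end_time": "2023-03", ...}
+# get_model_period(model_name) returns ["2023-01", "2023-02", "2023-03"]
+# (model name and dates are illustrative).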
+
+
+# observation well chart data
+def obsChartdata(model_name, row, column):
+
+ row = int(row)-1
+ column = int(column)-1
+ dir = base.model_dir + model_name + "\\modflow.head"
+
+ head = bf.HeadFile(dir)
+ alldata = head.get_alldata()
+ period = len(alldata)
+
+ layer = 3
+
+ xdata = []
+ ydata = []
+ result = {}
+ for per in range(period):
+ for lay in range(layer):
+ if per % 3 == 0 and lay == 0:
+ md = (int)(lay / 3 + 1)
+ per_array = alldata[per][lay]
+
+ cell_data = (float)(per_array[row][column])
+ ydata.append(cell_data)
+
+ period_json= get_model_json(model_name)
+
+ start_time = period_json["start_time"]
+ end_time = period_json["end_time"]
+
+ months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
+
+ result = {"y_data": ydata, "x_data": months}
+ return result
+
+def getRowColumnById(index_id):
+ row = 104
+ column =114
+ count=0
+
+ for i in range(row):
+ for j in range(column):
+ if index_id == count:
+ return (i,j)
+ count = count +1
+ return ""
+
+
+
+# groundwater series for a flattened cell id
+def earthWaterChart(model_name, index_id):
+
+    row_column = getRowColumnById(index_id)
+
+ row = row_column[0]
+ column = row_column[1]
+ dir = base.model_dir + model_name + "\\modflow.head"
+
+ head = bf.HeadFile(dir)
+ alldata = head.get_alldata()
+ period = len(alldata)
+
+ layer = 3
+
+ ydata = []
+ result = {}
+ for per in range(period):
+ for lay in range(layer):
+ if per % 3 == 0 and lay == 0:
+
+ per_array = alldata[per][lay]
+
+ cell_data = (float)(per_array[row][column])
+ ydata.append(cell_data)
+
+ period_json= get_model_json(model_name)
+
+ start_time = period_json["start_time"]
+ end_time = period_json["end_time"]
+
+ months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
+
+ result = {"y_data": ydata, "x_data": months}
+ return result
+
+def heatmapdata(model_name,period):
+ dir = base.model_dir + model_name + "\\modflow.head"
+
+ head = bf.HeadFile(dir)
+
+ alldata = head.get_alldata()
+
+ index = int(period)*3
+ return alldata[index][0]
+
+
+# water balance calculation
+def waterEqu(model_name):
+ if model_name == '202001_202212':
+ water_equ_path = base.prefix + "\\water_equ.json"
+ with open(water_equ_path,encoding='utf-8') as f:
+ data = json.load(f)
+ return data
+ else:
+ year = model_name[0:4]
+ title =[year]
+ dict ={"title":title}
+
+ celldata = np.array(base.water_equ2022).tolist()
+
+ predict_json= get_model_json(model_name)
+
+ a1=float(celldata[0])
+ a2=float(celldata[1])
+ a3=float(celldata[2])
+ a4=float(celldata[3])
+
+ b1=float(celldata[4])
+ b2=float(celldata[5])
+ b3=float(celldata[6])
+
+ if predict_json["rain"]:
+ a1= float(predict_json["rain"]["ratio"]) * float(celldata[0])
+ a3= float(predict_json["rain"]["ratio"]) * float(celldata[2])
+ a4= float(predict_json["rain"]["ratio"]) * float(celldata[3])
+ b2= float(predict_json["rain"]["ratio"]) * float(celldata[5])
+ b3= float(predict_json["rain"]["ratio"]) * float(celldata[6])
+ if predict_json["river"]:
+ a2= float(predict_json["river"]["ratio"]) * float(celldata[1])
+
+    if predict_json["mine"]:
+        b1= b1   # extraction is left unchanged (no scaling applied)
+
+ in_data= a1+a2+a3+a4
+ out_data= b1 +b2 + b3
+ float_data=[a1,a2,a3,a4,in_data,b1,b2,b3,out_data,in_data-out_data]
+
+ inarray=[]
+    inarray.append({"name":"降水入渗量","value":a1})   # precipitation infiltration
+    inarray.append({"name":"河流入渗量","value":a2})   # river infiltration
+    inarray.append({"name":"L1侧向补给量","value":a3})  # L1 lateral inflow
+    inarray.append({"name":"L3侧向补给量","value":a4})  # L3 lateral inflow
+    outarray=[]
+    outarray.append({"name":"人工开采量","value":b1})   # artificial extraction
+    outarray.append({"name":"L1侧向流出量","value":b2})  # L1 lateral outflow
+    outarray.append({"name":"L3侧向流出量","value":b3})  # L3 lateral outflow
+ pie1={str(year):inarray}
+ pie2={str(year):outarray}
+
+ dict["pie1"]=pie1
+ dict["pie2"]=pie2
+
+ array2d=[]
+ array2d.append([str(year)])
+ for i in range(len(float_data)):
+ tmp=[]
+ tmp.append(str(float_data[i]))
+ array2d.append(tmp)
+ dict["data"]=array2d
+ return dict
+
+
+# export head results to csv files
+def exportCsV(model_name):
+
+ dir = base.model_dir + model_name + "\\modflow.head"
+ out_path = base.model_dir + model_name + "\\output\\"
+ if not os.path.exists(out_path):
+ os.mkdir(out_path)
+
+ head = bf.HeadFile(dir)
+
+ alldata = head.get_alldata()
+ month = len(alldata)
+ layer = 3
+
+ for i in range(month):
+ for j in range(layer):
+ if i % 3 == 0:
+ md = (int)(i / 3 + 1)
+ filename = out_path + str(md) + '-' + str(j+1) + '.csv'
+ f = open(filename, 'w', newline='')
+ writer = csv.writer(f)
+ for p in alldata[i][j]:
+ writer.writerow(p)
+ f.close()
+
+ return out_path
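+# Usage sketch (model name illustrative): exportCsV("202001_202212") writes one
+# CSV per sampled stress period (i % 3 == 0) and per layer into
+# <model>/output/, named "<period-index>-<layer>.csv", e.g. "1-1.csv",
+# "1-2.csv", "1-3.csv", "2-1.csv", ...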
+
+
+
+
+
diff --git a/MainAPI.py b/MainAPI.py
new file mode 100644
index 0000000..78b64b3
--- /dev/null
+++ b/MainAPI.py
@@ -0,0 +1,949 @@
+
+# import Flask classes
+from flask import Flask
+from flask import jsonify
+from flask import request
+from flask_cors import CORS
+import sys
+import numpy as np
+import pandas as pd
+import flopy
+import flopy.utils.binaryfile as bf
+import csv
+import time
+from openpyxl import load_workbook
+import os
+import shutil
+import Base as base
+import CalHead
+import Predict
+import json
+import ModelPeriod
+
+# The Flask constructor takes __name__, which points to the package the app lives in
+app = Flask(__name__)
+CORS(app, supports_credentials=True, resources=r'/*')
+
+
+# number of boundary cells
+iboundCellSize = 240
+iboundCellSize2= 213
+
+iboundCellSizeTotal= 453
+# number of river cells
+riverCellSize = 109
+
+iboundGroupSize = 5
+iboundGroup={1:[1,86],2:[87,111],3:[112,142],4:[143,170],5:[171,240]}
+
+iboundGroup3Size = 5
+iboundGroup3={1:[241,282],2:[283,354],3:[355,393],4:[394,436],5:[437,453]}
+
+riverGroupSize = 4
+riverGroup={1:[454,479],2:[480,505],3:[506,527],4:[528,562]}
+
+riverName=['雁翅-落坡岭','落坡岭-陇驾庄','陇驾庄-三家店','三家店-卢沟桥']   # Yongding River reaches
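+# Note on the lookup tables above: the wel stress-period rows are assumed to be
+# ordered so that 1-based positions 1..240 are layer-1 boundary cells, 241..453
+# are layer-3 boundary cells, 454..562 are river cells, and everything after
+# 562 is a pumping well. iboundGroup / iboundGroup3 / riverGroup store the
+# [start, end] positions of each named group, e.g. riverGroup[2] covers
+# positions 480..505.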
+
+# get row / column / layer / period parameters
+
+def getModel(model_name):
+ model_ws=""
+    if not model_name:
+        model_ws = base.model_dir + "202001_202212"
+    else:
+        model_ws = base.model_dir + model_name
+
+ m = flopy.modflow.Modflow.load("modflow.nam", model_ws = model_ws, exe_name="mf2005", verbose=True, version="mf2005", check=False)
+ return m
+
+
+@app.route('/baseparam/', methods=['GET'])
+def baseparam():
+
+ model_name = request.args.get('model_name')
+ ml= getModel(model_name)
+ nrclp = ml.get_nrow_ncol_nlay_nper()
+ dict = {"Row": nrclp[0], "Column": nrclp[1],
+ "Layer": nrclp[2], "period": nrclp[3]}
+ jsondata= CalHead.get_model_json(model_name)
+ start_time = jsondata["start_time"]
+ end_time = jsondata["end_time"]
+
+ months = ModelPeriod.get_months_in_range_ym(start_time, end_time)
+ dict["months"]=months
+ return jsonify(dict)
+
+
+# read the wel file; query parameters: period, model_name, layer
+@app.route('/welList/', methods=['GET'])
+def welList():
+
+ period = request.args.get('period')
+ model_name = request.args.get('model_name')
+ layerparam = request.args.get('layer')
+ ml= getModel(model_name)
+
+ wel = []
+    wel = ml.wel.stress_period_data.__getitem__(kper=int(period))
+ result = []
+ welarray = []
+ riverarray = []
+ iboundarray = []
+
+ for Layer, Row, Column, Q in wel:
+ dict = {"Layer": str(Layer), "Row": str(
+ Row), "Column": str(Column), "Q": str(Q)}
+ result.append(dict)
+
+ result_len = len(result)
+
+ if layerparam == '1':
+        # boundary cells
+ for i in range(0, 240):
+ iboundarray.append(result[i])
+        # river cells
+ for i in range(453, 562):
+ riverarray.append(result[i])
+
+ for i in range(562, result_len):
+ r = int (result[i]['Row'])+1
+ c =int (result[i]['Column'])+1
+ name = base.getPumpWellName(str(r), str(c))
+
+ result[i]['name']=name
+ welarray.append(result[i])
+
+ elif layerparam == '3':
+ for i in range(240, 453):
+ iboundarray.append(result[i])
+
+
+ ibounddict = {"name": "ibound", "data": iboundarray}
+ riverdict = {"name": "river", "data": riverarray}
+
+ weldict = {"name": "wel", "data": welarray}
+
+ data = []
+ data.append(riverdict)
+ data.append(ibounddict)
+ data.append(weldict)
+ return jsonify(data)
+
+# read the time series for a single well
+@app.route('/wel/', methods=['GET'])
+def wel():
+ row_param = request.args.get('Row')
+ column_param = request.args.get('Column')
+ model_name = request.args.get('model_name')
+
+ ml= getModel(model_name)
+ result = []
+
+ periods =CalHead.get_model_period(model_name)
+ periods_len= len(periods)
+
+ for i in range(periods_len):
+ wel = []
+ wel = ml.wel.stress_period_data.__getitem__(kper=i)
+ for Layer, Row, Column, Q in wel:
+ if str(Row) == row_param and str(Column) == column_param:
+
+ start_month = periods[i] + "-01"
+ end_month = ModelPeriod.last_day_of_month_start(periods[i])
+
+ dict = {"StartTime": start_month, "EndTime": end_month,
+ "Layer": str(Layer+1), "Row": str(Row), "Column": str(Column), "Q": str(Q)}
+ result.append(dict)
+
+ return jsonify(result)
+
+
+# modify the wel file for a single well
+@app.route('/welInput', methods=['POST'])
+def welInput():
+
+ json = request.get_json()
+ row_param = str(json['Row'])
+ column_param = str(json['Column'])
+
+ # model_name = request.args.get('model_name')
+ model_name = str(json['model_name'])
+
+
+ ml= getModel(model_name)
+
+    # ordered list of stress periods (json)
+ data = json['data']
+
+ periods =CalHead.get_model_period(model_name)
+ periods_len= len(periods)
+    # loop over stress periods and rebuild the wel data
+ # lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
+ lrcq = {}
+ for per in range(periods_len):
+ wel = []
+ wel = ml.wel.stress_period_data.__getitem__(kper=per)
+
+        # data for this stress period
+ array2d = []
+
+ for Layer, Row, Column, Q in wel:
+ array = []
+ if str(Row) == row_param and str(Column) == column_param:
+
+ array = [Layer, Row, Column, data[per]['Q']]
+ else:
+ array = [Layer, Row, Column, Q]
+
+ array2d.append(array)
+
+ lrcq[per] = array2d
+
+ flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
+ ml.write_input()
+
+ return jsonify("鏁版嵁鏇存柊瀹屾瘯锛�")
+
+
+# read a spreadsheet and regenerate the wel file
+@app.route('/cellFileInput', methods=['POST'])
+def cellFileInput():
+
+    path ='C:\\Users\\ZMK\\Desktop\\待发送\\cell文件.xlsx'
+
+ data = get_cell_data(path)
+
+ lrcq= get_cell_struct(data["excel1"],data["excel2"],data["excel3"])
+
+ model_name = request.args.get('model_name')
+
+ ml= getModel(model_name)
+
+ flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
+ ml.write_input()
+
+ return jsonify("sucess")
+
+
+def get_cell_struct(excel1,excel2,excel3):
+ lrcq={}
+
+    # number of stress periods
+ period = 7
+ start_row_index = 1
+
+    # boundary data sheet
+ for col in range (0,period):
+ array =[]
+ for row in range(start_row_index, len(excel1)):
+
+ arr = [excel1[row][2]-1,excel1[row][3]-1,excel1[row][4]-1,excel1[row][6+col]]
+ array.append(arr)
+ lrcq[col]= array
+
+    # river data sheet
+ for col in range (0,period):
+ array =[]
+ for row in range(start_row_index, len(excel2)):
+
+ arr = [excel2[row][2]-1,excel2[row][3]-1,excel2[row][4]-1,excel2[row][6+col]]
+ array.append(arr)
+
+ lrcq[col].extend(array)
+
+    # pumping data sheet
+ for col in range (0,period):
+
+ array =[]
+ for row in range(start_row_index, len(excel3)):
+
+ arr = [excel3[row][1]-1,excel3[row][2]-1,excel3[row][3]-1,excel3[row][8+col]]
+ array.append(arr)
+
+ lrcq[col].extend(array)
+
+ return lrcq
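+# Sketch of the lrcq structure built above (the shape flopy.modflow.ModflowWel
+# expects for stress_period_data): one list of [layer, row, column, Q] rows per
+# stress period, all 0-based; values below are illustrative:
+#   lrcq = {0: [[2, 3, 4, -100.0], ...], 1: [[2, 3, 4, -100.0], ...]}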
+
+
+
+# read the cell spreadsheet
+# file_path: path to the xlsx file
+def get_cell_data(file_path):
+
+ workbook = load_workbook(file_path)
+    sheetnames = workbook.sheetnames
+ #read first sheet
+ sheet1 = workbook[sheetnames[0]]
+ sheet2 = workbook[sheetnames[1]]
+ sheet3 = workbook[sheetnames[2]]
+
+ excel1 =[]
+ excel2=[]
+ excel3=[]
+    # iterate over each worksheet row by row
+ for row in sheet1.iter_rows(values_only=True):
+ array=[]
+ for cell in row:
+ array.append(cell)
+ excel1.append(array)
+
+ for row in sheet2.iter_rows(values_only=True):
+ array=[]
+ for cell in row:
+ array.append(cell)
+ excel2.append(array)
+
+ for row in sheet3.iter_rows(values_only=True):
+ array=[]
+ for cell in row:
+ array.append(cell)
+ excel3.append(array)
+
+    # close the Excel file
+ workbook.close()
+ data={"excel1":excel1,"excel2":excel2,"excel3":excel3}
+
+ return data
+
+
+
+# number of boundary groups
+@app.route('/iboundList/', methods=['GET'])
+def iboundList():
+
+ return jsonify(iboundGroupSize)
+
+
+# grouped boundary data
+@app.route('/iboundData/', methods=['GET'])
+def iboundData():
+
+ group_id = int(request.args.get('groupId'))
+
+ model_name = request.args.get('model_name')
+ ml= getModel(model_name)
+ data=[]
+ index = iboundGroup[group_id]
+ start_index = index[0]
+
+ periods =CalHead.get_model_period(model_name)
+ periods_len= len(periods)
+
+ for per in range(periods_len):
+ wel = []
+ wel = ml.wel.stress_period_data.__getitem__(kper = per)
+
+ result = []
+
+ for Layer, Row, Column, Q in wel:
+ dict = {"Layer": str(Layer+1), "Row": str(Row), "Column": str(Column), "Q": str(Q)}
+ result.append(dict)
+
+ start_month = periods[per] +"-01"
+ end_month = ModelPeriod.last_day_of_month_start(periods[per])
+
+ dict = {"StartTime": start_month, "EndTime": end_month,
+ "Layer": str(result[start_index]['Layer']),
+ "Q": str(result[start_index]['Q'])}
+ data.append(dict)
+
+ return jsonify(data)
+
+
+# modify boundary data
+@app.route('/iboundInput', methods=['POST'])
+def iboundInput():
+
+ json = request.get_json()
+ no = int(json['No'])
+    # ordered list of stress periods (json)
+ data = json['data']
+
+ model_name = json['model_name']
+ ml= getModel(model_name)
+
+ index = iboundGroup[no]
+ start_index = index[0]
+ end_index = index[1]
+
+ periods =CalHead.get_model_period(model_name)
+ periods_len= len(periods)
+    # loop over stress periods and rebuild the wel data
+ # lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
+ lrcq = {}
+ for per in range(periods_len):
+ wel = []
+ wel = ml.wel.stress_period_data.__getitem__(kper=per)
+
+        # data for this stress period
+ array2d = []
+
+ count = 1
+ for Layer, Row, Column, Q in wel:
+ array = []
+
+ if count>= start_index and count <= end_index:
+ array = [Layer, Row, Column, data[per]['Q']]
+ else:
+ array = [Layer, Row, Column, Q]
+
+ array2d.append(array)
+ count +=1
+
+ lrcq[per] = array2d
+
+ flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
+ ml.write_input()
+ return jsonify("鏁版嵁鏇存柊瀹屾瘯锛�")
+
+
+# list of river reaches
+@app.route('/riverList/', methods=['GET'])
+def riverList():
+
+ riverResult=[]
+ for i in range(len(riverName)):
+ item ={"id":i+1,"name":riverName[i]}
+ riverResult.append(item)
+ return jsonify(riverResult)
+
+
+# grouped river data
+@app.route('/riverData/', methods=['GET'])
+def riverData():
+ group_id = int(request.args.get('groupId'))
+ data=[]
+ index = riverGroup[group_id]
+ start_index = index[0]
+
+ model_name = request.args.get('model_name')
+ ml= getModel(model_name)
+
+ periods =CalHead.get_model_period(model_name)
+ periods_len= len(periods)
+
+ for per in range(periods_len):
+ wel = []
+ wel = ml.wel.stress_period_data.__getitem__(kper = per)
+ result = []
+ for Layer, Row, Column, Q in wel:
+ dict = {"Layer": str(Layer+1), "Row": str(
+ Row), "Column": str(Column), "Q": str(Q)}
+ result.append(dict)
+
+
+ start_month = periods[per] +"-01"
+ end_month = ModelPeriod.last_day_of_month_start(periods[per])
+
+ dict = {"StartTime": start_month, "EndTime": end_month,
+ "Layer": str(result[start_index]['Layer']),
+ "Q": str(result[start_index]['Q'])}
+ data.append(dict)
+
+ return jsonify(data)
+
+
+
+# modify river data
+@app.route('/riverInput', methods=['POST'])
+def riverInput():
+
+ json = request.get_json()
+ no = int(json['No'])
+    # ordered list of stress periods (json)
+ data = json['data']
+
+ index = riverGroup[no]
+ start_index = index[0]
+ end_index = index[1]
+ model_name = json['model_name']
+
+ ml= getModel(model_name)
+
+ periods =CalHead.get_model_period(model_name)
+ periods_len= len(periods)
+
+    # loop over stress periods and rebuild the wel data
+ # lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]}
+ lrcq = {}
+ for per in range(periods_len):
+ wel = []
+ wel = ml.wel.stress_period_data.__getitem__(kper=per)
+
+        # data for this stress period
+ array2d = []
+
+ count = 1
+ for Layer, Row, Column, Q in wel:
+ array = []
+
+ if count>= start_index and count <= end_index:
+ array = [Layer, Row, Column, data[per]['Q']]
+ else:
+ array = [Layer, Row, Column, Q]
+
+ array2d.append(array)
+ count +=1
+
+ lrcq[per] = array2d
+
+ flopy.modflow.ModflowWel(ml,stress_period_data=lrcq)
+ ml.write_input()
+ return jsonify("鏁版嵁鏇存柊瀹屾瘯锛�")
+
+
+# read recharge (precipitation) zones
+@app.route('/precipitation/', methods=['GET'])
+def precipitation():
+ model_name = request.args.get('model_name')
+
+ ml= getModel(model_name)
+ period = request.args.get('period')
+
+ per = int(period)
+ item = ml.rch.rech.__getitem__(kper=per)
+ value = item.get_value()
+ item_data = np.array(value).tolist()
+
+    # dict of zone id -> first (i, j) cell, e.g. {"1": [(i, j)]}
+ areadict1= base.getAreaDictFirstIndex()
+
+    # dict of zone id -> flattened cell ids, e.g. {"1": [a, b, c, d]}
+ areadict = base.getAreaDictIndexArray()
+
+    # dict of zone id -> recharge value, e.g. {"1": data}
+ areadatadict={}
+
+ for key in areadict1:
+ index1 = areadict1[key]
+ i = index1[0][0]
+ j= index1[0][1]
+
+ data= item_data[i][j]
+
+ areadatadict[str(key)]= format(data,'.8f')
+
+ result =[]
+ result.append(areadatadict)
+ result.append(areadict)
+ return jsonify(result)
+
+
+# modify precipitation data
+# @app.route('/precipitationInput', methods=['POST'])
+# def precipitationInput():
+
+# json = request.get_json()
+# model_name= str(json['model_name'])
+# period = int(json['period'])
+# # ordered list of stress periods (json)
+# data = json['data']
+# dict = {}
+# for i in range(len(data)):
+# q1 = data[i]['Q1']
+# q2 = data[i]['Q2']
+# dict[q1] = q2
+
+# ml= getModel(model_name)
+
+# item = ml.rch.rech.__getitem__(kper=period)
+# array2d = item.get_value()
+
+# count = 0
+
+# array2d_len = len(array2d)
+
+# for i in range(array2d_len):
+
+# array_len = len(array2d[i])
+
+# for j in range(array_len):
+
+# va = str(array2d[i][j])
+# if va in dict:
+# count += 1
+# array2d[i][j] = float(dict[va])
+
+# ml.rch.rech.__setitem__(key=period, value=array2d)
+
+# rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech)
+# rch.write_file(check=False)
+# #ml.write_input()
+
+# return jsonify("降水参数修改完毕！")
+
+
+@app.route('/precipitationInput', methods=['POST'])
+def precipitationInput():
+
+ json = request.get_json()
+ model_name= str(json['model_name'])
+ period = int(json['period'])
+    # ordered list of stress periods (json)
+ data = json['data']
+ dict = {}
+ for i in range(len(data)):
+ q1 = data[i]['Q1']
+ No = data[i]['No']
+ dict[No] = q1
+
+ ml= getModel(model_name)
+
+ item = ml.rch.rech.__getitem__(kper=period)
+ array2d = item.get_value()
+ areas= base.getAreas()
+
+ for key in areas:
+
+ tuples= areas[key]
+ zblen= len(tuples)
+ values = float(dict[key])
+ for i in range(zblen):
+ x = tuples[i][0]
+ y = tuples[i][1]
+ array2d[x][y]= values
+
+ ml.rch.rech.__setitem__(key=period, value=array2d)
+
+ rch = flopy.modflow.ModflowRch(ml, rech = ml.rch.rech)
+ rch.write_file(check=False)
+ # ml.write_input()
+
+ return jsonify("闄嶆按鍙傛暟淇敼瀹屾瘯锛�")
+
+# import a precipitation spreadsheet and update the rch file
+@app.route('/precipitationInputFile', methods=['POST'])
+def precipitationInputFile():
+
+ model_name = request.args.get('model_name')
+ ml= getModel(model_name)
+ save_path = 'C:/Users/ZMK/Desktop/test1/' + "1111.xlsx"
+ file = request.files.get('file')
+
+ if file:
+ file.save(save_path)
+
+    # parse the uploaded excel sheet
+ stations = get_station_struct(save_path)
+
+    # loop over stress periods
+    # perd: stress-period index
+    # array2d: 2-D recharge array of each period
+ for perd in range(0,36):
+ period = perd
+ item = ml.rch.rech.__getitem__(kper=period)
+ array2d = item.get_value()
+
+ array2d_len = len(array2d)
+ count = 0
+        # for this period, map each station's old recharge value to its new value
+        dict = {}
+ for k in range(0,len(stations)):
+ row = stations[k]["row"]
+ column = stations[k]["column"]
+
+ data_old = array2d[row][column]
+ data_new = stations[k]["data"][perd]
+ dict[data_old]= data_new
+
+
+        # apply the mapping to every cell of this period
+ for i in range(array2d_len):
+
+ array_len = len(array2d[i])
+
+ for j in range(array_len):
+
+ va = str(array2d[i][j])
+ if va in dict:
+ array2d[i][j] = float(dict[va])
+
+        # write array2d back into the corresponding stress period
+ ml.rch.rech.__setitem__(key=period, value=array2d)
+
+ rch = flopy.modflow.ModflowRch(ml, rech=ml.rch.rech)
+ rch.write_file(check=False)
+ # ml.write_input()
+        return '文件上传成功'   # upload succeeded
+ else:
+        return '上传失败，未选择文件'   # upload failed: no file selected
+
+
+# read station data from the spreadsheet and build the station structure
+# file_path: path to the xlsx file
+def get_station_struct(file_path):
+
+ workbook = load_workbook(file_path)
+    sheetnames = workbook.sheetnames
+ #read first sheet
+ sheet = workbook[sheetnames[0]]
+
+ array2d_excel=[]
+    # iterate over the whole worksheet
+ for row in sheet.iter_rows(values_only=True):
+ array=[]
+ for cell in row:
+ array.append(cell)
+ array2d_excel.append(array)
+    # close the Excel file
+ workbook.close()
+
+    # column index where the data values start
+    data_start_index=6
+    # row index where the station info starts
+    start_row_index = 1
+    # station records
+ stations = []
+ for i in range (start_row_index,len(array2d_excel)):
+ st={"name":array2d_excel[i][1],"row":array2d_excel[i][4],"column":array2d_excel[i][5]}
+ data=[]
+ for j in range(data_start_index,len(array2d_excel[i])):
+ cell_data = array2d_excel[i][j]
+ cell_data= cell_data/100/30*0.15
+ data.append(round(cell_data, 6))
+ st["data"]= data
+ stations.append(st)
+
+ return stations
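+# Sketch of the structure returned above (names and values illustrative):
+#   stations = [{"name": "某站", "row": 10, "column": 20,
+#                "data": [0.000123, ...]},   # one value per stress period
+#               ...]
+# Each raw spreadsheet value is scaled by / 100 / 30 * 0.15 (assumed unit
+# conversion plus an infiltration coefficient) and rounded to 6 decimals.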
+
+
+# run the model
+@app.route('/runModel/', methods=['GET'])
+def runModel():
+ model_name = request.args.get('model_name')
+
+ msg= Predict.run_model_predict(model_name)
+    # export csv files
+ csvpath = CalHead.exportCsV(model_name)
+
+    # update the 3-D grid config of the model
+ base.updateModelConfig(model_name)
+
+    # create the 3-D grid output directory for the model
+ filedir = base.model3d_path + model_name
+
+ print(filedir)
+ if not os.path.exists(filedir):
+ os.makedirs(filedir, exist_ok=True)
+ base.callModelexe()
+
+ return jsonify(msg)
+
+# generate model csv files
+@app.route('/runModelCsv/', methods=['GET'])
+def runModelCsv():
+
+ model_name = request.args.get('model_name')
+ outpath = CalHead.exportCsV(model_name)
+ result={"code":200,"msg":"鐢熸垚璁$畻缁撴灉CSV鏂囦欢瀹屾瘯锛�","output_path":outpath}
+ return jsonify(result)
+
+
+
+# initial water level info
+@app.route('/initWater/', methods=['GET'])
+def initWater():
+
+ period = request.args.get('period')
+
+ per = int(period)
+
+ model_name = request.args.get('model_name')
+
+ ml= getModel(model_name)
+ item = ml.rch.rech.__getitem__(kper=per)
+ value = item.get_value()
+ t = np.array(value).tolist()
+ return jsonify(t)
+
+# create a new model
+@app.route('/saveModel/', methods=['GET'])
+def saveModel():
+
+ modelname = request.args.get('name')
+ startTime = request.args.get('startTime')
+ endTime = request.args.get('endTime')
+ file_list = os.listdir(base.model_dir)
+ for name in file_list:
+ if name == modelname:
+ return jsonify("妯″瀷鍚嶇О宸茬粡瀛樺湪锛屼笉鍏佽閲嶅鍒涘缓锛�")
+
+ dir = base.model_dir + modelname
+ shutil.copytree(base.predictModel,dir)
+
+ jsondata={"model_name":modelname,"start_time":startTime,"end_time":endTime}
+ predictionJson = base.model_dir + modelname +"\\prediction.json"
+ with open(predictionJson, "w",encoding='utf-8') as outfile:
+ json.dump(jsondata, outfile,ensure_ascii=False)
+
+ return jsonify("鍒涘缓鏂版ā鍨嬪畬姣曪紒")
+
+
+# list existing models
+@app.route('/ModelList/', methods=['GET'])
+def ModelList():
+
+ file_list = os.listdir(base.model_dir)
+ return jsonify(file_list)
+
+
+# save prediction scenario parameters
+@app.route('/prediction', methods=['POST'])
+def prediction():
+
+ jsondata = request.get_json()
+ model_name = str(jsondata['model_name'])
+ file_list = os.listdir(base.model_dir)
+ if model_name not in file_list:
+ return jsonify("妯″瀷涓嶅瓨鍦紝淇濆瓨澶辫触锛�")
+
+ predictionJson = base.model_dir + model_name +"\\prediction.json"
+ with open(predictionJson, "w",encoding='utf-8') as outfile:
+ json.dump(jsondata, outfile,ensure_ascii=False)
+
+ return jsonify("淇濆瓨棰勬祴鍦烘櫙鍙傛暟瀹屾瘯锛�")
+
+
+# query prediction scenario parameters
+@app.route('/predictionparam', methods=['GET'])
+def predictionparam():
+
+ model_name = request.args.get('model_name')
+ file_list = os.listdir(base.model_dir)
+ if model_name not in file_list:
+ return jsonify("妯″瀷涓嶅瓨鍦紒")
+
+ predictiondata=""
+ prediction_path = base.model_dir + model_name +"\\prediction.json"
+ if os.path.exists(prediction_path):
+ with open(prediction_path,encoding='utf-8') as f:
+ predictiondata = json.load(f)
+
+ welldata=""
+ well_path = base.model_dir + model_name +"\\pump_well.json"
+
+ if os.path.exists(well_path):
+ with open(well_path,encoding='utf-8') as f:
+ welldata = json.load(f)
+
+ if not welldata and not predictiondata:
+ return jsonify([])
+
+ if not predictiondata:
+ return jsonify(welldata)
+
+ if not welldata:
+ return jsonify(predictiondata)
+
+ merged_dict = {**predictiondata, **welldata}
+
+ return jsonify(merged_dict)
+
+
+
+# prediction scenario -- save wells
+@app.route('/pumpsavewell', methods=['POST'])
+def pump_savewell():
+
+ jsondata = request.get_json()
+ model_name = str(jsondata['model_name'])
+ file_list = os.listdir(base.model_dir)
+ if model_name not in file_list:
+ return jsonify("妯″瀷涓嶅瓨鍦紝淇濆瓨澶辫触锛�")
+
+ pump_json = base.model_dir + model_name +"\\pump_well.json"
+ with open(pump_json, "w") as outfile:
+ json.dump(jsondata, outfile)
+
+ return jsonify("淇濆瓨浜曞弬鏁板畬姣曪紒")
+
+# prediction scenario -- import well data
+@app.route('/pumpimportdata', methods=['POST'])
+def pump_importdata():
+
+ model_name = request.form.get('model_name')
+
+ # ml= getModel(model_name)
+ file = request.files.get('file')
+
+ save_path = base.model_dir + model_name +"\\extra_cell.xlsx"
+
+ if file:
+ file.save(save_path)
+
+ resultDict={"code":200,"msg":"淇濆瓨鏁版嵁瀹屾瘯锛�"}
+ return jsonify(resultDict)
+
+# observation well list
+@app.route('/obsWellList', methods=['GET'])
+def obsWellList():
+ obswell= base.obs_well
+ dict =[]
+ for name , row ,column in obswell:
+ obj ={"name":name,"row":row,"column":column,"Layer":1}
+ dict.append(obj)
+
+ return jsonify(dict)
+
+
+# observation well chart data endpoint
+@app.route('/obsWellChart', methods=['GET'])
+def obsWellChart():
+ model_name = request.args.get('model_name')
+ row = request.args.get('row')
+ column = request.args.get('column')
+
+ result = CalHead.obsChartdata(model_name, row, column)
+
+ return jsonify(result)
+
+
+# prediction page: Yongding River chart
+@app.route('/predictRiverChart', methods=['GET'])
+def predictRiverChart():
+ base_year = request.args.get('base_year')
+ start_time = request.args.get('start_time')
+ end_time = request.args.get('end_time')
+
+ return jsonify(Predict.predict_river_chart(base_year, start_time, end_time))
+
+# prediction page: precipitation chart
+@app.route('/predictWaterChart', methods=['GET'])
+def predictWaterChart():
+ base_year = request.args.get('base_year')
+ start_time = request.args.get('start_time')
+ end_time = request.args.get('end_time')
+ return jsonify(Predict.predict_water_chart(base_year, start_time, end_time))
+
+@app.route('/heatmap', methods=['GET'])
+def heatmap():
+ model_name = request.args.get('model_name')
+ period = request.args.get('period')
+ data = CalHead.heatmapdata(model_name,period)
+ maximum_value = np.max(data)
+ return jsonify(np.array(data).tolist())
+
+
+# water balance
+@app.route('/waterEqu', methods=['GET'])
+def waterEqu():
+ model_name = request.args.get('model_name')
+ data = CalHead.waterEqu(model_name)
+ return jsonify(data)
+
+
+# globe page data
+@app.route('/earthWaterChart', methods=['GET'])
+def earthWaterChart():
+
+ indexId = int(request.args.get('index_id'))
+ data = CalHead.earthWaterChart("202001_202212",indexId)
+ return jsonify(data)
+
+if __name__ == '__main__':
+    #app.run()  # host IP, port, and debug mode can be specified here
+ app.run(host="192.168.0.122", port=5000)
+
+
+
+
diff --git a/ModelPeriod.py b/ModelPeriod.py
new file mode 100644
index 0000000..7c9a407
--- /dev/null
+++ b/ModelPeriod.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Nov 1 11:34:08 2023
+
+@author: ZMK
+"""
+from datetime import datetime
+import calendar
+from dateutil import rrule
+import json
+import Base as base
+import os
+
+
+
+# get the last day of the month for the given year/month
+def last_day_of_month(year, month, day):
+
+ d = calendar.monthrange(year, month)
+ last_day = str(year) +"-" +str(month) + "-" + str(d[1])
+ return last_day
+
+def last_day_of_month_start(month):
+
+ start_date = month.split('-')
+ last_day= last_day_of_month(int(start_date[0]),int(start_date[1]),1)
+ return last_day
+
+
+
+def get_months_in_range_ym(start_time, end_time):
+
+ start=datetime.strptime(start_time,'%Y-%m')
+ end=datetime.strptime(end_time,'%Y-%m')
+
+ count =rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).count()
+
+ months=[]
+ for i in range(count) :
+ m = rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).__getitem__(i)
+ formatted_date = m.strftime("%Y-%m")
+ months.append(formatted_date)
+
+ return months
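+# Usage sketch:
+#   get_months_in_range_ym("2023-01", "2023-03")
+#   -> ["2023-01", "2023-02", "2023-03"]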
+
+
+def get_months_in_range_count(start_time, end_time):
+
+ start=datetime.strptime(start_time,'%Y-%m')
+ end=datetime.strptime(end_time,'%Y-%m')
+ count =rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).count()
+ return count
+
+
+def get_months_in_range(start_time, end_time):
+
+ start=datetime.strptime(start_time,'%Y-%m')
+ end=datetime.strptime(end_time,'%Y-%m')
+
+ count =rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).count()
+
+ months=[]
+ for i in range(count) :
+ m = rrule.rrule(rrule.MONTHLY,dtstart=start,until=end).__getitem__(i)
+ formatted_date = m.strftime("%Y-%m")
+ start_date = formatted_date.split('-')
+ last_day= last_day_of_month(int(start_date[0]),int(start_date[1]),1)
+
+ months.append(formatted_date+"-01" +"," + last_day)
+
+ return months
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git "a/Predict - \345\211\257\346\234\254.py" "b/Predict - \345\211\257\346\234\254.py"
new file mode 100644
index 0000000..bbbfccc
--- /dev/null
+++ "b/Predict - \345\211\257\346\234\254.py"
@@ -0,0 +1,592 @@
+
+# import Flask classes
+from flask import Flask
+from flask import jsonify
+from flask import request
+from flask_cors import CORS
+import sys
+import numpy as np
+import pandas as pd
+import flopy
+import flopy.utils.binaryfile as bf
+import csv
+import time
+from openpyxl import load_workbook
+import os
+import shutil
+import json
+import Base as base
+import CalHead
+import ModelPeriod
+
+
+# strt = ml.bas6.strt
+# # strs = ml.bas6.strt.__getitem__(1)
+# # print(strs.get_value())
+
+# mdBase = flopy.modflow.ModflowBas(ml,strt=1.0,ibound=ml.bas6.ibound)
+# mdBase.write_file(check=False)
+
+
+base_init_year=["2020","2021","2022"]
+river_start_index = 454
+river_end_index =562
+
+# number of prediction stress periods
+predict_per = 12
+
+# precipitation
+# def predict_water_chart(base_year,start_time ,end_time):
+# model_ws = base.baseModel
+# baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
+# exe_name="mf2005", verbose=True, version="mf2005", check=False)
+# index = 0
+# if base_year in base_init_year:
+# index = base_init_year.index(str(base_year))
+
+# y_data=[]
+# x_data=[]
+# satrt_index = index*12
+# end_index = satrt_index+12
+# for per in range(satrt_index,end_index):
+# item = baseMdoel.rch.rech.__getitem__(kper=per)
+# value = item.get_value()
+# value_float = np.array(value)
+# avg = value_float.mean()
+# y_data.append(float (avg))
+
+# start_month = str(base_year) +"-01"
+# end_month = str(base_year) +"-12"
+# x_data= ModelPeriod.get_months_in_range_ym(start_month,end_month)
+# result = {"y_data": y_data, "x_data": x_data}
+# return result
+
+base_water = base.prefix + 'base_water.ini'
+def predict_water_chart(base_year,start_time ,end_time):
+
+
+ water_array = np.loadtxt(base_water, dtype=str,encoding='utf-8')
+ print(water_array)
+ y_data=[]
+ x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
+ water= water_array[0]
+ for e in water:
+ y_data.append(e)
+
+ result = {"y_data": y_data, "x_data": x_data}
+ return result
+
+# river line chart
+# def predict_river_chart(base_year,start_time ,end_time):
+# model_ws = base.baseModel
+# baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
+# exe_name="mf2005", verbose=True, version="mf2005", check=False)
+# index = 0
+# if base_year in base_init_year:
+# index = base_init_year.index(str(base_year))
+
+# y_data=[]
+# x_data=[]
+# satrt_index = index*12
+# end_index = satrt_index+12
+# for per in range(satrt_index,end_index):
+# wel = baseMdoel.wel.stress_period_data.__getitem__(kper=per)
+# arr=[]
+# for i in range(river_start_index, river_end_index):
+# Q = wel[i][3]
+# arr.append(float(Q))
+# avg = np.array(arr).mean()
+# y_data.append(float(avg))
+# start_month = str(base_year) +"-01"
+# end_month = str(base_year) +"-12"
+# x_data= ModelPeriod.get_months_in_range_ym(start_month,end_month)
+# result = {"y_data": y_data, "x_data": x_data}
+# return result
+
+base_river = base.prefix + 'base_river.ini'
+def predict_river_chart(base_year,start_time ,end_time):
+
+
+ river_array = np.loadtxt(base_river, dtype=str,encoding='utf-8')
+ print(river_array)
+ y_data=[]
+ x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
+ for e in river_array:
+ y_data.append(e)
+
+ result = {"y_data": y_data, "x_data": x_data}
+ return result
+
+
+def run_model_predict(model_name):
+
+ predictiondata=""
+ prediction_path = base.model_dir + model_name +"\\prediction.json"
+ if os.path.exists(prediction_path):
+ with open(prediction_path,encoding='utf-8') as f:
+ predictiondata = json.load(f)
+
+
+ if predictiondata:
+
+ per = ModelPeriod.get_months_in_range_count(
+ predictiondata["start_time"], predictiondata["end_time"])
+
+ # updateDisFile(model_name,per)
+
+ # updateBase6File(model_name,predictiondata)
+
+ #updateRchFile(model_name,predictiondata)
+
+ updateRiverFile(model_name,predictiondata)
+
+ #updateMineFile(model_name,predictiondata)
+ else:
+ print("prediction.json 棰勬祴鍦烘櫙鏂囦欢涓虹┖锛屾棤闇�鏇存敼鐩稿簲鏂囦欢")
+
+
+ # model_ws = base.model_dir + model_name
+
+ # ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
+ # exe_name="mf2005", verbose=True, version="mf2005", check=False)
+ # ml.run_model(report = True)
+ return jsonify("杩愯鎴愬姛锛�")
+
+
+
+
+# update the extraction-area data, either 1. by district or 2. for all areas
+def updateMineFile(model_name,predictiondata):
+
+ start_time =predictiondata["start_time"]
+ end_time = predictiondata["end_time"]
+ base_year = predictiondata["mine"]["base_year"]
+
+ base_start= str(base_year) + "-" + str(start_time.split("-")[1])
+ base_end= str(base_year) + "-" + str(end_time.split("-")[1])
+
+ start_index = (int)(base.times_month_per_dict[base_start])
+ end_index = (int)(base.times_month_per_dict[base_end])
+
+ pers= end_index-start_index + 1
+
+ area= predictiondata["mine"]["area"]
+ flag = check_mine_param(predictiondata)
+
+ if flag == 'true':
+ baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base.baseModel,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+ update_model_ws = base.model_dir + model_name
+ updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ district_dict = get_distric_dict()
+
+ area_dict = get_area_dict(area)
+
+ lrcq = {}
+ for per in range(pers):
+ wel = []
+ wel = baseMdoel.wel.stress_period_data.__getitem__(kper = (per + start_index ))
+ array2d = []
+ count = 1
+ for Layer, Row, Column, Q in wel:
+ array = []
+                # cells after the river range (pumping wells)
+ if count > river_end_index :
+
+ r = (float) (get_row_column_ratio(Row, Column, district_dict, area_dict))
+
+ array = [Layer, Row, Column, Q * r]
+
+ else:
+ array = [Layer, Row, Column, Q]
+
+ array2d.append(array)
+ count +=1
+
+ lrcq[per] = array2d
+
+ flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq)
+ updateMdoel.write_input()
+ else:
+ print("Well--Mine鏂囦欢鏃犻渶淇敼锛�")
+
+
+# build a name -> ratio mapping from the area list
+def get_area_dict(area):
+ result ={}
+
+ for i in range(len(area)):
+ name = area[i]["name"]
+ rt = area[i]["ratio"]
+ result[name]= rt
+ return result
+
+
+# build a "row,column" -> district name mapping
+def get_distric_dict():
+ data = base.district
+ result = {}
+ for row ,column ,id ,name in data:
+ key = str(row)+","+str(column)
+ result[key]= name
+ return result
+
+
+# look up the ratio for a given row/column
+def get_row_column_ratio(row, column ,district_dict, area_dict ):
+ key = str(row) +"," + str(column)
+    if area_dict.__contains__("全部区域"):   # "全部区域" = all areas
+        return area_dict["全部区域"]
+
+ if district_dict.__contains__(key):
+ name = district_dict[key]
+ ratio = area_dict[name]
+ return float(ratio)
+
+ return float(1.0)
+
+
+
+def check_mine_param(predictiondata):
+
+ mine = predictiondata["mine"]
+ if not mine:
+ print("鎶芥按浜� 棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ base_year = predictiondata["mine"]["base_year"]
+ if not base_year :
+ print(" Mine : base_year棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ area= predictiondata["mine"]["area"]
+ if not area :
+ print(" Mine : area棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ return "true"
+
+
+# update the river multiplier
+# def updateRiverFile(model_name,predictiondata):
+
+# start_time =predictiondata["start_time"]
+# end_time = predictiondata["end_time"]
+# base_year = predictiondata["river"]["base_year"]
+
+# ratio= float(predictiondata["river"]["ratio"])
+
+# base_start= str(base_year) + "-" + str(start_time.split("-")[1])
+# base_end= str(base_year) + "-" + str(end_time.split("-")[1])
+
+# start_index = (int)(base.times_month_per_dict[base_start])
+# end_index = (int)(base.times_month_per_dict[base_end])
+
+# pers= end_index-start_index + 1
+
+
+# flag = check_river_param(predictiondata)
+
+# if flag == "true":
+
+# baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base.baseModel,
+# exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+# update_model_ws = base.model_dir + model_name
+# updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
+# exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+# lrcq = {}
+# for per in range(pers):
+# wel = []
+
+# wel = baseMdoel.wel.stress_period_data.__getitem__(kper = (per + start_index ))
+# array2d = []
+
+# count = 1
+
+# for Layer, Row, Column, Q in wel:
+# array = []
+#                 # if within the river data range
+# if count > river_start_index and count <= river_end_index:
+# array = [Layer, Row, Column, Q * ratio]
+# else:
+# array = [Layer, Row, Column, Q]
+
+# array2d.append(array)
+# count +=1
+
+# lrcq[per] = array2d
+
+# flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq)
+# updateMdoel.write_input()
+
+# else:
+# print("Well--River鏂囦欢鏃犻渶淇敼锛�")
+
+
+def updateRiverFile(model_name,predictiondata):
+
+ start_time =predictiondata["start_time"]
+ end_time = predictiondata["end_time"]
+
+
+ river_ratio= float(predictiondata["river"]["ratio"])
+
+ rain_ratio = float(predictiondata["rain"]["ratio"])
+ rain_base_year = predictiondata["rain"]["base_year"]
+
+ area= predictiondata["mine"]["area"]
+
+ flag = check_river_param(predictiondata)
+
+ if flag == "true":
+
+ ws = base.predictParamModel + rain_base_year
+
+ baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ update_model_ws = base.model_dir + model_name
+ updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ district_dict = get_distric_dict()
+
+ area_dict = get_area_dict(area)
+
+ lrcq = {}
+
+ for per in range(predict_per):
+ wel = []
+
+ wel = baseMdoel.wel.stress_period_data.__getitem__(kper = per)
+ wel_len = len(wel)
+
+            # lateral boundary cells
+ for i in range (0,453):
+ wel[i][3] = wel[i][3] * rain_ratio
+
+            # river cells
+ for i in range(453, 562):
+ wel[i][3] = wel[i][3] * river_ratio
+
+            # pumping wells
+ for i in range(562,wel_len):
+
+ r = (float) (get_row_column_ratio(wel[i][1], wel[i][2], district_dict, area_dict))
+ wel[i][3] = wel[i][3] * r
+
+ lrcq[per] = wel
+
+ flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq)
+ updateMdoel.write_input()
+
+ else:
+ print("Well--River鏂囦欢鏃犻渶淇敼锛�")
+
+def check_river_param(predictiondata):
+
+ river = predictiondata["river"]
+ if not river:
+ print("River棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ base_year = predictiondata["river"]["base_year"]
+ if not base_year :
+ print(" River : base_year棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ ratio= predictiondata["river"]["ratio"]
+ if not ratio or ratio == "1" :
+ print(" River : ratio棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ return "true"
+
+
+# def updateRchFile(model_name,predictiondata):
+
+# start_time =predictiondata["start_time"]
+# end_time = predictiondata["end_time"]
+
+# base_year = predictiondata["rain"]["base_year"]
+# ratio= float(predictiondata["rain"]["ratio"])
+
+# base_start= str(base_year) + "-" + str(start_time.split("-")[1])
+# base_end= str(base_year) + "-" + str(end_time.split("-")[1])
+
+# start_index = (int)(base.times_month_per_dict[base_start])
+# end_index = (int)(base.times_month_per_dict[base_end])
+# pers= end_index-start_index + 1
+
+
+# flag = check_rain_param(predictiondata)
+
+# if flag == "true":
+
+# baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base.baseModel,
+# exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+# update_model_ws = base.model_dir + model_name
+# updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
+# exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+# for per in range(pers):
+
+# item = baseMdoel.rch.rech.__getitem__(kper = (per + start_index))
+# array2d = item.get_value()
+# array2d_len = len(array2d)
+
+# for i in range(array2d_len):
+
+# array_len = len(array2d[i])
+# for j in range(array_len):
+
+# if str(base.area_array[i][j]) != '-9999':
+
+# array2d[i][j] = array2d[i][j] * ratio
+
+# updateMdoel.rch.rech.__setitem__(key = per, value=array2d)
+
+# rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech)
+# rch.write_file(check=False)
+
+# else:
+
+# print("Rch鏂囦欢鏃犻渶淇敼锛�")
+
+
+def updateRchFile(model_name,predictiondata):
+
+ start_time =predictiondata["start_time"]
+ end_time = predictiondata["end_time"]
+
+    # base year type: wet year / dry year
+ base_year = predictiondata["rain"]["base_year"]
+ ratio= float(predictiondata["rain"]["ratio"])
+
+
+ flag = check_rain_param(predictiondata)
+
+    # model folder that supplies the base data
+ base_ws= base.predictParamModel + base_year
+
+ if flag == "true":
+
+ baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ update_model_ws = base.model_dir + model_name
+ updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ for per in range(predict_per):
+
+ item = baseMdoel.rch.rech.__getitem__(kper = per)
+ array2d = item.get_value()
+ array2d_len = len(array2d)
+
+ for i in range(array2d_len):
+
+ array_len = len(array2d[i])
+ for j in range(array_len):
+
+ if str(base.area_array[i][j]) != '-9999':
+
+ array2d[i][j] = array2d[i][j] * ratio
+
+ updateMdoel.rch.rech.__setitem__(key = per, value=array2d)
+
+ rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech)
+ rch.write_file(check=False)
+
+ else:
+
+ print("Rch鏂囦欢鏃犻渶淇敼锛�")
+
+def check_rain_param(predictiondata):
+
+ rain = predictiondata["rain"]
+ if not rain:
+ print("Rch棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ base_year = predictiondata["rain"]["base_year"]
+ if not base_year :
+ print(" Rch : base_year棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ ratio= predictiondata["rain"]["ratio"]
+ if not ratio or ratio == "1" :
+ print(" Rch : ratio棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ return "true"
+
+
+ # update the bas6 file with the initial head info
+def updateBase6File(model_name,predictdata):
+ model_ws = base.model_dir + model_name
+ ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+
+    # initial head
+ init_header = predictdata["initHeader"]
+
+ dir = base.model_dir + init_header + "\\modflow.head"
+ head = bf.HeadFile(dir)
+ alldata = head.get_alldata()
+
+ lens = len(alldata)
+ last_index = lens-3
+
+ last_array3= alldata[last_index]
+
+ strt = ml.bas6.strt
+ # strs = ml.bas6.strt.__getitem__(2)
+ # print(strs.get_value())
+ strt.__setitem__(0,last_array3[0])
+ strt.__setitem__(1,last_array3[1])
+ strt.__setitem__(2,last_array3[2])
+
+
+ mfBase6 = flopy.modflow.ModflowBas(
+ ml,
+ strt= strt,
+ ibound=ml.bas6.ibound,
+ hnoflo=ml.bas6.hnoflo,
+ extension="bas6",)
+
+ mfBase6.write_file(check=False)
+
+
+# modify the dis file
+def updateDisFile(model_name, per):
+
+ model_ws = base.model_dir + model_name
+ ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ mfDis = flopy.modflow.ModflowDis(
+ ml,
+ nlay=ml.dis.nlay,
+ nrow=ml.dis.nrow,
+ ncol=ml.dis.ncol,
+ nper=per,
+ delr=ml.dis.delr,
+ delc=ml.dis.delc,
+ top=ml.dis.top,
+ botm=ml.dis.botm,
+ perlen=ml.dis.perlen,
+ nstp=ml.dis.nstp,
+ tsmult=ml.dis.tsmult,
+ steady=ml.dis.steady,
+ itmuni=ml.dis.itmuni,
+ lenuni=ml.dis.lenuni,
+ extension="dis")
+
+ mfDis.write_file(check=False)
diff --git a/Predict.py b/Predict.py
new file mode 100644
index 0000000..36ea1ce
--- /dev/null
+++ b/Predict.py
@@ -0,0 +1,347 @@
+
+# import Flask classes
+from flask import Flask
+from flask import jsonify
+from flask import request
+from flask_cors import CORS
+import sys
+import numpy as np
+import pandas as pd
+import flopy
+import flopy.utils.binaryfile as bf
+import csv
+import time
+from openpyxl import load_workbook
+import os
+import shutil
+import json
+import Base as base
+import CalHead
+import ModelPeriod
+
+
+base_init_year=["2020","2021","2022"]
+river_start_index = 454
+river_end_index =562
+
+# number of prediction stress periods
+predict_per = 12
+
+# precipitation
+
+base_water = base.prefix + 'base_water.ini'
+def predict_water_chart(base_year,start_time ,end_time):
+
+
+ water_array = np.loadtxt(base_water, dtype=str,encoding='utf-8')
+
+ y_data=[]
+ x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
+ water= water_array[0]
+ for e in water:
+ y_data.append(e)
+
+ result = {"y_data": y_data, "x_data": x_data}
+ return result
+
+# river line chart
+
+base_river = base.prefix + 'base_river.ini'
+def predict_river_chart(base_year,start_time ,end_time):
+
+
+ river_array = np.loadtxt(base_river, dtype=str,encoding='utf-8')
+
+ y_data=[]
+ x_data= ModelPeriod.get_months_in_range_ym("2022-01","2022-12")
+ for e in river_array:
+ y_data.append(e)
+
+ result = {"y_data": y_data, "x_data": x_data}
+ return result
+
+
+def run_model_predict(model_name):
+
+ predictiondata=""
+ prediction_path = base.model_dir + model_name +"\\prediction.json"
+ if os.path.exists(prediction_path):
+ with open(prediction_path,encoding='utf-8') as f:
+ predictiondata = json.load(f)
+
+ if predictiondata:
+
+ try:
+ updateDisFile(model_name,predict_per)
+
+ updateBase6File(model_name,predictiondata)
+
+ updateRchFile(model_name,predictiondata)
+
+ updateRiverFile(model_name,predictiondata)
+ except:
+
+ return "璇锋鏌ュ垵濮嬫按澶淬�侀檷姘撮噺銆佹案瀹氭渤鍏ユ笚閲忋�佸紑閲囬噺绛夊弬鏁版槸鍚﹀~鍐欏畬鏁达紒"
+
+
+ else:
+ print("prediction.json 棰勬祴鍦烘櫙鏂囦欢涓虹┖锛屾棤闇�鏇存敼鐩稿簲鏂囦欢")
+
+
+ model_ws = base.model_dir + model_name
+
+ ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+ ml.run_model(report = True)
+ return "棰勬祴妯″瀷杩愯鎴愬姛锛�"
+
+
+
+# build a name -> ratio mapping from the area list
+def get_area_dict(area):
+ result ={}
+
+ for i in range(len(area)):
+ name = area[i]["name"]
+ rt = area[i]["ratio"]
+ result[name]= rt
+ return result
+
+
+# build a "row,column" -> district name mapping
+def get_distric_dict():
+ data = base.district
+ result = {}
+ for row ,column ,id ,name in data:
+ key = str(row)+","+str(column)
+ result[key]= name
+ return result
+
+
+# look up the ratio for a given row/column
+def get_row_column_ratio(row, column ,district_dict, area_dict ):
+ key = str(row) +"," + str(column)
+    if area_dict.__contains__("全部区域"):   # "全部区域" = all areas
+        return area_dict["全部区域"]
+
+ if district_dict.__contains__(key):
+ name = district_dict[key]
+ ratio = area_dict[name]
+ return float(ratio)
+
+ return float(1.0)
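+# Usage sketch (district name and ratio illustrative): with
+#   district_dict = {"10,20": "某区"} and area_dict = {"某区": "0.8"},
+# get_row_column_ratio(10, 20, district_dict, area_dict) returns 0.8; an
+# "全部区域" (all areas) key in area_dict overrides everything, and cells with
+# no matching district fall back to 1.0.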
+
+
+
+
+def updateRiverFile(model_name,predictiondata):
+
+ flag = check_rain_param(predictiondata)
+
+ if flag == "true":
+
+ rain_ratio = float(predictiondata["rain"]["ratio"])
+ rain_base_year = predictiondata["rain"]["base_year"]
+
+ river_ratio= float(predictiondata["river"]["ratio"])
+ area= predictiondata["mine"]["area"]
+
+ ws = base.predictParamModel + rain_base_year
+
+ baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ update_model_ws = base.model_dir + model_name
+ updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ district_dict = get_distric_dict()
+
+ area_dict = get_area_dict(area)
+
+ lrcq = {}
+
+ for per in range(predict_per):
+ wel = []
+ array2d = []
+
+ wel = baseMdoel.wel.stress_period_data.__getitem__(kper = per)
+ wel_len = len(wel)
+
+            # lateral boundary cells
+ for i in range (0,453):
+ wel[i][3] = wel[i][3] * rain_ratio
+
+            # river cells
+ for i in range(453, 562):
+ wel[i][3] = wel[i][3] * river_ratio
+
+            # pumping wells
+ for i in range(562,wel_len):
+
+ r = (float) (get_row_column_ratio(wel[i][1], wel[i][2], district_dict, area_dict))
+ wel[i][3] = wel[i][3] * r
+
+
+            # rebuild the array for this stress period
+ for Layer, Row, Column, Q in wel:
+ array = [Layer, Row, Column, Q]
+ array2d.append(array)
+
+ flex_data= getFlexdata(model_name)
+
+ for i in range(len(flex_data)):
+ array2d.append(flex_data[i])
+
+ lrcq[per] = array2d
+
+ flopy.modflow.ModflowWel(updateMdoel,stress_period_data=lrcq)
+ updateMdoel.write_input()
+
+ else:
+ print("Well--River鏂囦欢鏃犻渶淇敼锛�")
+
+# append extra source/sink (user-defined well) terms from pump_well.json
+def getFlexdata(model_name):
+ welldata=""
+ well_path = base.model_dir + model_name +"\\pump_well.json"
+ data=[]
+ if os.path.exists(well_path):
+ with open(well_path,encoding='utf-8') as f:
+ welldata = json.load(f)
+ wel= welldata["well"]
+
+ for i in range (len(wel)):
+ layer = int (wel[i]['layer'])-1
+ row= int(wel[i]['row'])-1
+ column = int(wel[i]['column'])-1
+ v = float(wel[i]['value'])
+ arr = [layer,row, column, v]
+ data.append(arr)
+
+ return data
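+# Sketch of the pump_well.json shape read above (field names from the code,
+# values illustrative):
+#   {"model_name": "...", "well": [{"layer": "1", "row": "10",
+#                                   "column": "20", "value": "-500"}]}
+# Each entry becomes a 0-based [layer, row, column, value] row that
+# updateRiverFile appends to every stress period.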
+
+
+def updateRchFile(model_name,predictiondata):
+ flag = check_rain_param(predictiondata)
+ if flag == "true":
+        # base year type: wet year / dry year
+ base_year = predictiondata["rain"]["base_year"]
+ ratio= float(predictiondata["rain"]["ratio"])
+
+        # model folder that supplies the base data
+ base_ws= base.predictParamModel + base_year
+
+ baseMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= base_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ update_model_ws = base.model_dir + model_name
+ updateMdoel = flopy.modflow.Modflow.load("modflow.nam", model_ws= update_model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ for per in range(predict_per):
+
+ item = baseMdoel.rch.rech.__getitem__(kper = per)
+ array2d = item.get_value()
+ array2d_len = len(array2d)
+
+ for i in range(array2d_len):
+
+ array_len = len(array2d[i])
+ for j in range(array_len):
+
+ if str(base.area_array[i][j]) != '-9999':
+
+ array2d[i][j] = array2d[i][j] * ratio
+
+ updateMdoel.rch.rech.__setitem__(key = per, value=array2d)
+
+ rch = flopy.modflow.ModflowRch(updateMdoel, rech=updateMdoel.rch.rech)
+ rch.write_file(check=False)
+
+ else:
+ print("Rch鏂囦欢鏃犻渶淇敼锛�")
+
+def check_rain_param(predictiondata):
+
+ rain = predictiondata["rain"]
+ if not rain:
+ print("Rch棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ base_year = predictiondata["rain"]["base_year"]
+ if not base_year :
+ print(" Rch : base_year棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ ratio= predictiondata["rain"]["ratio"]
+ if not ratio :
+ print(" Rch : ratio棰勬祴鍙傛暟涓虹┖锛屾棤闇�瑕佷慨鏀�")
+ return "false"
+
+ return "true"
+
+
+ # update the bas6 file with the initial head info
+def updateBase6File(model_name,predictdata):
+ model_ws = base.model_dir + model_name
+ ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+
+    # initial head
+ init_header = predictdata["initHeader"]
+
+ dir = base.model_dir + init_header + "\\modflow.head"
+ head = bf.HeadFile(dir)
+ alldata = head.get_alldata()
+
+ lens = len(alldata)
+ last_index = lens-3
+
+ last_array3= alldata[last_index]
+
+ strt = ml.bas6.strt
+ # strs = ml.bas6.strt.__getitem__(2)
+ # print(strs.get_value())
+ strt.__setitem__(0,last_array3[0])
+ strt.__setitem__(1,last_array3[1])
+ strt.__setitem__(2,last_array3[2])
+
+
+ mfBase6 = flopy.modflow.ModflowBas(
+ ml,
+ strt= strt,
+ ibound=ml.bas6.ibound,
+ hnoflo=ml.bas6.hnoflo,
+ extension="bas6",)
+
+ mfBase6.write_file(check=False)
+
+
+# modify the dis file
+def updateDisFile(model_name, per):
+
+ model_ws = base.model_dir + model_name
+ ml = flopy.modflow.Modflow.load("modflow.nam", model_ws=model_ws,
+ exe_name="mf2005", verbose=True, version="mf2005", check=False)
+
+ mfDis = flopy.modflow.ModflowDis(
+ ml,
+ nlay=ml.dis.nlay,
+ nrow=ml.dis.nrow,
+ ncol=ml.dis.ncol,
+ nper=per,
+ delr=ml.dis.delr,
+ delc=ml.dis.delc,
+ top=ml.dis.top,
+ botm=ml.dis.botm,
+ perlen=ml.dis.perlen,
+ nstp=ml.dis.nstp,
+ tsmult=ml.dis.tsmult,
+ steady=ml.dis.steady,
+ itmuni=ml.dis.itmuni,
+ lenuni=ml.dis.lenuni,
+ extension="dis")
+
+ mfDis.write_file(check=False)
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..192d800
--- /dev/null
+++ b/test.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Dec 21 12:02:24 2023
+
+@author: ZMK
+"""
+
+import Base
+
+import numpy as np
+
+# model_config ='C:\\Users\\ZMK\\Desktop\\objclipdig\\ModelFlow_xishan\\config.ini'
+
+# conf = np.loadtxt(model_config, dtype=str,encoding='utf-8')
+
+
+# conf[1]='1'
+
+# np.savetxt(model_config,conf,fmt='%100s',encoding='utf-8')
+
+# print(conf)
+Base.updateModelConfig("202301_202312")
--
Gitblit v1.9.1