import logging

import pandas as pd
import pendulum
from sqlalchemy import and_
from sqlalchemy.exc import DBAPIError

import setup_log
from database import Session
from model.ModWorkRecord import WorkRecord, workRecordSchema
from model.ModPendingProDet import PendingProDet, pendingProDetSchema


def StatisticsBureauMonthlyReport():
    """Build or refresh each bureau's monthly report summary from this month's work records."""
    from model.ModBureauMonRepSum import BureauMonRepSum
    _logger = logging.getLogger('TIMED_STAT_BMR')
session = Session()
try:
now = pendulum.now()
first_day_of_month = now.start_of('month')
last_day_of_month = now.end_of('month')
source_data = session.query(WorkRecord) \
.filter(and_(WorkRecord.opentime >= first_day_of_month,
WorkRecord.opentime <= last_day_of_month)).all()
        if not source_data:
            raise ValueError('no work records found for the current month')
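        # NOTE: `.dump(...).data` is the marshmallow 2.x API; under marshmallow 3,
        # `schema.dump(...)` returns the serialized result directly, with no `.data` attribute.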
source_data_frame = pd.DataFrame(workRecordSchema.dump(source_data, many=True).data)
bureaus = source_data_frame[['bureauid', 'bureauname']].drop_duplicates()
        for _, bureau in bureaus.iterrows():
            report = session.query(BureauMonRepSum) \
                .filter(and_(BureauMonRepSum.bureauid == bureau['bureauid'],
                             BureauMonRepSum.monthnum == first_day_of_month)) \
                .one_or_none()
            all_pros = source_data_frame[source_data_frame['bureauid'] == bureau['bureauid']].index.size
            # Per problem type: record count plus its percentage of the bureau's total.
            # NOTE: the '<lambda>' column label matches older pandas; newer releases
            # name anonymous aggregations '<lambda_0>'.
            stats = source_data_frame.loc[source_data_frame['bureauid'] == bureau['bureauid'],
                                          ['serial_number', 'proname']] \
                .groupby('proname') \
                .agg(['count', lambda x: round(x.size / all_pros * 100, 2)]) \
                .rename(index=str, columns={'count': 'amount', '<lambda>': 'percent'})
            content = [{'f1': proname,
                        'f2': int(stat['serial_number']['amount']),
                        'f3': stat['serial_number']['percent']}
                       for proname, stat in stats.iterrows()]
            if report is None:
                report = BureauMonRepSum(
                    monthnum=first_day_of_month,
                    bureauid=bureau['bureauid'],
                    bureauname=bureau['bureauname'],
                    content_array=content,
                    flag='0')
                session.add(report)
            else:
                report.content_array = content
session.commit()
        _logger.info('Bureau monthly report generated successfully.')
    except (DBAPIError, KeyError, ValueError) as e:
        _logger.error(f'Failed to generate the bureau monthly report: {e}')
session.rollback()
finally:
session.close()


def StatisticsBureauWeeklyReport():
    """Build or refresh each bureau's weekly report summary from this week's work records."""
    from model.ModBureauWeeklyRepSum import BureauWeeklyRepSum
    _logger = logging.getLogger('TIMED_STAT_BWR')
session = Session()
try:
now = pendulum.now()
first_day_of_month = now.start_of('month')
first_day_of_week = now.start_of('week')
last_day_of_week = now.end_of('week')
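        # pendulum weeks start on Monday, and week_of_year follows ISO-8601 numbering.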
        week_of_year = now.week_of_year
source_data = session.query(WorkRecord) \
.filter(and_(WorkRecord.opentime >= first_day_of_week,
WorkRecord.opentime <= last_day_of_week)) \
.all()
        if not source_data:
            raise ValueError('no work records found for the current week')
source_data_frame = pd.DataFrame(workRecordSchema.dump(source_data, many=True).data)
bureaus = source_data_frame[['bureauid', 'bureauname']].drop_duplicates()
        for _, bureau in bureaus.iterrows():
            report = session.query(BureauWeeklyRepSum) \
                .filter(and_(BureauWeeklyRepSum.weeknum == week_of_year,
                             BureauWeeklyRepSum.bureauid == bureau['bureauid'])).one_or_none()
            all_pros = source_data_frame[source_data_frame['bureauid'] == bureau['bureauid']].index.size
            # Same per-problem count/percent aggregation as the monthly report above.
            stats = source_data_frame.loc[source_data_frame['bureauid'] == bureau['bureauid'],
                                          ['serial_number', 'proname']] \
                .groupby('proname') \
                .agg(['count', lambda x: round(x.size / all_pros * 100, 2)]) \
                .rename(index=str, columns={'count': 'amount', '<lambda>': 'percent'})
            content = [{'f1': proname,
                        'f2': int(stat['serial_number']['amount']),
                        'f3': stat['serial_number']['percent']}
                       for proname, stat in stats.iterrows()]
            if report is None:
                report = BureauWeeklyRepSum(
                    bureauid=bureau['bureauid'],
                    bureauname=bureau['bureauname'],
                    monthnum=first_day_of_month,
                    weeknum=week_of_year,
                    content_array=content,
                    flag='0')
                session.add(report)
            else:
                report.content_array = content
session.commit()
        _logger.info('Bureau weekly report generated successfully.')
    except (DBAPIError, KeyError, ValueError) as e:
        _logger.error(f'Failed to generate the bureau weekly report: {e}')
session.rollback()
finally:
session.close()


def StatisticsUnresolvedIssueDetail():
    """Maintain the detail table of unresolved issues that need county-bureau handling."""
    from model.ModPendingProDet import PendingProDet
    _logger = logging.getLogger('TIMED_STAT_UID')
session = Session()
try:
initial_record = session.query(PendingProDet).all()
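        # New unresolved issues: confirmed, still-open (final_result_flag '0') records from
        # department '1' whose meter barcode is not yet in the pending-detail table.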
unresolved_record = session.query(WorkRecord) \
.filter(WorkRecord.final_result_flag == '0') \
.filter(WorkRecord.confirm_openid.isnot(None)) \
.filter(WorkRecord.work_department == '1') \
.filter(~WorkRecord.meter_barcode.in_([record.meter_barcode for record in initial_record])).all()
all_record_unresolved = session.query(WorkRecord) \
.filter(WorkRecord.final_result_flag == '0') \
.filter(WorkRecord.confirm_openid.isnot(None)) \
.filter(WorkRecord.work_department == '1').all()
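        # Purge pending rows whose meter barcode no longer appears among the open records;
        # synchronize_session=False skips in-session state sync, which is safe here because
        # the session is committed and closed right after.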
session.query(PendingProDet) \
.filter(~PendingProDet.meter_barcode.in_([record.meter_barcode for record in all_record_unresolved])) \
.delete(synchronize_session=False)
source_data = pd.DataFrame(workRecordSchema.dump(unresolved_record, many=True).data)
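        # Collapse multiple open records per meter: keep the earliest open time and, for the
        # remaining fields, the max as a representative value.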
grouped_data = source_data[['bureauid', 'bureauname', 'stationid', 'stationname',
'meter_barcode', 'problemid', 'proname', 'opentime',
'photo_fingerprint', 'resolventid', 'resolname', 'remark']] \
.groupby(['bureauid', 'bureauname', 'stationid', 'stationname',
'meter_barcode'],
as_index=False) \
            .agg({'problemid': 'max',
                  'proname': 'max',
                  'opentime': 'min',
                  'photo_fingerprint': 'max',
                  'resolventid': 'max',
                  'resolname': 'max',
                  'remark': 'max'})
        for _, row in grouped_data.iterrows():
            record_time = pendulum.parse(row['opentime'], tz='Asia/Shanghai')
            new_record = PendingProDet(
                bureauid=row['bureauid'],
                bureauname=row['bureauname'],
                stationid=row['stationid'],
                stationname=row['stationname'],
                meter_barcode=row['meter_barcode'],
                problemid=row['problemid'],
                problemname=row['proname'],
                initial_time=row['opentime'],
                pending_days=pendulum.now().diff(record_time).in_days(),
                operationNo=0,
                directorNo=0,
                leaderNo=0,
                photo_fingerprint=row['photo_fingerprint'],
                monitorNo=0,
                resolventid=row['resolventid'],
                resolname=row['resolname'],
                remark=row['remark']
            )
            session.add(new_record)
session.commit()
        _logger.info('Unresolved-issue detail statistics for county bureaus generated successfully.')
    except (DBAPIError, KeyError, ValueError) as e:
        _logger.error(f'Failed to generate unresolved-issue detail statistics: {e}')
session.rollback()
finally:
session.close()


def StatisticsUnresolvedIssueReport():
    """Rebuild the per-station classification counts of unresolved issues."""
    from model.ModPendingProRep import PendingProRep
    _logger = logging.getLogger('TIMED_STAT_UIR')
session = Session()
try:
session.query(PendingProRep).delete()
source_list = session.query(WorkRecord) \
.filter(WorkRecord.final_result_flag == '0') \
.filter(WorkRecord.confirm_openid.isnot(None)) \
.all()
source_data = pd.DataFrame(workRecordSchema.dump(source_list, many=True).data)
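        # Count distinct work-order serial numbers per bureau/station/department;
        # nunique avoids double-counting repeated rows for the same work order.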
grouped_data = source_data[['bureauid', 'bureauname', 'stationid', 'stationname',
'work_department', 'serial_number']] \
.groupby(['bureauid', 'bureauname', 'stationid', 'stationname', 'work_department'],
as_index=False) \
.agg({'serial_number': pd.Series.nunique})
        for _, row in grouped_data.iterrows():
            new_record = PendingProRep(
                bureauid=row['bureauid'],
                bureauname=row['bureauname'],
                stationid=row['stationid'],
                stationname=row['stationname'],
                pending_number=row['serial_number'],
                flag=row['work_department'])
            session.add(new_record)
session.commit()
        _logger.info('Unresolved-issue classification statistics generated successfully.')
    except (DBAPIError, KeyError, ValueError) as e:
        _logger.error(f'Failed to generate unresolved-issue classification statistics: {e}')
session.rollback()
finally:
session.close()


def StatisticsUnresolvedIssueUrgeByStation():
    """Rebuild the per-station summary of pending issues and supervision (urge) counts."""
    from model.ModPendingProDet import PendingProDet, pendingProDetSchema
    from model.ModPendingProSupSta import PendingProSupSta
    _logger = logging.getLogger('TIMED_STAT_UIUS')
session = Session()
try:
session.query(PendingProSupSta).delete()
source_list = session.query(PendingProDet).all()
source_data = pd.DataFrame(pendingProDetSchema.dump(source_list, many=True).data)
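        # Total urge count per record, summed across the four supervising roles.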
source_data['sumNo'] = source_data['directorNo'] + source_data['operationNo'] + source_data['leaderNo'] \
+ source_data['monitorNo']
        grouped_data = source_data \
            .groupby(['bureauid', 'bureauname', 'stationid', 'stationname'],
                     as_index=False) \
            .agg({'pending_days': pd.Series.max,
                  'initial_time': pd.Series.nunique,
                  'sumNo': pd.Series.sum}) \
            .rename(index=str, columns={'initial_time': 'amount'})
        sum_amount_data = grouped_data \
            .groupby(['bureauid'],
                     as_index=False) \
            .agg({'amount': pd.Series.sum}) \
            .rename(index=str, columns={'amount': 'sum_amount'})
        grouped_data = pd.merge(grouped_data, sum_amount_data, how='left', on='bureauid')
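        # Each station's share of its bureau's pending total, as a percentage.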
grouped_data['pending_percent'] = round(grouped_data['amount'] / grouped_data['sum_amount'] * 100, 2)
        for _, row in grouped_data.iterrows():
            new_record = PendingProSupSta(
                bureauid=row['bureauid'],
                bureauname=row['bureauname'],
                stationid=row['stationid'],
                stationname=row['stationname'],
                pending_number=row['amount'],
                max_supervision_no=row['sumNo'],
                max_pending_days=row['pending_days'],
                pending_percent=row['pending_percent'])
session.add(new_record)
session.commit()
        _logger.info('Pending-issue summary statistics generated successfully.')
    except (DBAPIError, KeyError, ValueError) as e:
        _logger.error(f'Failed to generate pending-issue summary statistics: {e}')
session.rollback()
finally:
session.close()


def StatisticsUnresolvedIssueByType():
    """Rebuild the pending-issue summary grouped by problem type."""
    from model.ModPendingProSupType import PendingProSupType
    from model.ModPendingProDet import PendingProDet, pendingProDetSchema
    _logger = logging.getLogger('TIMED_STAT_UIT')
session = Session()
try:
session.query(PendingProSupType).delete()
source_list = session.query(PendingProDet).all()
source_data = pd.DataFrame(pendingProDetSchema.dump(source_list, many=True).data)
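        # Count distinct issues per bureau/station/problem type, then join per-bureau totals
        # so each type's share can be expressed as a percentage.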
grouped_data = source_data \
.groupby(['bureauid', 'bureauname', 'stationid', 'stationname', 'problemid', 'problemname'],
as_index=False) \
.agg({'initial_time': pd.Series.nunique}) \
.rename(index=str, columns={'initial_time': 'amount'})
sum_data = grouped_data \
.groupby(['bureauid'],
as_index=False) \
.agg({'amount': pd.Series.sum}) \
.rename(index=str, columns={'amount': 'sum_amount'})
        grouped_data = pd.merge(grouped_data, sum_data, how='inner', on='bureauid')
grouped_data['pending_percent'] = round(grouped_data['amount'] / grouped_data['sum_amount'] * 100, 2)
        for _, row in grouped_data.iterrows():
            new_record = PendingProSupType(
                bureauid=row['bureauid'],
                bureauname=row['bureauname'],
                stationid=row['stationid'],
                stationname=row['stationname'],
                problemid=row['problemid'],
                problemname=row['problemname'],
                pending_number=row['amount'],
                pending_percent=row['pending_percent'])
session.add(new_record)
session.commit()
        _logger.info('Pending-issue summary by problem type generated successfully.')
    except (DBAPIError, KeyError, ValueError) as e:
        _logger.error(f'Failed to generate pending-issue summary by problem type: {e}')
session.rollback()
finally:
session.close()


def StatisticsVendorIssues():
    """Rebuild per-vendor problem counts, joined with equipment classification data."""
    from model.ModEquipVenderProRep import EquipVenderProRep
    from model.ModEquipmentVenderList import EquipmentVenderList
    from model.ModEquipmentClassifyList import EquipmentClassifyList
    _logger = logging.getLogger('TIMED_STAT_VI')
session = Session()
try:
session.query(EquipVenderProRep).delete()
records = session.query(WorkRecord).all()
source_data = pd.DataFrame(workRecordSchema.dump(records, many=True).data)
classify_list = session.query(EquipmentVenderList, EquipmentClassifyList) \
.filter(EquipmentVenderList.classifyid == EquipmentClassifyList.classifyid) \
.all()
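        # Flatten the vendor/classification join into a plain lookup frame for the merge below.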
classify_data = pd.DataFrame([{'venderid': vender.venderid,
'vendername': vender.vendername,
'classifyid': classify.classifyid,
'classifyname': classify.classifyname}
for vender, classify in classify_list])
joined_data = pd.merge(source_data,
classify_data,
how='left',
left_on='equ_venderid',
right_on='venderid')
grouped_source_data = joined_data[['bureauid', 'bureauname', 'equ_venderid', 'equ_vendername',
'problemid', 'proname', 'classifyid', 'classifyname', 'meter_barcode']] \
.drop_duplicates() \
.groupby(['bureauid', 'bureauname', 'equ_venderid', 'problemid', 'classifyid', 'classifyname',
'proname', 'equ_vendername'],
as_index=False) \
.agg('count')
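        # After the count aggregation, the 'meter_barcode' column holds the number of distinct
        # records in each group, i.e. the problem count for that vendor/problem combination.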
        for _, row in grouped_source_data.iterrows():
            new_record = EquipVenderProRep(
                bureauid=row['bureauid'],
                bureauname=row['bureauname'],
                equ_venderid=row['equ_venderid'],
                pro_classifyid=row['problemid'],
                classifyid=row['classifyid'],
                classifyname=row['classifyname'],
                pro_classifyname=row['proname'],
                equ_vendername=row['equ_vendername'],
                problem_number=int(row['meter_barcode']))
            session.add(new_record)
session.commit()
        _logger.info('Vendor issue classification statistics generated successfully.')
    except (DBAPIError, KeyError, ValueError) as e:
        _logger.error(f'Failed to generate vendor issue classification statistics: {e}')
session.rollback()
finally:
session.close()


if __name__ == "__main__":
setup_log.setup()
StatisticsBureauMonthlyReport()
StatisticsBureauWeeklyReport()
StatisticsUnresolvedIssueDetail()
# StatisticsUnresolvedIssueReport()
# StatisticsUnresolvedIssueUrgeByStation()
# StatisticsUnresolvedIssueByType()
# StatisticsVendorIssues()
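    # These jobs are meant to run on a timer (note the TIMED_STAT_* logger names).
    # A minimal scheduling sketch, assuming a hypothetical APScheduler setup rather
    # than whatever scheduler actually drives this module in production:
    #
    #     from apscheduler.schedulers.blocking import BlockingScheduler
    #     scheduler = BlockingScheduler(timezone='Asia/Shanghai')
    #     scheduler.add_job(StatisticsBureauWeeklyReport, 'cron', hour=1, minute=0)
    #     scheduler.add_job(StatisticsBureauMonthlyReport, 'cron', day=1, hour=2)
    #     scheduler.start()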