Mirror of https://github.com/veops/cmdb.git
Commit 9b7d6d8f12: Merge branch 'master' of github.com:veops/cmdb into dev_ui
@@ -51,7 +51,7 @@

- 服务树



[查看更多展示](docs/screenshot.md)
@@ -83,6 +83,6 @@ docker-compose up -d

---

_**欢迎关注我们的公众号,点击联系我们,加入微信、qq运维群(336164978),获得更多产品、行业相关资讯**_
_**欢迎关注我们的公众号,点击联系我们,加入微信、QQ群(336164978),获得更多产品、行业相关资讯**_


@@ -1,16 +0,0 @@
default: help

test: ## test in local environment
	pytest -s --html=test-output/test/index.html --cov-report html:test-output/coverage --cov=api tests

clean_test: ## clean test output
	rm -f .coverage
	rm -rf .pytest_cache
	rm -rf test-output


docker_test: ## test all case in docker container
	@echo "TODO"

help:
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' ./Makefile | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
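The deleted `help` target above turns `## ` comments on target lines into a usage listing via a grep/awk one-liner. For readers unfamiliar with that idiom, here is a rough Python equivalent; it is hypothetical and not part of the repository:

```python
import re

# Hypothetical stand-in for the deleted Makefile's `help` target: scan a
# Makefile for lines shaped like "target: ... ## description" and print an
# aligned target/description listing, as the grep/awk pipeline did.
pattern = re.compile(r'^([a-zA-Z_-]+):.*?## (.*)$')

with open('Makefile') as f:  # assumes a Makefile in the current directory
    for line in f:
        match = pattern.match(line)
        if match:
            print(f'{match.group(1):<30} {match.group(2)}')
```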
@@ -24,7 +24,7 @@ supervisor = "==4.0.3"
Flask-Login = "==0.4.1"
Flask-Bcrypt = "==0.7.1"
Flask-Cors = ">=3.0.8"
python-ldap = "==3.2.0"
python-ldap = "==3.4.0"
pycryptodome = "==3.12.0"
# Caching
Flask-Caching = ">=1.0.0"
@@ -39,13 +39,11 @@ kombu = "==4.4.0"
# common setting
Flask-APScheduler = "==1.12.4"
timeout-decorator = "==0.5.0"
numpy = "==1.18.5"
pandas = "==1.3.2"
WTForms = "==3.0.0"
email-validator = "==1.3.1"
treelib = "==1.6.1"
flasgger = "==0.9.5"
Pillow = "==8.3.2"
Pillow = "==9.3.0"
# other
six = "==1.12.0"
bs4 = ">=0.0.1"
@@ -158,10 +158,8 @@ def register_error_handlers(app):
        error_code = getattr(error, "code", 500)
        if not str(error_code).isdigit():
            error_code = 400
        if error_code != 500:
            return make_response(jsonify(message=str(error)), error_code)
        else:
            return make_response(jsonify(message=traceback.format_exc(-1)), error_code)

        return make_response(jsonify(message=str(error)), error_code)

    for errcode in app.config.get("ERROR_CODES") or [400, 401, 403, 404, 405, 500, 502]:
        app.errorhandler(errcode)(render_error)
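Read on its own, the new handler always returns the exception message as JSON and no longer emits a traceback for 500s. A minimal standalone sketch of the resulting registration logic, assuming a plain Flask app outside the veops codebase:

```python
from flask import Flask, jsonify, make_response

app = Flask(__name__)


def render_error(error):
    # Mirror of the simplified handler above: report the error message as
    # JSON for every registered status code instead of a formatted traceback.
    error_code = getattr(error, "code", 500)
    if not str(error_code).isdigit():
        error_code = 400
    return make_response(jsonify(message=str(error)), error_code)


for errcode in [400, 401, 403, 404, 405, 500, 502]:
    app.errorhandler(errcode)(render_error)
```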
@@ -8,40 +8,24 @@ from wtforms import StringField
from wtforms import validators

from api.lib.common_setting.resp_format import ErrFormat
from api.lib.common_setting.utils import get_df_from_read_sql
from api.lib.perm.acl.role import RoleCRUD
from api.models.common_setting import Department, Employee

sub_departments_column_name = 'sub_departments'


def drop_ts_column(df):
    columns = list(df.columns)
    remove_columns = []
    for column in ['created_at', 'updated_at', 'deleted_at', 'last_login']:
        targets = list(filter(lambda c: c.startswith(column), columns))
        if targets:
            remove_columns.extend(targets)

    remove_columns = list(set(remove_columns))

    return df.drop(remove_columns, axis=1) if len(remove_columns) > 0 else df


def get_department_df():
def get_all_department_list(to_dict=True):
    criterion = [
        Department.deleted == 0,
    ]
    query = Department.query.filter(
        *criterion
    )
    df = get_df_from_read_sql(query)
    if df.empty:
        return
    return drop_ts_column(df)
    ).order_by(Department.department_id.asc())
    results = query.all()
    return [r.to_dict() for r in results] if to_dict else results


def get_all_employee_df(block=0):
def get_all_employee_list(block=0, to_dict=True):
    criterion = [
        Employee.deleted == 0,
    ]
@@ -50,112 +34,110 @@ def get_all_employee_df(block=0):
            Employee.block == block
        )

    entities = [getattr(Employee, c) for c in Employee.get_columns(
    ).keys() if c not in ['deleted', 'deleted_at']]
    query = Employee.query.with_entities(
        *entities
    ).filter(
        *criterion
    )
    df = get_df_from_read_sql(query)
    if df.empty:
        return df
    return drop_ts_column(df)
    results = db.session.query(Employee).filter(*criterion).all()

    DepartmentTreeEmployeeColumns = [
        'acl_rid',
        'employee_id',
        'username',
        'nickname',
        'email',
        'mobile',
        'direct_supervisor_id',
        'annual_leave',
        'block',
        'department_id',
    ]

    def format_columns(e):
        return {column: getattr(e, column) for column in DepartmentTreeEmployeeColumns}

    return [format_columns(r) for r in results] if to_dict else results


class DepartmentTree(object):
    def __init__(self, append_employee=False, block=-1):
        self.append_employee = append_employee
        self.block = block
        self.d_df = get_department_df()
        self.employee_df = get_all_employee_df(
        self.all_department_list = get_all_department_list()
        self.all_employee_list = get_all_employee_list(
            block) if append_employee else None

    def prepare(self):
        pass

    def get_employees_by_d_id(self, d_id):
        _df = self.employee_df[
            self.employee_df['department_id'].eq(d_id)
        ].sort_values(by=['direct_supervisor_id'], ascending=True)
        if _df.empty:
        block = self.block

        def filter_department_id(e):
            if self.block != -1:
                return e['department_id'] == d_id and e['block'] == block
            return e.department_id == d_id

        results = list(filter(lambda e: filter_department_id(e), self.all_employee_list))

        return results

    def get_department_by_parent_id(self, parent_id):
        results = list(filter(lambda d: d['department_parent_id'] == parent_id, self.all_department_list))
        if not results:
            return []

        if self.block != -1:
            _df = _df[
                _df['block'].eq(self.block)
            ]

        return _df.to_dict('records')
        return results

    def get_tree_departments(self):
        # 一级部门
        top_df = self.d_df[self.d_df['department_parent_id'].eq(-1)]
        if top_df.empty:
        top_departments = self.get_department_by_parent_id(-1)
        if len(top_departments) == 0:
            return []

        d_list = []

        for index in top_df.index:
            top_d = top_df.loc[index].to_dict()

        for top_d in top_departments:
            department_id = top_d['department_id']

            # 检查 department_id 是否作为其他部门的 parent
            sub_df = self.d_df[
                self.d_df['department_parent_id'].eq(department_id)
            ].sort_values(by=['sort_value'], ascending=True)

            sub_deps = self.get_department_by_parent_id(department_id)
            employees = []

            if self.append_employee:
                # 要包含员工
                employees = self.get_employees_by_d_id(department_id)

            top_d['employees'] = employees

            if sub_df.empty:
            if len(sub_deps) == 0:
                top_d[sub_departments_column_name] = []
                d_list.append(top_d)
                continue

            self.parse_sub_department(sub_df, top_d)
            self.parse_sub_department(sub_deps, top_d)
            d_list.append(top_d)

        return d_list

    def get_all_departments(self, is_tree=1):
        if self.d_df.empty:
        if len(self.all_department_list) == 0:
            return []

        if is_tree != 1:
            return self.d_df.to_dict('records')
            return self.all_department_list

        return self.get_tree_departments()

    def parse_sub_department(self, df, top_d):
    def parse_sub_department(self, deps, top_d):
        sub_departments = []
        for s_index in df.index:
            d = df.loc[s_index].to_dict()
            sub_df = self.d_df[
                self.d_df['department_parent_id'].eq(
                    df.at[s_index, 'department_id'])
            ].sort_values(by=['sort_value'], ascending=True)
        for d in deps:
            sub_deps = self.get_department_by_parent_id(d['department_id'])
            employees = []

            if self.append_employee:
                # 要包含员工
                employees = self.get_employees_by_d_id(
                    df.at[s_index, 'department_id'])
                employees = self.get_employees_by_d_id(d['department_id'])

            d['employees'] = employees

            if sub_df.empty:
            if len(sub_deps) == 0:
                d[sub_departments_column_name] = []
                sub_departments.append(d)
                continue

            self.parse_sub_department(sub_df, d)
            self.parse_sub_department(sub_deps, d)
            sub_departments.append(d)

        top_d[sub_departments_column_name] = sub_departments
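The pattern throughout this hunk is consistent: pandas DataFrame filtering such as `df[df['col'].eq(x)]` is replaced by plain filtering over lists of dicts returned by the ORM. A small self-contained sketch of that replacement, using made-up sample rows in the shape `get_all_department_list()` returns:

```python
def get_department_by_parent_id(all_department_list, parent_id):
    # Pure-Python counterpart of the DataFrame filter used before this commit:
    # keep only the departments whose parent matches parent_id.
    return [d for d in all_department_list
            if d['department_parent_id'] == parent_id]


# Hypothetical sample rows; real rows come from Department.to_dict().
departments = [
    {'department_id': 1, 'department_parent_id': -1, 'department_name': 'HQ'},
    {'department_id': 2, 'department_parent_id': 1, 'department_name': 'Ops'},
]
print(get_department_by_parent_id(departments, 1))
# -> [{'department_id': 2, 'department_parent_id': 1, 'department_name': 'Ops'}]
```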
@@ -321,58 +303,52 @@ class DepartmentCRUD(object):

    @staticmethod
    def get_department_tree_list():
        df = get_department_df()
        if df.empty:
        all_deps = get_all_department_list()
        if len(all_deps) == 0:
            return []

        # 一级部门
        top_df = df[df['department_parent_id'].eq(-1)]
        if top_df.empty:
        top_deps = list(filter(lambda d: d['department_parent_id'] == -1, all_deps))
        if len(top_deps) == 0:
            return []

        tree_list = []

        for index in top_df.index:
        for top_d in top_deps:
            tree = Tree()
            identifier_root = top_df.at[index, 'department_id']
            identifier_root = top_d['department_id']
            tree.create_node(
                top_df.at[index, 'department_name'],
                top_d['department_name'],
                identifier_root
            )

            # 检查 department_id 是否作为其他部门的 parent
            sub_df = df[
                df['department_parent_id'].eq(identifier_root)
            ]
            if sub_df.empty:
            sub_ds = list(filter(lambda d: d['department_parent_id'] == identifier_root, all_deps))
            if len(sub_ds) == 0:
                tree_list.append(tree)
                continue

            DepartmentCRUD.parse_sub_department_node(
                sub_df, df, tree, identifier_root)
                sub_ds, all_deps, tree, identifier_root)

            tree_list.append(tree)

        return tree_list

    @staticmethod
    def parse_sub_department_node(df, all_df, tree, parent_id):
        for s_index in df.index:
    def parse_sub_department_node(sub_ds, all_ds, tree, parent_id):
        for d in sub_ds:
            tree.create_node(
                df.at[s_index, 'department_name'],
                df.at[s_index, 'department_id'],
                d['department_name'],
                d['department_id'],
                parent=parent_id
            )

            sub_df = all_df[
                all_df['department_parent_id'].eq(
                    df.at[s_index, 'department_id'])
            ]
            if sub_df.empty:
            next_sub_ds = list(filter(lambda item_d: item_d['department_parent_id'] == d['department_id'], all_ds))
            if len(next_sub_ds) == 0:
                continue

            DepartmentCRUD.parse_sub_department_node(
                sub_df, all_df, tree, df.at[s_index, 'department_id'])
                next_sub_ds, all_ds, tree, d['department_id'])

    @staticmethod
    def get_departments_and_ids(department_parent_id, block):
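`get_department_tree_list` keeps using treelib; only the data source changes from DataFrame cells to dict keys. A minimal sketch of building such trees from a flat parent/child list (the sample data is made up; field names follow the diff):

```python
from treelib import Tree

# Hypothetical sample in the shape get_all_department_list() returns.
departments = [
    {'department_id': 1, 'department_parent_id': -1, 'department_name': 'HQ'},
    {'department_id': 2, 'department_parent_id': 1, 'department_name': 'Ops'},
    {'department_id': 3, 'department_parent_id': 1, 'department_name': 'Dev'},
]


def attach_children(tree, all_deps, parent_id):
    # Recursively add every department whose parent is parent_id.
    for d in (x for x in all_deps if x['department_parent_id'] == parent_id):
        tree.create_node(d['department_name'], d['department_id'], parent=parent_id)
        attach_children(tree, all_deps, d['department_id'])


trees = []
for top in (d for d in departments if d['department_parent_id'] == -1):
    tree = Tree()
    tree.create_node(top['department_name'], top['department_id'])
    attach_children(tree, departments, top['department_id'])
    trees.append(tree)

for t in trees:
    t.show()  # prints an ASCII tree per top-level department
```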
@@ -380,44 +356,30 @@ class DepartmentCRUD(object):
            Department.department_parent_id == department_parent_id,
            Department.deleted == 0,
        ).order_by(Department.sort_value.asc())
        df = get_df_from_read_sql(query)
        if df.empty:
        all_departments = DepartmentCRUD.get_department_by_query(query)
        if len(all_departments) == 0:
            return [], []

        tree_list = DepartmentCRUD.get_department_tree_list()
        employee_df = get_all_employee_df(block)
        all_employee_list = get_all_employee_list(block)

        department_id_list = list(df['department_id'].values)
        department_id_list = [d['department_id'] for d in all_departments]
        query = Department.query.filter(
            Department.department_parent_id.in_(department_id_list),
            Department.deleted == 0,
        ).order_by(Department.sort_value.asc()).group_by(Department.department_id)
        sub_df = get_df_from_read_sql(query)
        if sub_df.empty:
            df['has_sub'] = 0
        sub_deps = DepartmentCRUD.get_department_by_query(query)

            def handle_row_employee_count(row):
                return len(employee_df[employee_df['department_id'] == row['department_id']])
        sub_map = {d['department_parent_id']: 1 for d in sub_deps}

            df['employee_count'] = df.apply(
                lambda row: handle_row_employee_count(row), axis=1)
        for d in all_departments:
            d['has_sub'] = sub_map.get(d['department_id'], 0)

        else:
            sub_map = {d['department_parent_id']: 1 for d in sub_df.to_dict('records')}
            d_ids = DepartmentCRUD.get_department_id_list_by_root(d['department_id'], tree_list)

            def handle_row(row):
                d_ids = DepartmentCRUD.get_department_id_list_by_root(
                    row['department_id'], tree_list)
                row['employee_count'] = len(
                    employee_df[employee_df['department_id'].isin(d_ids)])
            d['employee_count'] = len(list(filter(lambda e: e['department_id'] in d_ids, all_employee_list)))

                row['has_sub'] = sub_map.get(row['department_id'], 0)

                return row

            df = df.apply(lambda row: handle_row(row), axis=1)

        return df.to_dict('records'), department_id_list
        return all_departments, department_id_list

    @staticmethod
    def get_department_id_list_by_root(root_department_id, tree_list=None):
@@ -3,7 +3,6 @@
import traceback
from datetime import datetime

import pandas as pd
from flask import abort
from flask_login import current_user
from sqlalchemy import or_, literal_column, func, not_, and_
@@ -17,7 +16,6 @@ from api.extensions import db
from api.lib.common_setting.acl import ACLManager
from api.lib.common_setting.const import COMMON_SETTING_QUEUE, OperatorType
from api.lib.common_setting.resp_format import ErrFormat
from api.lib.common_setting.utils import get_df_from_read_sql
from api.models.common_setting import Employee, Department

@@ -214,59 +212,49 @@ class EmployeeCRUD(object):
        return CreateEmployee().batch_create(employee_list)

    @staticmethod
    def get_export_employee_df(block_status):
        criterion = [
            Employee.deleted == 0
        ]
    def get_export_employee_list(block_status):
        if block_status >= 0:
            criterion.append(
                Employee.block == block_status
            )
            employees = Employee.get_by(block=block_status, to_dict=False)
        else:
            employees = Employee.get_by(to_dict=False)

        query = Employee.query.with_entities(
            Employee.employee_id,
            Employee.nickname,
            Employee.email,
            Employee.sex,
            Employee.mobile,
            Employee.position_name,
            Employee.last_login,
            Employee.department_id,
            Employee.direct_supervisor_id,
        ).filter(*criterion)
        df = get_df_from_read_sql(query)
        if df.empty:
            return df
        keep_cols = EmployeeCRUD.get_current_user_view_columns()

        query = Department.query.filter(
            *criterion
        )
        department_df = get_df_from_read_sql(query)
        all_departments = Department.get_by(to_dict=False)
        d_id_map = {d.department_id: d.department_name for d in all_departments}
        e_id_map = {e.employee_id: e.nickname for e in employees}

        def find_name(row):
            department_id = row['department_id']
            _df = department_df[department_df['department_id']
                                == department_id]
            row['department_name'] = '' if _df.empty else _df.iloc[0]['department_name']
        export_columns_map = {
            'username': "用户名",
            'nickname': '姓名',
            'email': '邮箱',
            'department_name': '部门',
            'sex': '性别',
            'mobile': '手机号',
            'position_name': '岗位',
            'nickname_direct_supervisor': '直属上级',
            'last_login': '上次登录时间',
        }

            direct_supervisor_id = row['direct_supervisor_id']
            _df = df[df['employee_id'] == direct_supervisor_id]
            row['nickname_direct_supervisor'] = '' if _df.empty else _df.iloc[0]['nickname']
        data_list = []
        for e in employees:
            department_name = d_id_map.get(e.department_id, '')
            nickname_direct_supervisor = e_id_map.get(e.direct_supervisor_id, '')
            try:
                last_login = str(e.last_login) if e.last_login else ''
            except:
                last_login = ''
            data = e.to_dict()
            data['last_login'] = last_login
            data['department_name'] = department_name
            data['nickname_direct_supervisor'] = nickname_direct_supervisor

            if isinstance(row['last_login'], pd.Timestamp):
                try:
                    row['last_login'] = str(row['last_login'])
                except:
                    row['last_login'] = ''
            else:
                row['last_login'] = ''
            tmp = {export_columns_map[k]: data[k] for k in export_columns_map.keys() if
                   k in keep_cols or k in sub_columns}

            return row
            data_list.append(tmp)

        df = df.apply(find_name, axis=1)
        df.drop(['department_id', 'direct_supervisor_id',
                 'employee_id'], axis=1, inplace=True)
        return df
        return data_list

    @staticmethod
    def batch_employee(column_name, column_value, employee_id_list):
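The export refactor above drops the pandas self-joins in favour of two dict lookups keyed by id. A small illustration of that lookup pattern with made-up rows (field names as in the diff):

```python
# Hypothetical sample data; the real rows come from the Employee/Department models.
employees = [
    {'employee_id': 1, 'nickname': 'Alice', 'department_id': 10, 'direct_supervisor_id': None},
    {'employee_id': 2, 'nickname': 'Bob', 'department_id': 10, 'direct_supervisor_id': 1},
]
d_id_map = {10: 'Ops'}  # department_id -> department_name
e_id_map = {e['employee_id']: e['nickname'] for e in employees}

rows = []
for e in employees:
    row = dict(e)
    # Resolve foreign keys to display names with O(1) dict lookups instead of
    # filtering a DataFrame once per row.
    row['department_name'] = d_id_map.get(e['department_id'], '')
    row['nickname_direct_supervisor'] = e_id_map.get(e['direct_supervisor_id'], '')
    rows.append(row)

print(rows[1]['nickname_direct_supervisor'])  # -> 'Alice'
```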
@@ -481,7 +469,7 @@ class EmployeeCRUD(object):
            if value:
                abort(400, ErrFormat.query_column_none_keep_value_empty.format(column))
            expr = [attr.is_(None)]
            if column not in ["entry_date", "leave_date", "dfc_entry_date", "last_login"]:
            if column not in ["last_login"]:
                expr += [attr == '']
            expr = [or_(*expr)]
        elif operator == OperatorType.IS_NOT_EMPTY:
@@ -1,23 +1,6 @@
# -*- coding:utf-8 -*-
from datetime import datetime

import pandas as pd
from sqlalchemy import text

from api.extensions import db


def get_df_from_read_sql(query, to_dict=False):
    bind = query.session.bind
    query = query.statement.compile(dialect=bind.dialect if bind else None,
                                    compile_kwargs={"literal_binds": True}).string
    a = db.engine
    df = pd.read_sql(sql=text(query), con=a.connect())

    if to_dict:
        return df.to_dict('records')
    return df


def get_cur_time_str(split_flag='-'):
    f = f"%Y{split_flag}%m{split_flag}%d{split_flag}%H{split_flag}%M{split_flag}%S{split_flag}%f"
@@ -108,7 +108,7 @@ def _auth_with_ip_white_list():


def _auth_with_app_token():
    if _auth_with_session():
    if _auth_with_session() or _auth_with_token():
        if not is_app_admin(request.values.get('app_id')) and request.method != "GET":
            return False
    elif is_app_admin(request.values.get('app_id')):
@@ -150,17 +150,6 @@ class EmployeeViewExportExcel(APIView):
    url_prefix = (f'{prefix}/export_all',)

    def get(self):
        col_desc_map = {
            'nickname': "姓名",
            'email': '邮箱',
            'sex': '性别',
            'mobile': '手机号',
            'department_name': '部门',
            'position_name': '岗位',
            'nickname_direct_supervisor': '直接上级',
            'last_login': '上次登录时间',
        }

        # 规定了静态文件的存储位置
        excel_filename = 'all_employee_info.xlsx'
        excel_path = current_app.config['UPLOAD_DIRECTORY_FULL']
@@ -168,20 +157,19 @@ class EmployeeViewExportExcel(APIView):

        # 根据parameter查表,自连接通过上级id获取上级名字列
        block_status = int(request.args.get('block_status', -1))
        df = EmployeeCRUD.get_export_employee_df(block_status)
        data_list = EmployeeCRUD.get_export_employee_list(block_status)

        # 改变列名为中文head
        try:
            df = df.rename(columns=col_desc_map)
        except Exception as e:
            abort(500, ErrFormat.rename_columns_failed.format(str(e)))
        headers = data_list[0].keys()
        from openpyxl import Workbook

        # 生成静态excel文件
        try:
            df.to_excel(excel_path_with_filename,
                        sheet_name='Sheet1', index=False, encoding="utf-8")
        except Exception as e:
            current_app.logger.error(e)
            abort(500, ErrFormat.generate_excel_failed.format(str(e)))
        wb = Workbook()
        ws = wb.active
        # insert header
        for col_num, col_data in enumerate(headers, start=1):
            ws.cell(row=1, column=col_num, value=col_data)

        for row_num, row_data in enumerate(data_list, start=2):
            for col_num, col_data in enumerate(row_data.values(), start=1):
                ws.cell(row=row_num, column=col_num, value=col_data)
        wb.save(excel_path_with_filename)
        return send_from_directory(excel_path, excel_filename, as_attachment=True)
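The view now writes the workbook cell by cell with openpyxl instead of `DataFrame.to_excel`. The same result can also be produced with `Worksheet.append`; a minimal standalone sketch, where the file name and sample rows are illustrative only:

```python
from openpyxl import Workbook

# Illustrative rows; the real view gets them from get_export_employee_list().
data_list = [
    {'username': 'alice', 'nickname': 'Alice', 'department_name': 'Ops'},
    {'username': 'bob', 'nickname': 'Bob', 'department_name': 'Dev'},
]

wb = Workbook()
ws = wb.active
ws.append(list(data_list[0].keys()))      # header row
for row in data_list:
    ws.append(list(row.values()))         # one spreadsheet row per dict
wb.save('all_employee_info.xlsx')
```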
@@ -12,7 +12,7 @@ bs4==0.0.1
cachelib==0.9.0
celery==4.3.0
celery-once==3.0.1
certifi==2023.5.7
certifi==2023.7.22
charset-normalizer==3.1.0
click==8.1.3
dnspython==2.3.0
@@ -47,9 +47,7 @@ meld3==2.0.1
mistune==3.0.1
more-itertools==5.0.0
msgpack-python==0.5.6
numpy==1.18.5
pandas==1.3.2
Pillow==8.3.2
Pillow==9.2.0
pkgutil_resolve_name==1.3.10
pyasn1==0.5.0
pyasn1-modules==0.3.0
@@ -58,7 +56,7 @@ PyJWT==2.4.0
PyMySQL==0.9.3
python-dateutil==2.8.2
python-dotenv==1.0.0
python-ldap==3.2.0
python-ldap==3.4.0
pytz==2023.3
PyYAML==6.0
redis==3.2.1
@@ -1 +0,0 @@
public/* linguist-vendored
@@ -1,26 +0,0 @@
.DS_Store
node_modules
/dist
/dist.zip
/temp

# local env files
.env.local
.env.*.local

# Log files
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Editor directories and files
.idea
.vscode
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw*
*.css.map

.env.development
@@ -1,11 +0,0 @@
#Oneops-UI

```shell
## build
yarn run build

## develop
yarn run serve


```