|
|
# Standard library
import datetime
import json
import sys
import time
import urllib
import urllib.parse  # explicit: `import urllib` alone does not load the submodule used by getStock

# Third-party
import numpy as np
import pandas as pd
import pymysql
import requests
import xlwt
from pymongo import MongoClient

# from xlwt import *
|
|
|
|
|
|
# Reference timestamps captured once at import time so every derived date and
# filename in this run is consistent.
nowTime = datetime.datetime.now()
yesTime = nowTime + datetime.timedelta(days=-1)        # yesterday
weekAgoTime = nowTime + datetime.timedelta(days=-7)    # one week ago

# Accumulates summary fields; dumped as JSON on stdout at the end of the run.
outputObj = {}

# NOTE: the original file declared `global skns_file_path` (and the other
# three path names) here.  `global` at module scope is a no-op, so those
# statements were removed; the paths themselves are assigned at the bottom of
# the script from sys.argv before any function uses them.
|
|
|
|
|
|
def getProductCodes():
    """Load the SKU spreadsheet and return the list of product model codes.

    The sheet is collapsed to one row per (id, model) pair (counting the
    'size' column) before the model column is extracted, which removes the
    per-size duplicate rows.
    """
    sheet = pd.read_excel(skns_file_path)
    grouped = (
        sheet.groupby(['id', 'model'])
        .agg({'size': 'count'})
        .reset_index()
    )
    return grouped['model'].tolist()
|
|
|
|
|
|
|
|
|
def getHeaders():
    """Authenticate against the yohops admin endpoint and return request headers.

    Logs in with the built-in admin credentials, lifts the session cookies out
    of the response, and returns a header dict (Cookie set, Referer pointed at
    the search page) suitable for the data-export requests that follow.
    """
    login_url = 'http://run.yohops.com/data-analysis-web/user/login?username=admin&password=yohodata123456'
    header = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,ja;q=0.6',
        'Connection': 'keep-alive',
        'Host': 'run.yohops.com',
        'Referer': 'http://run.yohops.com/login.html',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    }

    login_response = requests.get(login_url, headers=header)
    session_cookies = requests.utils.dict_from_cookiejar(login_response.cookies)

    # Fold the session cookies into the header dict for subsequent calls.
    header['Cookie'] = 'JSESSIONID={}; u_={}; p_={}'.format(
        session_cookies['JSESSIONID'], session_cookies['u_'], session_cookies['p_'])
    header['Referer'] = 'http://run.yohops.com/sys/search.html'
    return header
|
|
|
|
|
|
|
|
|
def getStock(models, headers):
    """Download the hourly UFO stock/pay counts for *models* as an Excel export.

    Queries the last 8 full hours of ufo_product_size_storeage_analysis rows
    via the executeSql endpoint and streams the exported spreadsheet bytes to
    save_stock_path.

    :param models: iterable of product_code strings to query.
    :param headers: authenticated header dict from getHeaders().
    """
    # Align the window end to the top of the current hour; look back 8 hours.
    endstamp = int(time.time() / 3600) * 3600
    timestamp = endstamp - 8 * 60 * 60

    # Build the SQL IN-clause by hand: the original str(tuple(models)) emits a
    # trailing comma for a single model -- ('X',) -- which is invalid SQL.
    in_clause = '(%s)' % ', '.join("'%s'" % m for m in models)

    url = "http://run.yohops.com/data-analysis-web/executeSql?sql=select id, product_id, product_name, product_code, size_name, size_id, on_count, pay_count, FROM_UNIXTIME(create_time,{0}) as create_timestr, create_date from ufo_product_size_storeage_analysis where pre_sale_flag = 0 and create_time >={1} and create_time < {2} and product_code in {3} group by product_id,size_name, create_timestr;&db=run&export=Y".format(
        urllib.parse.quote_plus("'%m%d%H'"), timestamp, endstamp, in_clause)

    response = requests.get(url, headers=headers)
    # The endpoint returns the spreadsheet bytes directly; persist them verbatim.
    with open(save_stock_path, "wb") as code:
        code.write(response.content)
|
|
|
|
|
|
def getWeekSales(models, headers, spot):
    """Fetch last week's pay counts per (model, size) from the UFO export API.

    :param models: iterable of product_code strings.
    :param headers: authenticated header dict from getHeaders(); NOT modified
        (the original mutated the caller's dict, clobbering Host/Referer for
        any later request against run.yohops.com).
    :param spot: when > 0, restrict to spot-ready products
        (isproductready=1) and label the count column 'count_all';
        otherwise the column is 'count_spot'.
    :return: DataFrame with columns ['model', 'size', <count column>].
    """
    url = "http://ufo-auth.yohops.com/open/exportdata"

    # Work on a copy so the shared header dict is left untouched.
    headers = dict(headers)
    headers['Host'] = 'ufo-auth.yohops.com'
    headers['Referer'] = 'http://ufo-auth.yohops.com/login.html'

    reqData = {
        'startdate': weekAgoTime.strftime('%Y%m%d'),
        'enddate': yesTime.strftime('%Y%m%d'),
        'productcodes': ','.join(models)
    }

    columnName = 'count_spot'
    if int(spot) > 0:
        reqData['isproductready'] = 1
        columnName = 'count_all'

    res = requests.post(url=url, data=reqData, headers=headers, timeout=10)
    resJson = res.json()

    data = []
    if 'data' in resJson:
        data = resJson['data']

    # Record the number of returned rows in the run summary.
    outputObj['week_' + columnName] = str(len(data))

    countList = [
        [str(item['product_code']), str(item['size_name']), item['count(1)']]
        for item in data
    ]

    return pd.DataFrame(countList, columns=['model', 'size', columnName])
|
|
|
|
|
|
|
|
|
# adidas uses fractional EU sizes; map them onto the size names used by the
# rest of the catalogue so rows can be merged.  Hoisted to module level so the
# dict is not rebuilt for every row during DataFrame.apply.
CONVER_SIZE_DIC = {
    '35 1/2': '35.5',
    '36 2/3': '36.5',
    '37 1/3': '37',
    '38 2/3': '38.5',
    '39 1/3': '39',
    '40 2/3': '40.5',
    '41 1/3': '41',
    '42 2/3': '42.5',
    '43 1/3': '43',
    '44 2/3': '44.5',
    '45 1/3': '45',
    '46 2/3': '46.5',
    '47 1/3': '47',
    '49 1/3': '49',
}


def convertSize(row, df):
    """Normalize an adidas fractional size and fold in the existing pay count.

    Intended for DataFrame.apply(axis=1).  If the row is an adidas product
    with a fractional size, the size is rewritten; the row's pay_count is then
    increased by the max pay_count already recorded in *df* for the (possibly
    rewritten) size of the same product.

    :param row: one row (Series) with product_name, size_name, product_id,
        product_code and pay_count fields.
    :param df: the full frame to look up pre-existing pay counts in.
    :return: the (mutated) row.
    """
    if 'adidas' in row['product_name'] and row['size_name'] in CONVER_SIZE_DIC:
        row['size_name'] = CONVER_SIZE_DIC[row['size_name']]

    size_name = row['size_name']
    orignal_pay_count = df[(df['size_name'] == size_name)
                           & (df['product_id'] == row['product_id'])
                           & (df['product_code'] == row['product_code'])]['pay_count'].max()

    # .max() over an empty selection yields NaN, which would poison pay_count
    # downstream; treat "no pre-existing row for this size" as zero instead.
    if pd.isna(orignal_pay_count):
        orignal_pay_count = 0

    row['pay_count'] = orignal_pay_count + row['pay_count']
    return row
|
|
|
|
|
|
def handleExcel():
    """Post-process the raw stock export into a per-SKU snapshot spreadsheet.

    Reads save_stock_path, drops voided products, normalizes adidas sizes via
    convertSize, aggregates duplicate rows, then pivots the hourly on_count
    snapshots into one column per capture time and writes the result to
    proccessed_stock_path.
    """
    df = pd.read_excel(save_stock_path,
                       dtype={'id': np.int32, 'product_id': np.int32, 'product_code': str, 'size_name': str,
                              'on_count': np.int32, 'pay_count': np.int32, 'create_timestr': str, 'create_date': str})
    # '作废' marks voided/cancelled products; drop them.
    df = df[df['product_name'] != '作废']
    # df = df[df['product_name'].str.contains("adidas", case = False)]
    # Normalize adidas fractional sizes row by row; df is passed in again so
    # the converter can look up the pre-existing pay_count of the mapped size.
    df = df.apply(convertSize, args=[df], axis=1)
    del df['size_id']
    # Collapse rows that became duplicates after size normalization.
    df = df.groupby(['product_id', 'product_name', 'product_code', 'size_name', 'create_timestr', 'create_date']).agg(
        {'on_count': sum, 'pay_count': sum}).reset_index()

    # Distinct capture times, sorted newest first.
    createtimelist = df.groupby(['create_timestr']).agg({'on_count': sum}).index.values.tolist()
    createtimelist.sort(reverse=True)

    # Base frame: one row per SKU with its total on_count; each capture time
    # is then merged in as an extra column named after the timestamp.
    newdf = df.groupby(['product_id', 'product_name', 'product_code', 'size_name']).agg(
        {'on_count': sum}).reset_index()
    for createtime in createtimelist:
        sub_df = df[df['create_timestr'] == createtime]
        if createtimelist.index(createtime) == 0:
            # The newest snapshot also contributes the pay_count column.
            sub_df = sub_df[['pay_count', 'on_count', 'product_id', 'size_name']].rename(
                columns={'on_count': createtime})
        else:
            sub_df = sub_df[['on_count', 'product_id', 'size_name']].rename(columns={'on_count': createtime})

        # Left-join so SKUs missing from a snapshot get 0, not NaN.
        newdf = newdf.merge(sub_df, how='left', on=['product_id', 'size_name']).fillna(0)

    # Re-insert a placeholder size_id column so the downstream column layout
    # (read back in combineExcels) stays stable.
    newdf.insert(loc=4, column='size_id', value=0)
    newdf.to_excel(proccessed_stock_path, index=None)
|
|
|
|
|
|
|
|
|
def getBrandIDAndName():
    """Read the brand table from MySQL and map brand id -> brand name.

    :return: dict of {str(brand_id): brand_name}, from the first two columns
        of duapp.BRAND.
    """
    # NOTE(review): credentials are hard-coded; consider moving to config/env.
    db = pymysql.connect(host='172.16.6.117',
                         port=3306,
                         user='root',
                         password='asdf1234!',
                         database='duapp',
                         charset='utf8')
    try:
        cursor = db.cursor()
        try:
            cursor.execute('SELECT * FROM duapp.BRAND;')
            # Column 0 is the brand id, column 1 the brand name.
            return {str(record[0]): record[1] for record in cursor.fetchall()}
        finally:
            cursor.close()
    finally:
        # The original leaked the connection; always release it.
        db.close()
|
|
|
|
|
|
|
|
|
def getSknsExcelBrand(brand_id_name_dic):
    """Map DU product ids to brand names using the mongo product dump.

    :param brand_id_name_dic: {str(brand_id): brand_name} as returned by
        getBrandIDAndName().
    :return: {str(product_id): brand_name}, with a few manual overrides for
        products whose brandId is missing or wrong in the dump.
    """
    conn = MongoClient('172.16.6.117', 27017)
    try:
        product_db = conn.du.product

        du_id_brandname_dic = {}
        for product in product_db.find({}):
            detail = product['detail']
            brandIDStr = str(detail['brandId'])
            if brandIDStr in brand_id_name_dic:
                du_id_brandname_dic[str(detail['productId'])] = brand_id_name_dic[brandIDStr]
    finally:
        # The original never closed the mongo connection; release it here.
        conn.close()

    # Manual overrides.
    du_id_brandname_dic.update({
        '18624': 'Timberland',
        '22922': 'Nike',
        '31470': 'adidas',
        '31471': 'adidas',
        '31515': 'adidas',
        '26063': 'Nike',
    })
    return du_id_brandname_dic
|
|
|
|
|
|
def getSkuCount(df, model, size):
    """Look up the weekly pay count for one (model, size) pair.

    Matching is case-insensitive on both the 'model' and 'size' columns.
    Returns the value from the third column of the first matching row, or the
    string '0' when the pair is absent from *df*.
    """
    model_key = model.lower()
    size_key = str(size).lower()

    matches = df[df['model'].str.lower() == model_key]
    matches = matches[matches['size'].str.lower() == size_key]

    if matches.empty:
        return '0'
    return matches.iloc[0, 2]
|
|
|
|
|
|
|
|
|
def combineExcels(du_id_brandname_dic, week_spot_pay_count, week_pay_count):
    """Join the SKU sheet, processed UFO stock and weekly sales into one report.

    Writes an .xls file to result_path with one row per (model, size) of
    interest, colour-coding the four hourly on_count snapshot cells by their
    change versus the previous snapshot.

    :param du_id_brandname_dic: {str(du product id): brand name}.
    :param week_spot_pay_count: DataFrame ['model', 'size', 'count_spot'].
    :param week_pay_count: DataFrame ['model', 'size', 'count_all'].
    """
    # Right-align the numbers in the coloured snapshot cells.  (sic: the
    # original misspells "alignment"; name kept for byte-identity.)
    aligment = xlwt.Alignment()
    aligment.horz = aligment.HORZ_RIGHT

    # Gray background: count is zero / no data.
    style_gray = xlwt.XFStyle()
    pattern = xlwt.Pattern()
    pattern.pattern = xlwt.Pattern.SOLID_PATTERN
    pattern.pattern_fore_colour = xlwt.Style.colour_map['gray25']  # cell background: gray
    style_gray.pattern = pattern
    style_gray.alignment = aligment

    # Red background: count increased versus the previous snapshot.
    style_red = xlwt.XFStyle()
    pattern = xlwt.Pattern()
    pattern.pattern = xlwt.Pattern.SOLID_PATTERN
    pattern.pattern_fore_colour = xlwt.Style.colour_map['red']  # cell background: red
    style_red.pattern = pattern
    style_red.alignment = aligment

    # Yellow background: model not found and no fallback id available.
    style_yellow = xlwt.XFStyle()
    pattern = xlwt.Pattern()
    pattern.pattern = xlwt.Pattern.SOLID_PATTERN
    pattern.pattern_fore_colour = xlwt.Style.colour_map['yellow']  # cell background: yellow
    style_yellow.pattern = pattern
    style_yellow.alignment = aligment

    # NOTE(review): this 'green' style is dead code -- style_green is
    # reassigned below (sea_green) before it is ever used.
    style_green = xlwt.XFStyle()
    pattern = xlwt.Pattern()
    pattern.pattern = xlwt.Pattern.SOLID_PATTERN
    pattern.pattern_fore_colour = xlwt.Style.colour_map['green']  # cell background: green
    style_green.pattern = pattern
    style_green.alignment = aligment

    # NOTE(review): style_orange is never referenced below.
    style_orange = xlwt.XFStyle()
    pattern = xlwt.Pattern()
    pattern.pattern = xlwt.Pattern.SOLID_PATTERN
    pattern.pattern_fore_colour = xlwt.Style.colour_map['light_orange']  # cell background: light orange
    style_orange.pattern = pattern
    style_orange.alignment = aligment

    # Sea-green background with thin borders: count decreased (sales).
    style_green = xlwt.XFStyle()
    pattern = xlwt.Pattern()
    pattern.pattern = xlwt.Pattern.SOLID_PATTERN
    pattern.pattern_fore_colour = xlwt.Style.colour_map['sea_green']  # cell background: sea green
    style_green.pattern = pattern
    borders = xlwt.Borders()
    borders.left = xlwt.Borders.THIN
    borders.right = xlwt.Borders.THIN
    borders.top = xlwt.Borders.THIN
    borders.bottom = xlwt.Borders.THIN

    style_green.borders = borders

    # NOTE(review): style_border is never referenced below.
    style_border = xlwt.XFStyle()
    style_border.borders = borders
    # Sizes of interest: only rows with these sizes make it into the report.
    SIZE_CONCERN = ['36', '37', '37.5', '38', '41', '42', '43']
    skns_df = pd.read_excel(skns_file_path, dtype={'id': np.int32, 'model': str, 'size': str})
    ufo_df = pd.read_excel(proccessed_stock_path,
                           dtype={'id': np.int32, 'product_id': np.int32, 'product_code': str, 'size_name': str,
                                  'on_count': np.int32, 'create_timestr': str, 'create_date': str})
    workbook_write = xlwt.Workbook(encoding='UTF-8')
    worksheet_write = workbook_write.add_sheet('Sheet1')
    # Fixed header columns 0-8.
    worksheet_write.write(0, 0, 'id')
    worksheet_write.write(0, 1, 'duId')
    worksheet_write.write(0, 2, 'brand')
    worksheet_write.write(0, 3, 'model')
    worksheet_write.write(0, 4, 'name')
    worksheet_write.write(0, 5, 'productId')
    worksheet_write.write(0, 6, 'size')
    worksheet_write.write(0, 7, 'duPrice')
    worksheet_write.write(0, 8, 'ufoPrice')
    # Column shift applied to everything to the right of the price columns.
    offset_price = 2

    # Maps header label 'timeN' -> the actual snapshot column name in ufo_df
    # (columns 7-10, read right-to-left); dumped as JSON into 'extra'.
    time_extra_name = {}
    time_on_count = ufo_df.columns
    for i in range(7, 11):
        worksheet_write.write(0, i+offset_price, 'time' + str(i - 6))
        time_extra_name['time' + str(i - 6)] = time_on_count[17 - i]
    worksheet_write.write(0, 11+offset_price, 'count')
    worksheet_write.write(0, 12+offset_price, 'weekSpotCount')
    worksheet_write.write(0, 13+offset_price, 'weekCount')
    worksheet_write.write(0, 14+offset_price, 'status')
    worksheet_write.write(0, 15+offset_price, 'extra')

    time_extra_name_json = json.dumps(time_extra_name)

    rowNum = 1
    # Tallies -- incremented below but not otherwise reported.
    gray = 0
    yellow = 0
    greater_then_zero = 0

    for index, row in skns_df.iterrows():
        # Keep only numeric sizes that appear in SIZE_CONCERN; anything else
        # (non-numeric or uninteresting) is skipped via the empty-string flag.
        currentSizeName = index
        try:
            float(row['size'])
            currentSizeName = row['size']
            if currentSizeName in SIZE_CONCERN:
                pass
            else:
                currentSizeName = ''
        except:
            currentSizeName = ''
            pass

        if currentSizeName == '':
            continue

        brand = ''
        duid = str(row['id'])
        model = row['model']
        yhid = row['yhid']
        duPrice = row['duPrice']
        ufoPrice = row['ufoPrice']
        soldNum = row['count']

        if duid in du_id_brandname_dic:
            brand = du_id_brandname_dic[duid]

        # All UFO stock rows for this model (case-insensitive match).
        sub_df = ufo_df[ufo_df['product_code'].str.lower().isin([model.lower()])]

        worksheet_write.write(rowNum, 0, rowNum)
        worksheet_write.write(rowNum, 1, duid)
        worksheet_write.write(rowNum, 2, brand)
        worksheet_write.write(rowNum, 3, model)

        productID = ''
        if len(sub_df) > 0:
            productID = sub_df.iloc[0, 0]
            productName = sub_df.iloc[0, 1]
            # Narrow down to the current size.
            sub_df = sub_df[sub_df['size_name'].isin([currentSizeName])]
            worksheet_write.write(rowNum, 4, productName)

            if len(sub_df) > 0:
                # Write the four snapshot counts (columns 10..7 of sub_df),
                # colouring each cell by its change vs the previous snapshot.
                for i in range(7, 11):
                    currentCount = 0
                    try:
                        currentCount = int(sub_df.iloc[0, 17 - i])
                    except:
                        pass

                    if i > 7:
                        preCount = 0
                        try:
                            preCount = int(sub_df.iloc[0, 17 - i + 1])
                        except:
                            preCount = 0

                        if currentCount > preCount:
                            worksheet_write.write(rowNum, i+offset_price, currentCount, style=style_red)
                        elif currentCount < preCount:
                            worksheet_write.write(rowNum, i+offset_price, currentCount, style=style_green)
                        elif currentCount == 0:
                            worksheet_write.write(rowNum, i+offset_price, currentCount, style=style_gray)
                        else:
                            worksheet_write.write(rowNum, i+offset_price, currentCount)
                    else:
                        if currentCount == 0:
                            worksheet_write.write(rowNum, i+offset_price, currentCount, style=style_gray)
                        else:
                            worksheet_write.write(rowNum, i+offset_price, currentCount)
                # Column 5 appears to be the aggregated on_count from
                # handleExcel's output -- status '1' when it is zero.
                # NOTE(review): confirm column layout against handleExcel.
                if sub_df.iat[0, 5] == 0:
                    gray += 1

                    worksheet_write.write(rowNum, 14+offset_price, '1')
                else:
                    greater_then_zero += 1
                    worksheet_write.write(rowNum, 14+offset_price, '0')
            else:
                # Matching model exists but not this size.
                for i in range(7, 11):
                    worksheet_write.write(rowNum, i+offset_price, '0', style=style_gray)
                gray += 1

                worksheet_write.write(rowNum, 14+offset_price, '1')
        else:
            stock_style = style_yellow

            if int(yhid) > 0:
                # Fall back to the YOHO product id; mark gray, status '1'.
                productID = yhid
                stock_style = style_gray
                gray += 1
                worksheet_write.write(rowNum, 14+offset_price, '1')
            else:
                yellow += 1
                worksheet_write.write(rowNum, 14+offset_price, '2')

            # No matching model at all: all snapshots rendered as '0'.
            for i in range(7, 11):
                worksheet_write.write(rowNum, i+offset_price, '0', style=stock_style)

        worksheet_write.write(rowNum, 5, str(productID))
        worksheet_write.write(rowNum, 6, currentSizeName)
        worksheet_write.write(rowNum, 7, duPrice)
        worksheet_write.write(rowNum, 8, ufoPrice)
        worksheet_write.write(rowNum, 11+offset_price, soldNum)

        # Weekly sales (all and spot-only) for this (model, size).
        sizeCount = getSkuCount(week_pay_count, model, row['size'])
        sizeSpotCount = getSkuCount(week_spot_pay_count, model, row['size'])

        worksheet_write.write(rowNum, 12+offset_price, str(sizeSpotCount))
        worksheet_write.write(rowNum, 13+offset_price, str(sizeCount))
        worksheet_write.write(rowNum, 15+offset_price, time_extra_name_json)
        rowNum += 1

    workbook_write.save(result_path)
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Script entry: argv[1] is the output directory, argv[2] the SKU spreadsheet.
# ---------------------------------------------------------------------------
if len(sys.argv) > 2:
    day_tag = nowTime.strftime("%m%d")
    skns_file_path = sys.argv[2]
    save_stock_path = sys.argv[1] + '/auto_stock%s.xls' % day_tag
    proccessed_stock_path = sys.argv[1] + '/auto_stock_processed%s.xls' % day_tag
    result_path = sys.argv[1] + '/top300_sales_stock%s.xls' % day_tag

# Pipeline: fetch the model list, log in, pull stock plus weekly sales,
# post-process the stock export, enrich with brand names and combine
# everything into the final report.
modellist = getProductCodes()
headers = getHeaders()

getStock(modellist, headers)

week_spot_df = getWeekSales(modellist, headers, 0)
week_df = getWeekSales(modellist, headers, 1)

handleExcel()
brand_id_name_dic = getBrandIDAndName()
du_id_brandname_dic = getSknsExcelBrand(brand_id_name_dic)
combineExcels(du_id_brandname_dic, week_spot_df, week_df)

outputObj['path'] = result_path
outputObj['day'] = nowTime.strftime('%Y-%m-%d')

print(json.dumps(outputObj))