# sklearn api
#coding:utf-8
import pandas as pd
import time
import numpy as np
import warnings
import numpy as np
np.random.seed(2018)
warnings.filterwarnings("ignore")
# 时间处理
def time2cov(time_):
    """Format a unix timestamp as 'YYYY-MM-DD HH:MM:SS' in local time.

    The competition dates are anonymized by shifting whole days, but the
    time-of-day component is left untouched, so the formatted string is
    still meaningful for hour-level features.

    :param time_: unix timestamp (seconds)
    :return: formatted local-time string
    """
    local_time = time.localtime(time_)
    return time.strftime("%Y-%m-%d %H:%M:%S", local_time)
# Load the raw competition dumps (space-separated text files).
print('train')
train = pd.read_csv('../data/round1_ijcai_18_train_20180301.txt',sep=" ")
# Duplicate impressions share an instance_id; keep one row per instance.
train = train.drop_duplicates(['instance_id'])
train = train.reset_index(drop=True)
print('test')
test_a = pd.read_csv('../data/round1_ijcai_18_test_a_20180301.txt',sep=" ")
# Stack train + test so the sliding-window statistics below see one frame;
# the original row order (train first, then test_a) is relied on later to split back.
all_data = pd.concat([train,test_a])
# Derive a real datetime plus hour / day-of-month columns from the unix timestamp.
all_data['real_time'] = pd.to_datetime(all_data['context_timestamp'].apply(time2cov))
all_data['real_hour'] = all_data['real_time'].dt.hour
all_data['real_day'] = all_data['real_time'].dt.day
def time_change(hour):
    """Shift an hour value one hour earlier, wrapping 0 back to 23."""
    if hour == 0:
        return 23
    return hour - 1
def time_change_1(hour):
    """Shift an hour value one hour later, wrapping 23 forward to 0."""
    if hour == 23:
        return 0
    return hour + 1
# Previous / next hour (wrapping at midnight); candidate join keys for
# neighbouring-hour statistics.
all_data['hour_before'] = all_data['real_hour'].apply(time_change)
all_data['hour_after'] = all_data['real_hour'].apply(time_change_1)
# 18 21 19 20 22 23 24 | 25
print(all_data['real_day'].unique())
# train and test cov radio
# print(len((set(train['user_id']))&(set(test_a['user_id'])))/len(set(test_a['user_id'])))
# print(len((set(train['shop_id']))&(set(test_a['shop_id'])))/len(set(test_a['shop_id'])))
# print(len((set(train['item_id']))&(set(test_a['item_id'])))/len(set(test_a['item_id'])))
# user 0.26714801444043323
# shop 0.9781637717121588
# item 0.956427604871448
# shop feat
# item feat
# user feat
def c_log_loss(y_t, y_p):
    """Mean binary cross-entropy in LightGBM custom-eval form.

    :param y_t: true 0/1 labels (array-like)
    :param y_p: predicted probabilities (array-like)
    :return: (loss, False) — the False flag means "lower is better"
    """
    truth = np.asarray(y_t)
    pred = np.asarray(y_p)
    per_sample = truth * np.log(pred) + (1 - truth) * np.log(1 - pred)
    return -np.sum(per_sample) / len(y_t), False
# 获取当前时间之前的前x天的转化率特征
def get_before_cov_radio(all_data,label_data,cov_list = list(['shop_id','item_id','real_hour','item_pv_level','item_sales_level']),day_list = list([1,2,3])):
    """Conversion-rate (mean of is_trade) and count features for the x days
    preceding the label window, per key column in cov_list.

    :param all_data: stacked train+test frame with 'real_day' / 'is_trade'
    :param label_data: rows of the current label window (only its min day is used)
    :param cov_list: key columns to aggregate over
    :param day_list: lookback window sizes in days
    :return: list of DataFrames, one appended per (cov, d) iteration; each has
             the key column first, then suffixed mean/count columns
    NOTE(review): dict-based renaming in SeriesGroupBy.agg (L69 pattern) was
    removed in pandas 1.0 ("nested renamer"); this only runs on older pandas.
    NOTE(review): result.append(r) sits inside the day loop, so partial frames
    for d=1 and d=2 are appended as well as the final d=3 merge — looks
    unintended (duplicated columns after downstream merges); confirm.
    """
    result = []
    r = pd.DataFrame()
    label_data_time = label_data['real_day'].min()
    label_data_time_set = label_data['real_day'].unique()
    print('label set day',label_data_time_set)
    for cov in cov_list:
        for d in day_list:
            # window: the d days strictly before the label window's first day
            feat_set = all_data[
                (all_data['real_day']>=label_data_time-d)&(all_data['real_day']<label_data_time)
            ]
            print("cov feature",feat_set['real_day'].unique())
            print("cov time",cov)
            # mean = conversion rate, count = exposure volume; suffix encodes key+window
            tmp = feat_set.groupby([cov],as_index=False).is_trade.agg({'mean':np.mean,'count':'count'}).add_suffix("_%s_before_%d_day"%(cov,d))
            # the groupby key also got suffixed; rename it back for merging
            tmp.rename(columns={'%s_%s_before_%d_day'%(cov,cov,d):cov},inplace=True)
            if d == 1:
                r = tmp
            else:
                # accumulate windows side by side on the key; missing keys -> 0
                r = pd.merge(r,tmp,on=[cov],how='outer').fillna(0)
            result.append(r)
    return result
def calc_categry_feat(data):
    """Expand the ';'-separated category/property strings into numeric columns
    and flag agreement between item categories and predicted categories.

    Adds: item_category_list_1/2, item_property_list_0/1/2,
    predict_category_0/1/2 (-1 when missing), and 0/1 match indicators for
    each (item category, predicted category) pair. Drops the three raw
    string columns.

    :param data: frame with item_category_list / item_property_list /
                 predict_category_property string columns; modified in place
    :return: the same frame
    """
    for pos in range(2):
        data['item_category_list_%d' % (pos + 1)] = data['item_category_list'].map(
            lambda s, p=pos: int(s.split(';')[p]))
    for pos in range(3):
        data['item_property_list_%d' % pos] = data['item_property_list'].map(
            lambda s, p=pos: int(s.split(';')[p]))
    for pos in range(3):
        # predicted entries look like "category:properties"; -1 when fewer than pos+1 entries
        data['predict_category_%d' % pos] = data['predict_category_property'].map(
            lambda s, p=pos: int(s.split(';')[p].split(':')[0]) if len(s.split(';')) > p else -1)
    for cat_col in ('item_category_list_1', 'item_category_list_2'):
        for pred_col in ('predict_category_0', 'predict_category_1', 'predict_category_2'):
            match_col = '%s_%s' % (cat_col, pred_col)
            data[match_col] = (data[cat_col] == data[pred_col]).astype(int)
    for raw_col in ('item_category_list', 'item_property_list', 'predict_category_property'):
        del data[raw_col]
    return data
# Column groups that make up the model's raw input; `feat` is the union used
# to slice each label window before feature engineering.
take_columns = ['instance_id','item_id','shop_id','user_id','is_trade']
# Current-time shop quality scores.
shop_current_col = [
    'shop_score_description','shop_score_delivery','shop_score_service',
    'shop_star_level','shop_review_positive_rate','shop_review_num_level'
]
# User demographic columns.
user_col = [
    'user_gender_id','user_age_level','user_occupation_id','user_star_level'
]
# Item attributes; the two raw list columns are expanded by calc_categry_feat.
item_col = [
    'item_brand_id','item_city_id','item_price_level',
    'item_sales_level','item_collected_level','item_pv_level',
    'item_category_list','item_property_list'
]
# Time keys used for windowed statistics (dropped again before training).
time_feat = ['real_hour','hour_before','hour_after','context_timestamp','real_day']
context_col = ['predict_category_property','context_page_id']
feat = take_columns + shop_current_col + time_feat + user_col + item_col + context_col
def get_history_user_feat(all_data,data):
    """Per-user activity-span ("alive") features computed from history strictly
    before the label window's first day.

    :param all_data: stacked frame with context_timestamp / real_day / is_trade
    :param data: label-window rows; merged columns are added to a copy
    :return: data with alive, s_alive, alive/s_alive, alive_cov,
             alive/alive_cov columns, NaNs filled with -1
    NOTE(review): dict-based renaming in SeriesGroupBy.agg was removed in
    pandas 1.0; this only runs on older pandas.
    NOTE(review): 's_alive' subtracts a unix-timestamp minimum (day_begin)
    from a day-of-month value (label_data_time) — units don't match; looks
    unintended but is preserved here. Confirm before reuse.
    """
    label_data_time = data['real_day'].min()
    print(label_data_time)
    # history = everything before the label day
    tmp = all_data[all_data['real_day'] < label_data_time]
    print(tmp['real_day'].unique())
    # first / last time each user was seen in the history window
    user_time = tmp.groupby(['user_id'],as_index=False).context_timestamp.agg({'day_begin':'min','day_end':'max'})
    user_time['alive'] = user_time['day_end'] - user_time['day_begin']
    user_time['s_alive'] = label_data_time - user_time['day_begin']
    user_time['alive/s_alive'] = user_time['alive'] / user_time['s_alive']
    # last time each user converted (is_trade == 1)
    user_time_cov = tmp[tmp['is_trade']==1]
    user_time_cov = user_time_cov.groupby(['user_id'], as_index=False).context_timestamp.agg({'day_end_cov': 'max'})
    user_time_cov = pd.DataFrame(user_time_cov).drop_duplicates(['user_id','day_end_cov'])
    data = pd.merge(data,user_time[['user_id','alive','s_alive','alive/s_alive','day_begin','day_end']],on=['user_id'],how='left')
    data = pd.merge(data,user_time_cov,on=['user_id'],how='left')
    # users with no conversion fall back to their last-seen time
    data['day_end_cov'] = data['day_end_cov'].fillna(data['day_end'])
    data['alive_cov'] = data['day_end_cov'] - data['day_begin']
    data['alive/alive_cov'] = data['alive'] / data['alive_cov']
    # data['s_alive/alive_cov'] = data['s_alive'] / data['alive_cov']
    # drop the raw endpoints, keep only the derived ratios/spans
    del data['day_end_cov']
    del data['day_end']
    del data['day_begin']
    # for i in [1,2,3]:
    #     tmp = all_data[(all_data['real_day'] < data['real_day'].min()) & (all_data['real_day'] >= data['real_day'].min() - i)]
    #     user_item_sales_level_day = tmp.groupby(['user_id'], as_index=False)['item_sales_level'] \
    #         .agg({'user_item_sales_level_day_mean': 'mean',
    #               'user_item_sales_level_day_median': 'median',
    #               'user_item_sales_level_day_min': 'min',
    #               'user_item_sales_level_day_max': 'max',
    #               'user_item_sales_level_day_std': 'std',
    #               'user_item_sales_level_day_count': 'count'})
    #     data = pd.merge(data, user_item_sales_level_day, 'left', on=['user_id'])
    # data = data[['user_id','alive','s_alive','alive/s_alive','alive_cov','alive/alive_cov']]
    return data.fillna(-1)
def get_history_shop_feat(all_data,data):
    """Per-hour dispersion (std) of shop quality scores over 1/2/3-day
    lookback windows, plus per-(shop, hour) query counts.

    :param all_data: stacked frame with real_day / real_hour and shop columns
    :param data: label-window rows; merged on real_hour (and shop_id)
    :return: data with *_hour_std_{1,2,3} and shop_query_day_hour_{1,2,3}
    NOTE(review): dict-based renaming in SeriesGroupBy.agg was removed in
    pandas 1.0; this only runs on older pandas.
    """
    label_data_time = data['real_day'].min()
    print(label_data_time)
    for i in [1,2,3]:
        # rows strictly before the label day, within the last i days
        tmp = all_data[(all_data['real_day'] < label_data_time)&(all_data['real_day'] >= label_data_time - i)]
        shop_score_service_hour = tmp.groupby(['real_hour'], as_index=False)[
            'shop_score_service'] \
            .agg({
            'shop_score_service_hour_std_%d'%(i): 'std',
        })
        data = pd.merge(data, shop_score_service_hour, 'left', on=['real_hour'])
        shop_score_delivery = tmp.groupby(['real_hour'], as_index=False)[
            'shop_score_delivery'] \
            .agg({
            'shop_score_delivery_hour_std_%d' % (i): 'std',
        })
        data = pd.merge(data, shop_score_delivery, 'left', on=['real_hour'])
        # NOTE: variable name reused from the service block; it holds the
        # description-score std here.
        shop_score_service_hour = tmp.groupby(['real_hour'], as_index=False)[
            'shop_score_description'] \
            .agg({
            'shop_score_description_hour_std_%d' % (i): 'std',
        })
        data = pd.merge(data, shop_score_service_hour, 'left', on=['real_hour'])
        shop_review_positive_rate = tmp.groupby(['real_hour'], as_index=False)[
            'shop_review_positive_rate'] \
            .agg({
            'shop_review_positive_rate_hour_std_%d' % (i): 'std',
        })
        data = pd.merge(data, shop_review_positive_rate, 'left', on=['real_hour'])
        shop_star_level = tmp.groupby(['real_hour'], as_index=False)[
            'shop_star_level'] \
            .agg({
            'shop_star_level_hour_std_%d' % (i): 'std',
        })
        data = pd.merge(data, shop_star_level, 'left', on=['real_hour'])
        shop_review_num_level = tmp.groupby(['real_hour'], as_index=False)[
            'shop_review_num_level'] \
            .agg({
            'shop_review_num_level_hour_std_%d' % (i): 'std',
        })
        data = pd.merge(data, shop_review_num_level, 'left', on=['real_hour'])
        # impression count per (shop, hour) inside the window
        shop_query_day_hour = tmp.groupby(['shop_id', 'real_hour']).size().reset_index().rename(
            columns={0: 'shop_query_day_hour_%d'%(i)})
        data = pd.merge(data, shop_query_day_hour, 'left', on=['shop_id', 'real_hour'])
    return data
def get_history_item_feat(all_data, data):
    """Attach item-side popularity counts over 1/2/3-day lookback windows.

    For each window, counts rows grouped by (item_city_id, real_hour),
    (item_brand_id, real_hour) and (item_pv_level, real_hour) in the history
    before the label window's first day and left-joins them onto data.

    :param all_data: stacked frame with real_day / real_hour / item columns
    :param data: label-window rows
    :return: data with item_brand_id_day_{i}, item_brand_id_hour_{i},
             item_pv_level_hour_{i} count columns
    """
    label_day = data['real_day'].min()
    # (groupby keys, output column stem); the first stem is the legacy
    # name kept for compatibility even though it counts by city.
    count_specs = [
        (['item_city_id', 'real_hour'], 'item_brand_id_day'),
        (['item_brand_id', 'real_hour'], 'item_brand_id_hour'),
        (['item_pv_level', 'real_hour'], 'item_pv_level_hour'),
    ]
    for back in [1, 2, 3]:
        window = all_data[(all_data['real_day'] < label_day)
                          & (all_data['real_day'] >= label_day - back)]
        for keys, stem in count_specs:
            counts = window.groupby(keys).size().reset_index().rename(
                columns={0: '%s_%d' % (stem, back)})
            data = pd.merge(data, counts, 'left', on=keys)
    return data
print('make feat')
def make_feat(data,feat):
    '''
    Assemble the full feature frame for one label window.

    :param data: label-window rows (current-time features per impression)
    :param feat: list of aggregated stats frames from get_before_cov_radio;
                 each frame's first column is assumed to be its join key
    :return: merged frame with remaining NaNs filled with 0
    NOTE(review): reads the module-level `all_data` global rather than taking
    it as a parameter.
    '''
    data = calc_categry_feat(data)
    data = get_history_user_feat(all_data,data)
    data = get_history_shop_feat(all_data,data)
    data = get_history_item_feat(all_data,data)
    for f in feat:
        # join each stats frame back on its own key column
        data = pd.merge(data,f,on=[f.columns[0]],how='left')
    return data.fillna(0)
# Split the stacked frame back into test / train by original row order
# (train rows came first in the concat above).
test_a = all_data[train.shape[0]:]
train = all_data[:train.shape[0]]
# Day-based windows: day 24 is validation, days 21-23 are training slices.
val_a = train[train['real_day']==24]
train_a = train[train['real_day']==23]
train_b = train[train['real_day']==22]
train_c = train[train['real_day']==21]
# Pass the full history plus each label window's rows.
test_cov_feat = get_before_cov_radio(all_data,test_a)
val_cov_feat = get_before_cov_radio(all_data,val_a)
train_cov_feat_a = get_before_cov_radio(all_data,train_a)
train_cov_feat_b = get_before_cov_radio(all_data,train_b)
train_cov_feat_c = get_before_cov_radio(all_data,train_c)
# Build the final feature frames for each slice.
train_a = make_feat(train_a[feat],train_cov_feat_a)
train_b = make_feat(train_b[feat],train_cov_feat_b)
train_c = make_feat(train_c[feat],train_cov_feat_c)
test_a = make_feat(test_a[feat],test_cov_feat)
val_a = make_feat(val_a[feat],val_cov_feat)
train = pd.concat([train_a,train_b])
train = pd.concat([train,train_c])
# print(train.shape)
# train = pd.concat([train,val_a])
# print(train.shape)
# Separate labels and ids from the feature matrices.
y_train = train.pop('is_trade')
train_index = train.pop('instance_id')
X_train = train
y_test = test_a.pop('is_trade')
test_index = test_a.pop('instance_id')
X_test = test_a
y_val = val_a.pop('is_trade')
val_index = val_a.pop('instance_id')
X_val = val_a
# print(train.head())
# Columns to cast to pandas 'category' dtype (LightGBM treats them as categorical).
category_list = [
    'item_id','shop_id','user_id','user_gender_id','user_age_level',
    'user_occupation_id','user_star_level',
    'item_brand_id', 'item_city_id', 'item_price_level',
    'item_sales_level', 'item_collected_level', 'item_pv_level',
    'shop_review_num_level','shop_star_level','item_category_list_1','item_category_list_2',
    'item_property_list_0','item_property_list_1','item_property_list_2',
    'predict_category_0','predict_category_1','predict_category_2','context_page_id'
]
def make_cat(data):
    """Cast every column named in the module-level `category_list` to the
    pandas 'category' dtype (in place) and return the frame."""
    for col_name in category_list:
        data[col_name] = data[col_name].astype('category')
    return data
# NOTE(review): train_test_val is built but never used after the commented-out
# block below was disabled; kept for reference.
train_test_val = pd.concat([X_train,X_test])
train_test_val = pd.concat([train_test_val,X_val])
train_test_val = train_test_val.reset_index(drop=True)
# train_test_val = make_cat(train_test_val)
#
# X_train = train_test_val[:X_train.shape[0]]
# X_test = train_test_val[X_train.shape[0]:X_train.shape[0]+X_test.shape[0]]
# X_val = train_test_val[X_train.shape[0]+X_test.shape[0]:]
# Cast categorical columns on each split separately.
X_train = make_cat(X_train)
X_test = make_cat(X_test)
X_val = make_cat(X_val)
print(X_train.shape)
print(X_test.shape)
print(X_val.shape)
# X_test = make_cat(X_test)
# X_val = make_cat(X_val)
# Drop the helper time columns — they were only join keys for the
# window statistics, not model features.
del X_train['hour_before']
del X_test['hour_before']
del X_val['hour_before']
del X_train['hour_after']
del X_test['hour_after']
del X_val['hour_after']
del X_train['real_day']
del X_test['real_day']
del X_val['real_day']
print(X_train.dtypes)
del X_train['context_timestamp']
del X_test['context_timestamp']
del X_val['context_timestamp']
# Force all three splits into the same column order as X_train.
X_train = X_train[X_train.columns]
X_test = X_test[X_train.columns]
X_val = X_val[X_train.columns]
import lightgbm as lgb
#
# Offline training: fit with early stopping against the day-24 validation split.
# NOTE(review): LGBMRegressor with objective='binary' outputs probabilities;
# the sklearn wrapper's fitted attribute is normally `best_iteration_` (with
# trailing underscore) — `best_iteration` here presumably works only on the
# old lightgbm version this was written for; confirm against the installed version.
gbm = lgb.LGBMRegressor(objective='binary',
                        num_leaves=32,
                        learning_rate=0.01,
                        n_estimators=2000,
                        colsample_bytree = 0.65,
                        subsample = 0.65,
                        seed=0
                        )
gbm.fit(X_train,y_train,
        eval_set=[(X_val, y_val)],
        eval_metric=['binary_logloss'],
        early_stopping_rounds= 200)
# Feature importances, highest first.
imp = pd.DataFrame()
imp['n'] = list(X_train.columns)
imp['s'] = list(gbm.feature_importances_)
print(imp.sort_values('s',ascending=False))
print('Start predicting...')
# predict
y_pred_1 = gbm.predict(X_val, num_iteration=gbm.best_iteration)
y_tt = gbm.predict(X_train, num_iteration=gbm.best_iteration)
from sklearn.metrics import log_loss
print(log_loss(y_val,y_pred_1))
print(log_loss(y_train,y_tt))
# Online submission: refit on train+val with the early-stopped tree count.
gbm_sub = lgb.LGBMRegressor(objective='binary',
                        num_leaves=32,
                        learning_rate=0.01,
                        n_estimators = gbm.best_iteration+1,
                        colsample_bytree = 0.65,
                        subsample = 0.65,
                        seed=0
                        )
X_train = pd.concat([X_train,X_val])
y_train = pd.concat([y_train,y_val])
X_train = make_cat(X_train)
X_train = X_train[X_train.columns]
gbm_sub.fit(X_train,y_train,
            eval_set=[(X_train, y_train)],
            eval_metric=['binary_logloss'])
y_sub_1 = gbm_sub.predict(X_test)
y_tt = gbm_sub.predict(X_train, num_iteration=gbm_sub.best_iteration)
from sklearn.metrics import log_loss
print(log_loss(y_train,y_tt))
# Write the submission file (instance_id + predicted probability).
sub = pd.DataFrame()
sub['instance_id'] = list(test_index)
sub['predicted_score'] = list(y_sub_1)
sub.to_csv('../result/20180409.txt',sep=" ",index=False)
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from sklearn import preprocessing
import warnings
warnings.filterwarnings("ignore")
import time
def timestamp_datetime(value):
    """Format a unix timestamp as 'YYYY-MM-DD HH:MM:SS' in local time.

    :param value: unix timestamp (seconds)
    :return: formatted local-time string
    """
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(value))
def base_process(data):
    """Encode raw ID/string columns and add simple hand-crafted indicator flags.

    Label-encodes the high-cardinality ID columns in place, expands the
    ';'-separated category/property strings into positional columns, derives
    day/hour from the timestamp, and adds binary bucket flags (gender0, age0,
    occupation0, star0, context_page0, shop_score_delivery0).

    BUG FIX vs original: the membership flags used ``x == 1004 | x == 1005 …``.
    ``|`` binds tighter than ``==``, so those expressions parsed as chained
    comparisons against bitwise-ORed values and never tested membership.
    Rewritten with ``in`` on explicit tuples.

    :param data: raw impression DataFrame; modified in place and returned
    :return: the same DataFrame with encoded/derived columns added
    """
    lbl = preprocessing.LabelEncoder()
    print(
        '--------------------------------------------------------------item--------------------------------------------------------------')
    data['len_item_category'] = data['item_category_list'].map(lambda x: len(str(x).split(';')))
    data['len_item_property'] = data['item_property_list'].map(lambda x: len(str(x).split(';')))
    for i in range(1, 3):
        # position 0 of item_category_list is identical for every row, so start at 1
        data['item_category_list' + str(i)] = lbl.fit_transform(data['item_category_list'].map(
            lambda x: str(str(x).split(';')[i]) if len(str(x).split(';')) > i else ''))
    for i in range(10):
        data['item_property_list' + str(i)] = lbl.fit_transform(data['item_property_list'].map(lambda x: str(str(x).split(';')[i]) if len(str(x).split(';')) > i else ''))
    for col in ['item_id', 'item_brand_id', 'item_city_id']:
        data[col] = lbl.fit_transform(data[col])
    print(
        '--------------------------------------------------------------user--------------------------------------------------------------')
    for col in ['user_id']:
        data[col] = lbl.fit_transform(data[col])
        # print('id:',data[col])
    print('user 0,1 feature')
    data['gender0'] = data['user_gender_id'].apply(lambda x: 1 if x == -1 else 2)
    data['age0'] = data['user_age_level'].apply(lambda x: 1 if x in (1004, 1005, 1006, 1007) else 2)
    data['occupation0'] = data['user_occupation_id'].apply(lambda x: 1 if x in (-1, 2003) else 2)
    data['star0'] = data['user_star_level'].apply(lambda x: 1 if x in (-1, 3000, 3001) else 2)
    print(
        '--------------------------------------------------------------context--------------------------------------------------------------')
    data['realtime'] = data['context_timestamp'].apply(timestamp_datetime)
    data['realtime'] = pd.to_datetime(data['realtime'])
    data['day'] = data['realtime'].dt.day
    data['hour'] = data['realtime'].dt.hour
    data['len_predict_category_property'] = data['predict_category_property'].map(lambda x: len(str(x).split(';')))
    for i in range(5):
        data['predict_category_property' + str(i)] = lbl.fit_transform(data['predict_category_property'].map(
            lambda x: str(str(x).split(';')[i]) if len(str(x).split(';')) > i else ''))
    print('context 0,1 feature')
    data['context_page0'] = data['context_page_id'].apply(
        lambda x: 1 if x in (4001, 4002, 4003, 4004, 4007) else 2)
    print(
        '--------------------------------------------------------------shop--------------------------------------------------------------')
    for col in ['shop_id']:
        data[col] = lbl.fit_transform(data[col])
    # flag scores inside the dense 0.96-0.98 band
    data['shop_score_delivery0'] = data['shop_score_delivery'].apply(lambda x: 0 if x <= 0.98 and x >= 0.96 else 1)
    print(data.shape)
    return data
def map_hour(x):
    """Bucket an hour of day into three coarse periods:
    1 for 7-12, 2 for 13-20, 3 otherwise (night/early morning)."""
    if 7 <= x <= 12:
        return 1
    if 13 <= x <= 20:
        return 2
    return 3
def deliver(x):
    """Bin a (rescaled) delivery score into 0.1-wide buckets starting at 4.1.

    Returns bin index + 1 for the first matching bucket, 1 for the missing
    marker -5, and implicitly None when x falls outside every bucket.
    """
    step = 0.1
    for bucket in range(1, 20):
        lower = 4.1 + step * (bucket - 1)
        upper = 4.1 + step * bucket
        if lower <= x <= upper:
            return bucket + 1
    if x == -5:
        return 1
def deliver1(x):
    """Coarsen deliver() buckets: 2-4 -> 1, 5-7 -> 2, everything else -> 3."""
    if 2 <= x <= 4:
        return 1
    if 5 <= x <= 7:
        return 2
    return 3
def review(x):
    """Bin a positive-review rate into 0.02-wide buckets starting at 0.714.

    Returns bin index + 1 for the first matching bucket, 1 for the missing
    marker -1, and implicitly None when x falls outside every bucket.
    """
    step = 0.02
    for bucket in range(1, 30):
        if 0.714 + step * (bucket - 1) <= x <= 0.714 + step * bucket:
            return bucket + 1
    if x == -1:
        return 1
def review1(x):
    """Coarsen review() buckets: 2-12 -> 1, 13-15 -> 2, everything else -> 3."""
    if 2 <= x <= 12:
        return 1
    if 13 <= x <= 15:
        return 2
    return 3
def service(x):
    """Bin a (rescaled) service score into 0.1-wide buckets starting at 3.93.

    Returns bin index + 1 for the first matching bucket, 1 for the missing
    marker -1, and implicitly None when x falls outside every bucket.
    """
    step = 0.1
    for bucket in range(1, 20):
        if 3.93 + step * (bucket - 1) <= x <= 3.93 + step * bucket:
            return bucket + 1
    if x == -1:
        return 1
def service1(x):
    """Coarsen service() buckets: 2-7 -> 1, 8-9 -> 2, everything else -> 3."""
    if 2 <= x <= 7:
        return 1
    if 8 <= x <= 9:
        return 2
    return 3
def describe(x):
    """Bin a (rescaled) description score into 0.1-wide buckets from 3.93.

    Same shape as service() but with 29 buckets. Returns bin index + 1 for
    the first matching bucket, 1 for the missing marker -1, and implicitly
    None when x falls outside every bucket.
    """
    step = 0.1
    for bucket in range(1, 30):
        if 3.93 + step * (bucket - 1) <= x <= 3.93 + step * bucket:
            return bucket + 1
    if x == -1:
        return 1
def describe1(x):
    """Coarsen describe() buckets: 2-8 -> 1, 9-10 -> 2, everything else -> 3."""
    if 2 <= x <= 8:
        return 1
    if 9 <= x <= 10:
        return 2
    return 3
def shijian(data):
    """Add 'hour_map': the coarse time-of-day bucket of the 'hour' column
    (via the module-level map_hour)."""
    data['hour_map'] = data['hour'].map(map_hour)
    return data
def shop_fenduan(data):
    """Discretize the shop score columns into coarse quality buckets and add
    a 'normal_shop' flag for shops in the top bucket of all four scores.

    Rescales each score by 5, drops rows carrying the missing marker
    (-1 * 5 == -5, or -1 for the review rate), applies the fine then coarse
    binning helpers, and finally removes the intermediate *_map columns.

    :param data: frame with the four shop_score/review columns; filtered rows
                 are dropped, so the returned frame may be shorter
    :return: filtered frame with 'normal_shop' added
    """
    data['shop_score_delivery'] = data['shop_score_delivery'] * 5
    data = data[data['shop_score_delivery'] != -5]
    data['deliver_map'] = data['shop_score_delivery'].apply(deliver)
    data['deliver_map'] = data['deliver_map'].apply(deliver1)
    # del data['shop_score_delivery']
    print(data.deliver_map.value_counts())
    data['shop_score_service'] = data['shop_score_service'] * 5
    data = data[data['shop_score_service'] != -5]
    data['service_map'] = data['shop_score_service'].apply(service)
    data['service_map'] = data['service_map'].apply(service1)
    # del data['shop_score_service']
    print(data.service_map.value_counts())  # treated as good / neutral / bad ratings
    #
    data['shop_score_description'] = data['shop_score_description'] * 5
    data = data[data['shop_score_description'] != -5]
    data['de_map'] = data['shop_score_description'].apply(describe)
    data['de_map'] = data['de_map'].apply(describe1)
    # del data['shop_score_description']
    print(data.de_map.value_counts())
    data = data[data['shop_review_positive_rate'] != -1]
    data['review_map'] = data['shop_review_positive_rate'].apply(review)
    data['review_map'] = data['review_map'].apply(review1)
    print(data.review_map.value_counts())
    # shop is "normal" only if it sits in bucket 3 on all four dimensions
    data['normal_shop'] = data.apply(
        lambda x: 1 if (x.deliver_map == 3) & (x.service_map == 3) & (x.de_map == 3) & (x.review_map == 3) else 0,
        axis=1)
    # keep only the aggregate flag; the per-score buckets were intermediates
    del data['de_map']
    del data['service_map']
    del data['deliver_map']
    del data['review_map']
    return data
def slide_cnt(data):
    """Sliding-window occurrence counts per user/item/shop.

    Adds, for each row on day d: counts of the same user/item/shop on day d-1
    (user_cnt1/item_cnt1/shop_cnt1) and on all days before d
    (user_cntx/item_cntx/shop_cntx), merged back on instance_id.

    :param data: frame with 'day', id columns and unique 'instance_id'
    :return: data with the six count columns (NaN for day 18, which has no
             previous day in the 19..25 loop)
    NOTE(review): the df2[...] = ... assignments write into a slice of `data`
    — raises SettingWithCopyWarning on newer pandas; presumably harmless here
    since only the merged copy is used, but confirm.
    """
    # item_cnt = data.groupby(by='item_id').count()['instance_id'].to_dict()
    # data['item_cnt'] = data['item_id'].apply(lambda x: item_cnt[x])
    # user_cnt = data.groupby(by='user_id').count()['instance_id'].to_dict()
    # data['user_cnt'] = data['user_id'].apply(lambda x: user_cnt[x])
    # shop_cnt = data.groupby(by='shop_id').count()['instance_id'].to_dict()
    # data['shop_cnt'] = data['shop_id'].apply(lambda x: shop_cnt[x])
    print('当前日期前一天的cnt')
    for d in range(19, 26):  # source days 18..24
        df1 = data[data['day'] == d - 1]
        df2 = data[data['day'] == d]  # target days 19..25
        user_cnt = df1.groupby(by='user_id').count()['instance_id'].to_dict()
        item_cnt = df1.groupby(by='item_id').count()['instance_id'].to_dict()
        shop_cnt = df1.groupby(by='shop_id').count()['instance_id'].to_dict()
        # 0 when the id did not appear the previous day
        df2['user_cnt1'] = df2['user_id'].apply(lambda x: user_cnt.get(x, 0))
        df2['item_cnt1'] = df2['item_id'].apply(lambda x: item_cnt.get(x, 0))
        df2['shop_cnt1'] = df2['shop_id'].apply(lambda x: shop_cnt.get(x, 0))
        df2 = df2[['user_cnt1', 'item_cnt1', 'shop_cnt1', 'instance_id']]
        if d == 19:
            Df2 = df2
        else:
            Df2 = pd.concat([df2, Df2])
    data = pd.merge(data, Df2, on=['instance_id'], how='left')
    print('当前日期之前的cnt')
    for d in range(19, 26):
        # target days 19..25; day 25 is the test day
        df1 = data[data['day'] < d]
        df2 = data[data['day'] == d]
        user_cnt = df1.groupby(by='user_id').count()['instance_id'].to_dict()
        item_cnt = df1.groupby(by='item_id').count()['instance_id'].to_dict()
        shop_cnt = df1.groupby(by='shop_id').count()['instance_id'].to_dict()
        df2['user_cntx'] = df2['user_id'].apply(lambda x: user_cnt.get(x, 0))
        df2['item_cntx'] = df2['item_id'].apply(lambda x: item_cnt.get(x, 0))
        df2['shop_cntx'] = df2['shop_id'].apply(lambda x: shop_cnt.get(x, 0))
        df2 = df2[['user_cntx', 'item_cntx', 'shop_cntx', 'instance_id']]
        if d == 19:
            Df2 = df2
        else:
            Df2 = pd.concat([df2, Df2])
    data = pd.merge(data, Df2, on=['instance_id'], how='left')
    print("前一个小时的统计量")
    return data
def zuhe(data):
    """Pairwise combination features built by string-concatenating level/ID
    columns (e.g. sales '3' + price '4' -> 34), then cast back to int.

    -1 markers in the user columns are mapped to 0 first. 'review_star' is
    created and immediately dropped, matching the original behavior.

    :param data: frame with the item/user/shop level columns; modified in place
    :return: the same frame with the combination columns added
    """
    user_cols = ['user_gender_id', 'user_age_level', 'user_occupation_id', 'user_star_level']
    for col in user_cols:
        data[col] = data[col].map(lambda v: 0 if v == -1 else v)
    str_cols = (['item_sales_level', 'item_price_level', 'item_collected_level']
                + user_cols
                + ['shop_review_num_level', 'shop_star_level'])
    for col in str_cols:
        data[col] = data[col].astype(str)
    print('item两两组合')
    data['sale_price'] = data['item_sales_level'] + data['item_price_level']
    data['sale_collect'] = data['item_sales_level'] + data['item_collected_level']
    data['price_collect'] = data['item_price_level'] + data['item_collected_level']
    print('user两两组合')
    data['gender_age'] = data['user_gender_id'] + data['user_age_level']
    data['gender_occ'] = data['user_gender_id'] + data['user_occupation_id']
    data['gender_star'] = data['user_gender_id'] + data['user_star_level']
    print('shop两两组合')
    data['review_star'] = data['shop_review_num_level'] + data['shop_star_level']
    combo_cols = ['sale_price', 'sale_collect', 'price_collect',
                  'gender_age', 'gender_occ', 'gender_star', 'review_star']
    for col in str_cols + combo_cols:
        data[col] = data[col].astype(int)
    del data['review_star']
    return data
def _pair_count_ratio(data, base_col, pair_cols, suffix, base_cnt_col):
    """Count base_col occurrences, then for each col in pair_cols add the
    (col, base_col) pair count and its share of the base count.

    Adds '<col>_<suffix>_cnt' and '<col>_<suffix>_prob' columns; the
    temporary base count column is dropped before returning.
    """
    cnt = data.groupby([base_col], as_index=False)['instance_id'].agg(**{base_cnt_col: 'count'})
    data = pd.merge(data, cnt, on=[base_col], how='left')
    for col in pair_cols:
        cnt_col = str(col) + '_' + suffix + '_cnt'
        pair_cnt = data.groupby([col, base_col], as_index=False)['instance_id'].agg(**{cnt_col: 'count'})
        data = pd.merge(data, pair_cnt, on=[col, base_col], how='left')
        data[str(col) + '_' + suffix + '_prob'] = data[cnt_col] / data[base_cnt_col]
    del data[base_cnt_col]
    return data

def item(data):
    """Item-side conditional frequency features: how often each attribute
    value co-occurs with each item/brand/city/price/sales/collected level,
    as counts and as shares of the base count.

    Fixes vs the original:
    - dict-based SeriesGroupBy.agg renaming (removed in pandas 1.0,
      "nested renamer") replaced with named aggregation;
    - the "price" section now groups by item_price_level — the original
      grouped by item_city_id (copy-paste bug) while dividing by the
      price count;
    - the six duplicated sections are driven through one helper.

    :param data: frame with instance_id and the item attribute columns
    :return: data with *_cnt and *_prob columns added
    """
    print('一个item有多少brand,price salse collected level……')
    data = _pair_count_ratio(data, 'item_id',
                             ['item_brand_id', 'item_city_id', 'item_price_level',
                              'item_sales_level', 'item_collected_level', 'item_pv_level'],
                             'item', 'item_cnt')
    print('一个brand有多少price salse collected level……')
    data = _pair_count_ratio(data, 'item_brand_id',
                             ['item_city_id', 'item_price_level', 'item_sales_level',
                              'item_collected_level', 'item_pv_level'],
                             'brand', 'item_brand_cnt')
    print('一个city有多少item_price_level,item_sales_level,item_collected_level,item_pv_level')
    data = _pair_count_ratio(data, 'item_city_id',
                             ['item_price_level', 'item_sales_level',
                              'item_collected_level', 'item_pv_level'],
                             'city', 'item_city_cnt')
    print('一个price有多少item_sales_level,item_collected_level,item_pv_level')
    data = _pair_count_ratio(data, 'item_price_level',
                             ['item_sales_level', 'item_collected_level', 'item_pv_level'],
                             'price', 'item_price_cnt')
    print('一个item_sales_level有多少item_collected_level,item_pv_level')
    data = _pair_count_ratio(data, 'item_sales_level',
                             ['item_collected_level', 'item_pv_level'],
                             'salse', 'item_salse_cnt')
    print('一个item_collected_level有多少item_pv_level')
    data = _pair_count_ratio(data, 'item_collected_level',
                             ['item_pv_level'],
                             'coll', 'item_coll_cnt')
    return data
def user(data):
    """User-side conditional frequency features: how often each demographic
    value co-occurs with each user / gender / age / occupation group, as
    counts and as shares of the base count.

    :param data: frame with instance_id and the user demographic columns
    :return: data with *_cnt and *_prob columns added
    NOTE(review): the dict-based SeriesGroupBy.agg renaming used throughout
    was removed in pandas 1.0 ("nested renamer"); this only runs on older pandas.
    """
    print('用户有多少性别')
    itemcnt = data.groupby(['user_id'], as_index=False)['instance_id'].agg({'user_cnt': 'count'})
    data = pd.merge(data, itemcnt, on=['user_id'], how='left')
    for col in ['user_gender_id','user_age_level', 'user_occupation_id', 'user_star_level']:
        itemcnt = data.groupby([col, 'user_id'], as_index=False)['instance_id'].agg({str(col) + '_user_cnt': 'count'})
        data = pd.merge(data, itemcnt, on=[col, 'user_id'], how='left')
        data[str(col) + '_user_prob']=data[str(col) + '_user_cnt']/data['user_cnt']
    del data['user_cnt']
    print('性别的年龄段,职业有多少')
    itemcnt = data.groupby(['user_gender_id'], as_index=False)['instance_id'].agg({'user_gender_cnt': 'count'})
    data = pd.merge(data, itemcnt, on=['user_gender_id'], how='left')
    for col in ['user_age_level', 'user_occupation_id', 'user_star_level']:
        itemcnt = data.groupby([col, 'user_gender_id'], as_index=False)['instance_id'].agg({str(col) + '_user_gender_cnt': 'count'})
        data = pd.merge(data, itemcnt, on=[col, 'user_gender_id'], how='left')
        data[str(col) + '_user_gender_prob']=data[str(col) + '_user_gender_cnt']/data['user_gender_cnt']
    del data['user_gender_cnt']
    print('user_age_level对应的user_occupation_id,user_star_level')
    itemcnt = data.groupby(['user_age_level'], as_index=False)['instance_id'].agg({'user_age_cnt': 'count'})
    data = pd.merge(data, itemcnt, on=['user_age_level'], how='left')
    for col in ['user_occupation_id', 'user_star_level']:
        itemcnt = data.groupby([col, 'user_age_level'], as_index=False)['instance_id'].agg({str(col) + '_user_age_cnt': 'count'})
        data = pd.merge(data, itemcnt, on=[col, 'user_age_level'], how='left')
        data[str(col) + '_user_age_prob']=data[str(col) + '_user_age_cnt']/data['user_age_cnt']
    del data['user_age_cnt']
    print('user_occupation_id对应的user_star_level')
    itemcnt = data.groupby(['user_occupation_id'], as_index=False)['instance_id'].agg({'user_occ_cnt': 'count'})
    data = pd.merge(data, itemcnt, on=['user_occupation_id'], how='left')
    for col in ['user_star_level']:
        itemcnt = data.groupby([col, 'user_occupation_id'], as_index=False)['instance_id'].agg({str(col) + '_user_occ_cnt': 'count'})
        data = pd.merge(data, itemcnt, on=[col, 'user_occupation_id'], how='left')
        data[str(col) + '_user_occ_prob']=data[str(col) + '_user_occ_cnt']/data['user_occ_cnt']
    del data['user_occ_cnt']
    return data
def user_item(data):
    """User-by-item conditional frequency features: how often each item
    attribute co-occurs with each user / gender / age / occupation group.

    :param data: frame with instance_id, user demographic and item columns
    :return: data with *_cnt and *_prob columns added
    NOTE(review): deliberately does NOT delete the base count columns
    (user_cnt, user_gender_cnt, user_age_cnt, user_occ_cnt) — user_shop()
    reads and deletes them afterwards, so these two functions are coupled.
    NOTE(review): dict-based SeriesGroupBy.agg renaming was removed in
    pandas 1.0 ("nested renamer"); this only runs on older pandas.
    """
    itemcnt = data.groupby(['user_id'], as_index=False)['instance_id'].agg({'user_cnt': 'count'})
    data = pd.merge(data, itemcnt, on=['user_id'], how='left')
    print('一个user有多少item_id,item_brand_id……')
    for col in ['item_id',
                'item_brand_id','item_city_id','item_price_level',
                'item_sales_level','item_collected_level','item_pv_level']:
        item_shop_cnt = data.groupby([col, 'user_id'], as_index=False)['instance_id'].agg({str(col)+'_user_cnt': 'count'})
        data = pd.merge(data, item_shop_cnt, on=[col, 'user_id'], how='left')
        data[str(col) + '_user_prob'] = data[str(col) + '_user_cnt'] / data['user_cnt']
    print('一个user_gender有多少item_id,item_brand_id……')
    itemcnt = data.groupby(['user_gender_id'], as_index=False)['instance_id'].agg({'user_gender_cnt': 'count'})
    data = pd.merge(data, itemcnt, on=['user_gender_id'], how='left')
    for col in ['item_id',
                'item_brand_id','item_city_id','item_price_level',
                'item_sales_level','item_collected_level','item_pv_level']:
        item_shop_cnt = data.groupby([col, 'user_gender_id'], as_index=False)['instance_id'].agg({str(col)+'_user_gender_cnt': 'count'})
        data = pd.merge(data, item_shop_cnt, on=[col, 'user_gender_id'], how='left')
        data[str(col) + '_user_gender_prob'] = data[str(col) + '_user_gender_cnt'] / data['user_gender_cnt']
    print('一个user_age_level有多少item_id,item_brand_id……')
    itemcnt = data.groupby(['user_age_level'], as_index=False)['instance_id'].agg({'user_age_cnt': 'count'})
    data = pd.merge(data, itemcnt, on=['user_age_level'], how='left')
    for col in ['item_id',
                'item_brand_id','item_city_id','item_price_level',
                'item_sales_level','item_collected_level','item_pv_level']:
        item_shop_cnt = data.groupby([col, 'user_age_level'], as_index=False)['instance_id'].agg({str(col)+'_user_age_cnt': 'count'})
        data = pd.merge(data, item_shop_cnt, on=[col, 'user_age_level'], how='left')
        data[str(col) + '_user_age_prob'] = data[str(col) + '_user_age_cnt'] / data['user_age_cnt']
    print('一个user_occupation_id有多少item_id,item_brand_id…')
    itemcnt = data.groupby(['user_occupation_id'], as_index=False)['instance_id'].agg({'user_occ_cnt': 'count'})
    data = pd.merge(data, itemcnt, on=['user_occupation_id'], how='left')
    for col in ['item_id',
                'item_brand_id','item_city_id','item_price_level',
                'item_sales_level','item_collected_level','item_pv_level']:
        item_shop_cnt = data.groupby([col, 'user_occupation_id'], as_index=False)['instance_id'].agg({str(col)+'_user_occ_cnt': 'count'})
        data = pd.merge(data, item_shop_cnt, on=[col, 'user_occupation_id'], how='left')
        data[str(col) + '_user_occ_prob'] = data[str(col) + '_user_occ_cnt'] / data['user_occ_cnt']
    return data
def user_shop(data):
    """Add user-group x shop-feature co-occurrence counts and ratios.

    For each user grouping column (user_id, user_gender_id, user_age_level,
    user_occupation_id) and each shop feature (shop_id, shop_review_num_level,
    shop_star_level), count how often the pair occurs and divide by the
    group's total impression count.  The per-group totals (``user_cnt``,
    ``user_gender_cnt``, ``user_age_cnt``, ``user_occ_cnt``) must already be
    present on *data* (created by the earlier user-feature step); each total
    column is dropped once its ratios are computed.

    :param data: DataFrame with ``instance_id``, the shop feature columns,
        the four user grouping columns and their precomputed ``*_cnt`` totals.
    :return: DataFrame with ``<col>_<prefix>_cnt`` and ``<col>_<prefix>_prob``
        columns appended and the ``*_cnt`` totals removed.
    """
    shop_cols = ['shop_id', 'shop_review_num_level', 'shop_star_level']
    # (group column, output-name prefix, progress message) per user view.
    groups = [
        ('user_id', 'user', '一个user有多少shop_id,shop_review_num_level……'),
        ('user_gender_id', 'user_gender', '一个user_gender有多少shop_id,shop_review_num_level……'),
        ('user_age_level', 'user_age', '一个user_age_level有多少shop_id,shop_review_num_level……'),
        ('user_occupation_id', 'user_occ', '一个user_occupation_id有多少shop_id,shop_review_num_level……'),
    ]
    for group_col, prefix, msg in groups:
        print(msg)
        total_col = prefix + '_cnt'
        for col in shop_cols:
            pair_name = str(col) + '_' + total_col
            # The historical `.agg({name: 'count'})` dict-renamer was removed
            # in pandas 1.0; count + rename is the portable equivalent.
            pair_cnt = (data.groupby([col, group_col], as_index=False)['instance_id']
                        .count()
                        .rename(columns={'instance_id': pair_name}))
            data = pd.merge(data, pair_cnt, on=[col, group_col], how='left')
            # Share of this user group's impressions that hit this shop value.
            data[str(col) + '_' + prefix + '_prob'] = data[pair_name] / data[total_col]
        # Totals are only needed as denominators; drop the raw count column.
        del data[total_col]
    return data
def shop_item(data):
    """Add shop-group x item-feature co-occurrence counts and ratios.

    For each shop grouping column (shop_id, shop_review_num_level) compute the
    group's total impression count, then for every item feature column count
    the (item value, shop group) pair and divide by that total.  The group
    total column is dropped after its ratios are computed.

    :param data: DataFrame with ``instance_id``, the shop grouping columns and
        the item feature columns.
    :return: DataFrame with ``<col>_<prefix>_cnt`` and ``<col>_<prefix>_prob``
        columns appended.
    """
    item_cols = ['item_id',
                 'item_brand_id', 'item_city_id', 'item_price_level',
                 'item_sales_level', 'item_collected_level', 'item_pv_level']
    # (group column, output-name prefix, progress message) per shop view.
    # NOTE(review): a shop_star_level variant existed here but was disabled.
    groups = [
        ('shop_id', 'shop', '一个shop有多少item_id,item_brand_id,item_city_id,item_price_level……'),
        ('shop_review_num_level', 'shop_rev', '一个shop_review_num_level有多少item_id,item_brand_id,item_city_id,item_price_level……'),
    ]
    for group_col, prefix, msg in groups:
        print(msg)
        total_col = prefix + '_cnt'
        # The historical `.agg({name: 'count'})` dict-renamer was removed in
        # pandas 1.0; count + rename is the portable equivalent.
        totals = (data.groupby([group_col], as_index=False)['instance_id']
                  .count()
                  .rename(columns={'instance_id': total_col}))
        data = pd.merge(data, totals, on=[group_col], how='left')
        for col in item_cols:
            pair_name = str(col) + '_' + total_col
            pair_cnt = (data.groupby([col, group_col], as_index=False)['instance_id']
                        .count()
                        .rename(columns={'instance_id': pair_name}))
            data = pd.merge(data, pair_cnt, on=[col, group_col], how='left')
            # Share of this shop group's impressions carrying this item value.
            data[str(col) + '_' + prefix + '_prob'] = data[pair_name] / data[total_col]
        # Totals are only needed as denominators; drop the raw count column.
        del data[total_col]
    return data
def lgbCV(train, test):
    """Offline validation round.

    Fits a LightGBM binary classifier on *train* with early stopping against
    *test*, prints the feature importances and the validation log-loss, and
    returns the best iteration count for the final submission fit.

    Side effects: adds ``pred`` and ``index`` columns to *test*.

    :param train: labelled training rows (must contain ``is_trade``).
    :param test: labelled validation rows (must contain ``is_trade``).
    :return: ``best_iteration_`` found by early stopping.
    """
    drop_cols = ['is_trade', 'item_category_list', 'item_property_list',
                 'predict_category_property', 'instance_id',
                 'context_id', 'realtime', 'context_timestamp']
    feat_cols = [c for c in train if c not in drop_cols]
    X_train = train[feat_cols]
    y_train = train['is_trade'].values
    X_valid = test[feat_cols]
    y_valid = test['is_trade'].values
    print('Training LGBM model...')
    clf = lgb.LGBMClassifier(
        objective='binary',
        num_leaves=35,
        max_depth=8,
        learning_rate=0.05,
        seed=2018,
        colsample_bytree=0.8,
        subsample=0.9,
        n_estimators=20000)
    model = clf.fit(X_train, y_train,
                    eval_set=[(X_valid, y_valid)],
                    early_stopping_rounds=200)
    best_iter = model.best_iteration_
    # Rank features by gain so the noisiest ones can be pruned next run.
    importance = pd.Series(model.feature_importances_,
                           [c for c in X_train.columns]).sort_values(ascending=False)
    print(importance)
    print(importance.shape)
    test['pred'] = model.predict_proba(test[feat_cols])[:, 1]
    test['index'] = range(len(test))
    print('误差 ', log_loss(test['is_trade'], test['pred']))
    return best_iter
def sub(train, test, best_iter):
    """Final online fit: train on all labelled rows and write the submission.

    Trains LightGBM for exactly *best_iter* rounds (found by :func:`lgbCV`),
    scores *test*, then left-joins the scores back onto the raw test file so
    every instance_id appears in the output; unmatched rows get probability 0.

    Side effects: adds ``predicted_score`` to *test*; writes
    ``result/result0326.txt``.

    :param train: all labelled rows (must contain ``is_trade``).
    :param test: unlabelled rows to score.
    :param best_iter: number of boosting rounds to run.
    """
    drop_cols = ['is_trade', 'item_category_list', 'item_property_list',
                 'predict_category_property', 'instance_id',
                 'context_id', 'realtime', 'context_timestamp']
    col = [c for c in train if c not in drop_cols]
    X = train[col]
    y = train['is_trade'].values
    print('Training LGBM model...')
    lgb0 = lgb.LGBMClassifier(
        objective='binary',
        num_leaves=35,
        max_depth=8,
        learning_rate=0.05,
        seed=2018,
        colsample_bytree=0.8,
        subsample=0.9,
        n_estimators=best_iter)
    lgb_model = lgb0.fit(X, y)
    feat_imp = pd.Series(lgb_model.feature_importances_,
                         [i for i in X.columns]).sort_values(ascending=False)
    print(feat_imp)
    print(feat_imp.shape)
    pred = lgb_model.predict_proba(test[col])[:, 1]
    test['predicted_score'] = pred
    scores = test[['instance_id', 'predicted_score']]
    # Raw string for the regex separator: non-raw "\s+" is a deprecated
    # escape sequence (SyntaxWarning on Python >= 3.12).
    # Renamed the local from `sub` to avoid shadowing this function's name.
    submission = pd.read_csv("input/test.txt", sep=r"\s+")
    submission = pd.merge(submission, scores, on=['instance_id'], how='left')
    submission = submission.fillna(0)
    submission[['instance_id', 'predicted_score']].to_csv('result/result0326.txt', sep=" ", index=False)
if __name__ == "__main__":
    # Raw strings for the regex separator: non-raw "\s+" is a deprecated
    # escape sequence (SyntaxWarning on Python >= 3.12).
    train = pd.read_csv("input/train.txt", sep=r"\s+")
    test = pd.read_csv("input/test.txt", sep=r"\s+")
    data = pd.concat([train, test])
    data = data.drop_duplicates(subset='instance_id')  # de-duplicate by instance id
    print('make feature')
    # Feature pipeline: each step returns a new DataFrame with columns added.
    data = base_process(data)
    data = shijian(data)
    data = shop_fenduan(data)
    data = slide_cnt(data)
    data = zuhe(data)
    print('----------------------------全局统计特征---------------------------------------------------')
    data = item(data)
    data = user(data)
    data = user_item(data)
    data = user_shop(data)
    data = shop_item(data)
    print(data.head(0))  # prints the column list only
    # ---- Offline validation: train on days 18-23, validate on day 24. ----
    train = data[(data['day'] >= 18) & (data['day'] <= 23)]
    test = data[(data['day'] == 24)]
    best_iter = lgbCV(train, test)
    # ---- Online submission: all labelled rows vs. the unlabelled rows. ----
    train = data[data.is_trade.notnull()]
    test = data[data.is_trade.isnull()]
    sub(train, test, best_iter)