Skip to content

Instantly share code, notes, and snippets.

@abhi1868sharma
Last active July 26, 2020 18:32
Show Gist options
  • Select an option

  • Save abhi1868sharma/673f7a1115294c4aaef5a187084838a8 to your computer and use it in GitHub Desktop.

Select an option

Save abhi1868sharma/673f7a1115294c4aaef5a187084838a8 to your computer and use it in GitHub Desktop.
import lightgbm as lgb
import numpy as np
import random
from collections import Counter
def lgb_data(training_rows=1000000, dim=100):
    """Create random training/validation data in LightGBM Dataset format.

    Parameters
    ----------
    training_rows : int
        Number of training samples to generate.
    dim : int
        Number of features per sample.

    Returns
    -------
    tuple
        ``(lgb_train, lgb_eval)`` — training Dataset and a validation
        Dataset that references it.
    """
    # Features: uniform floats in [0, 1). Labels: random 0/1 ints.
    # np.random.randint's upper bound is exclusive, so (0, 2) matches the
    # original random.randint(0, 1); vectorized instead of a Python-level
    # loop over a million calls.
    X_train = np.random.rand(training_rows, dim)
    y_train = np.random.randint(0, 2, size=training_rows)
    X_test = np.random.rand(50000, dim)
    y_test = np.random.randint(0, 2, size=50000)
    lgb_train = lgb.Dataset(X_train, y_train)
    # reference= lets the validation set reuse the training set's feature
    # bin mappings, as LightGBM recommends for eval data.
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    return lgb_train, lgb_eval
def lgb_training():
    """Train a LightGBM model on synthetic data from ``lgb_data``.

    Returns
    -------
    lightgbm.Booster
        The trained booster (100 rounds max, early stopping after 50
        rounds without improvement on the validation set).
    """
    # NOTE(review): 'regression' objective with l1/l2 metrics is used even
    # though lgb_data produces 0/1 labels — presumably intentional for the
    # benchmark; confirm 'binary' was not meant instead.
    lgb_params = {
        'boosting_type': 'gbdt',
        'objective': 'regression',
        'metric': {'l2', 'l1'},
        'num_leaves': 31,
        'learning_rate': 0.05,
        'feature_fraction': 0.9,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'verbose': 0,
    }
    lgb_train, lgb_eval = lgb_data()
    # NOTE(review): the early_stopping_rounds kwarg was removed in
    # LightGBM 4.x; on newer versions use
    # callbacks=[lgb.early_stopping(stopping_rounds=50)].
    rfr = lgb.train(
        lgb_params,
        lgb_train,
        num_boost_round=100,
        valid_sets=lgb_eval,
        early_stopping_rounds=50,
    )
    return rfr
# Train the model and persist it for later (GPU-based) inference.
rfr = lgb_training()
# change the path according to your local system
# (plain string literal — the original f-string had no placeholders)
rfr.save_model("/Users/num/gitrepos/blogs/medium/gpu_based_inference/models/lgb_model.txt")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment