Skip to content

Instantly share code, notes, and snippets.

View tinhb92's full-sized avatar
⛹️‍♂️ Moment to moment

HOANG Bao Tin (tinhb92)
View GitHub Profile
@tinhb92
tinhb92 / Run external Python script in Golang
Created September 3, 2022 16:12 — forked from olegpolukhin/Run external Python script in Golang
run external Python script in Golang
// Gist: "Run external Python script in Golang" (forked from olegpolukhin).
// NOTE(review): only the package header, import block, and the opening line
// of main() survived this capture — the function body is missing, and the
// lines that follow in this file belong to a separate shell snippet.
// Presumably the original body used os/exec to launch the Python interpreter
// and bufio/io to stream its output — confirm against the upstream gist.
package main
import (
"bufio"
"fmt"
"io"
"os/exec"
)
func main() {
# Ubuntu provisioning snippet (shell, not Go — it was spliced into the Go
# gist above by the page capture): installs apt-fast, then a build/ML
# dependency stack.
# Add the apt-fast PPA (parallel apt front end).
sudo add-apt-repository -y ppa:apt-fast/stable
# Add the graphics-drivers PPA for recent NVIDIA/graphics drivers.
sudo add-apt-repository -y ppa:graphics-drivers/ppa
sudo apt-get update
sudo apt-get -y install apt-fast
# prompts
sudo apt-fast -y upgrade
# Build toolchain plus audio/codec, Boost/Eigen/gflags/glog/gtest libraries.
# NOTE(review): gcc-6/gfortran-6/g++-6 and libgoogle-glog0v5 pin this to an
# older Ubuntu release; package names will need updating on newer distros.
sudo apt-fast install -y ubuntu-drivers-common libvorbis-dev libflac-dev libsndfile-dev cmake build-essential libgflags-dev libgoogle-glog-dev libgtest-dev google-mock zlib1g-dev libeigen3-dev libboost-all-dev libasound2-dev libogg-dev libtool libfftw3-dev libbz2-dev liblzma-dev libgoogle-glog0v5 gcc-6 gfortran-6 g++-6 doxygen graphviz libsox-fmt-all parallel exuberant-ctags vim-nox python-powerline python3-pip
# Remote-desktop (VNC), browser, file sync, GL headers, and the ack search tool.
sudo apt-fast install -y tigervnc-standalone-server firefox lsyncd mesa-common-dev ack
@tinhb92
tinhb92 / train_search_learner.py
Created April 17, 2019 16:58
train search learner
# Build a fastai Learner for DARTS RNN architecture search (gist
# train_search_learner.py). SGD optimizer plus a stack of callbacks that
# drive the search: hidden-state init, architecture-parameter updates,
# regularization, genotype printing, gradient clipping, and CSV logging.
# NOTE(review): this snippet is truncated by the capture — the callback_fns
# list and the Learner(...) call are never closed; the remainder (and the
# definitions of HidInit, Regu, PrintGenotype, etc.) live outside this view.
learn = Learner(dat, DartsRnnSearch(),
opt_func = torch.optim.SGD,
callback_fns = [
HidInit,
# Updates architecture parameters on the separate search split;
# presumably implements the DARTS bilevel update — confirm upstream.
partial(ArchParamUpdate, search_dat=search_dat,
arch_lr=arch_lr, arch_wdecay=arch_wdecay, wdecay=wdecay),
Regu,
PrintGenotype,
partial(GradientClipping, clip=clip),
partial(CSVLogger, filename = csv_name, append = True),
# One training epoch of the DARTS RNN search (awd-lstm-style training loop).
# NOTE(review): the body's indentation was flattened by the page capture and
# the function is truncated — the batch loop that consumes `batch, i` and
# advances through the corpus follows in the original source. Code lines are
# left byte-identical; only comments are added.
def train():
# Guard: gradient accumulation splits each batch into small_batch_size chunks,
# so the sizes must divide evenly. (assert is stripped under -O; the original
# relies on it anyway.)
assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
# Turn on training mode which enables dropout.
total_loss = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
# One RNN hidden state per accumulation chunk (batch_size // small_batch_size
# chunks), for both the training and validation/search streams.
hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
hidden_valid = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
batch, i = 0, 0
# fastai-style Learner.one_batch: push a single (xb, yb) pair through
# forward -> loss -> backward -> optimizer step, firing the named callback
# event (via self('event_name')) after each stage.
# NOTE(review): truncated — the `try:` is never closed in this capture; the
# original presumably continues with an except/finally that handles batch
# cancellation and fires 'after_batch'. Body indentation was also flattened
# by the capture; code lines are left byte-identical, comments only added.
def one_batch(self, i, xb, yb):
try:
# Record the batch index and inputs so callbacks can read them.
self.iter = i
self.xb,self.yb = xb,yb; self('begin_batch')
self.pred = self.model(self.xb); self('after_pred')
self.loss = self.loss_func(self.pred, self.yb); self('after_loss')
# Validation/inference path stops after the loss — no gradient work.
if not self.in_train: return
self.loss.backward(); self('after_backward')
self.opt.step(); self('after_step')
self.opt.zero_grad()