Skip to content

Instantly share code, notes, and snippets.

@vkkhare
vkkhare / train.kt
Created April 1, 2020 07:55
PyTorch testing module
val script = Module.load("torchscript.pt")
val batchSize = IValue.from(
Tensor.fromBlob(longArrayOf(1), longArrayOf(1, 1))
)
val lr = IValue.from(
Tensor.fromBlob(floatArrayOf(0.01f), longArrayOf(1, 1))
)
val w1 = IValue.from(
Tensor.fromBlob(
FloatArray(392 * 784) { Random.nextFloat() / sqrt(784F) },
@vkkhare
vkkhare / MainActivity.kt
Last active March 4, 2020 10:42
Testing PyTorch Mobile for training on Android
package org.openmined.KotlinSyft
import android.os.Bundle
import com.google.android.material.snackbar.Snackbar
import androidx.appcompat.app.AppCompatActivity
import android.view.Menu
import android.view.MenuItem
import kotlinx.android.synthetic.main.activity_main.*
@vkkhare
vkkhare / pytorch.py
Last active November 24, 2018 15:28
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
def train(model, resnet,device, train_loader, optimizer_res, optimizer_att, epoch,losslist,loss,lmb):
for param in model.parameters():
param.requires_grad = False
for param in resnet.parameters():
param.requires_grad = True
model.eval()
resnet.train()
loss_l = torch.zeros(1,dtype=torch.float32).to(device)
b_idx = 0
for x in train_loader:
@vkkhare
vkkhare / INSTALL.md
Created June 26, 2018 07:43 — forked from arya-oss/INSTALL.md
Ubuntu 16.04 Developer Tools installation

Ubuntu 16.04 Developer Tools Installation

First things first!

sudo apt update
sudo apt upgrade

Standard Developer Tools

sudo apt-get install build-essential git
@vkkhare
vkkhare / mips.s
Last active February 13, 2018 14:46
.data
a: .word
b: .word
c: .word
ar: .space 8
.text
main:
@vkkhare
vkkhare / python3.5
Last active January 17, 2018 15:14
import numpy as np
import random
import sklearn
from sklearn.datasets.samples_generator import make_regression
import pylab
from scipy import stats
def gradient_descent(alpha, x, y, ep, max_iter):
converged = False
iter = 0