Skip to content

Instantly share code, notes, and snippets.

@wielandbrendel
wielandbrendel / brendel_bethge.py
Created October 27, 2019 19:40
Brendel & Bethge attacks
import logging
from foolbox.utils import onehot_like
from foolbox.attacks.base import Attack
from foolbox.attacks.base import generator_call_decorator
import numpy as np
from numba import jitclass
import line_profiler
from numba import njit
@njit
def square(x, y):
    """Return the product of *x* and *y*, compiled with numba's ``@njit``."""
    product = x * y
    return product
@njit
def closure(y):
    """Return a one-argument callable that multiplies its input by *y*.

    The returned function captures *y* and delegates to :func:`square`.
    """
    def multiply_by_y(x):
        return square(x, y)
    return multiply_by_y
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@wielandbrendel
wielandbrendel / vgg19.py
Created March 7, 2016 14:47
Loading of VGG-19
def get_vgg19(file='vgg19.pkl', leak=0, only_mean=False):
if leak == 0:
rectify = lasagne.nonlinearities.rectify
else:
rectify = LeakyRectify(leak)
def build_model():
net = {}
net['input'] = InputLayer((None, 3, 224, 224))
@wielandbrendel
wielandbrendel / rec_network_sim.py
Created September 30, 2015 12:09
Recurrent network simulation in Cython
cimport cython
cimport numpy as np
from libc.math cimport log, exp, sqrt, sin
import numpy as np
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
def c_analytic_simulate(np.ndarray[double, ndim=2] A, np.ndarray[double, ndim=2] T, np.ndarray[double, ndim=2] Of,
@wielandbrendel
wielandbrendel / recurrent_simulation.jl
Last active September 30, 2015 11:33
Updated simulation code (no devec + inplace BLAS operations)
blas_set_num_threads(1)
function train_network(A, T, Of, cs, dt)
I, N = size(T)
z = zeros(I)
r = zeros(N)
Az = Array(Float64, I)
Ofr = Array(Float64, N)
I_teach = Array(Float64, N)
Tz = Array(Float64, N)
@wielandbrendel
wielandbrendel / gist:ccf1ff6f8f92139439be
Last active September 3, 2015 13:12
Small CNN trained on CIFAR 10 using Keras
import os
os.environ['THEANO_FLAGS'] = 'device=gpu0, assert_no_cpu_op=raise, on_unused_input=ignore, floatX=float32'
from __future__ import absolute_import
from __future__ import print_function
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
@wielandbrendel
wielandbrendel / gist:8505669e9a54b0446ce5
Created February 16, 2015 16:14
Using kernel_stretch from cudarray's random.hpp leaves the input array unchanged
#include <stdio.h>
#include "random.hpp"
#define N 10
int main( void )
{
float a[N];
float alpha = 2;
float beta = 3;
@wielandbrendel
wielandbrendel / gist:e999c85f834dc2bd2cec
Created February 16, 2015 16:09
Simple Cuda kernel to multiply array with a constant (plus offset)
#include <stdio.h>
// Number of array elements the kernel operates on.
#define N 10
// Threads launched per CUDA block.
#define kNumBlockThreads 512
// copied from random.hpp
// Grid-stride loop: thread `i` starts at its global index
// (blockIdx.x * blockDim.x + threadIdx.x) and advances by the total number
// of threads in the grid (blockDim.x * gridDim.x), so the full range [0, n)
// is covered regardless of how many blocks were launched.
// NOTE: comments must stay outside the macro — a `//` on a continued line
// would swallow the trailing backslash.
#define CUDA_GRID_STRIDE_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < n; \
i += blockDim.x * gridDim.x)