Skip to content

Instantly share code, notes, and snippets.

View mirceamironenco's full-sized avatar

Mircea Mironenco mirceamironenco

  • Amsterdam, Netherlands
View GitHub Profile
# Pull in FetchContent so third-party dependencies can be fetched at configure time.
include(FetchContent)

# Fail configuration if the requested C++ standard is unavailable instead of
# silently decaying to an older one.
# NOTE(review): CMAKE_CXX_STANDARD itself is not set in this fragment --
# presumably set elsewhere in the project; confirm.
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# Allow compiler-specific extensions (e.g. -std=gnu++NN rather than -std=c++NN).
set(CMAKE_CXX_EXTENSIONS ON)
# Emit compile_commands.json for clangd / clang-tidy tooling.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Detect macOS.  Use the bare variable name: if() auto-dereferences its
# arguments, so writing ${CMAKE_SYSTEM_NAME} caused a double expansion and
# would malform the condition if the variable were ever empty.
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
  set(MACOSX_FOUND TRUE)
endif()
from __future__ import annotations
import dataclasses
import pathlib
import sys
import warnings
from collections import deque
from collections.abc import Sequence
from typing import (
Annotated,
@mirceamironenco
mirceamironenco / spin-up-gcloud-vm.txt
Last active February 12, 2018 12:04 — forked from jamsawamsa/spin-up-gcloud-vm.txt
spin-up-gcloud-vm
gcloud compute instances create mircea \
--min-cpu-platform "Intel Broadwell" \
--machine-type n1-standard-1 --zone europe-west1-b \
--boot-disk-size 500GB --boot-disk-type=pd-ssd\
--accelerator type=nvidia-tesla-k80,count=1 \
--image-family ubuntu-1604-lts --image-project ubuntu-os-cloud \
--maintenance-policy TERMINATE --restart-on-failure \
--metadata startup-script='#!/bin/bash
echo "Checking for CUDA and installing."
# Check for CUDA and try to install.

Keybase proof

I hereby claim:

  • I am mirceamironenco on github.
  • I am mirceamironenco (https://keybase.io/mirceamironenco) on keybase.
  • I have a public key whose fingerprint is AE6D E27B 8985 EA4B 1082 A35B EDFF DD1F 075B 2BAB

To claim this, I am signing this object:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import time
import numpy as np
import tensorflow as tf
class Layer(object):
    """Minimal base class for network layers.

    Subclasses override `output`; calling the layer runs `output`
    inside a tf.name_scope named after the layer's scope.
    """

    def __init__(self, scope="dense_layer"):
        # Name used for the tf.name_scope wrapping each forward pass.
        self.scope = scope

    def __call__(self, x, **kwargs):
        """Run the forward pass inside this layer's named scope."""
        scope_name = self.scope
        with tf.name_scope(scope_name):
            result = self.output(x, **kwargs)
        return result

    def output(self, x, **kwargs):
        """Compute the layer output; subclasses must override this."""
        raise NotImplementedError
def loss(self, mc_logits, y_true):
"""
mc_logits is batch_size x num_k_samples x num_classes
y_true is batch_size x num_classes
self.k_mc is the number of samples used.
self.alpha is the alpha-divergence parameter
(0-VI,0.5-Hellinger,1.0-EP)
"""
mc_log_softmax = mc_logits - tf.reduce_max(mc_logits, axis=2,
keep_dims=True)