Skip to content

Instantly share code, notes, and snippets.

@arturaugusto
Last active August 29, 2015 14:01
Show Gist options
  • Select an option

  • Save arturaugusto/e302c88c17fbc789748b to your computer and use it in GitHub Desktop.

Select an option

Save arturaugusto/e302c88c17fbc789748b to your computer and use it in GitHub Desktop.
# Format a numeric value with the number of decimal places given by the
# model's resolution, returning the formatted string.
# NOTE(review): relies on numberResolution(), defined elsewhere in the
# project, to yield the number of decimal digits for `model` — confirm.
formatToResolution = function(toFormat, model) {
  toFormat <- as.numeric(toFormat)
  digits <- numberResolution(model)
  # Build a "%.<digits>f" format; round first so the printed value matches
  # the rounded number used elsewhere.
  sprintf(paste0("%.", digits, "f"), round(toFormat, digits))
}
# Solve code fragment
# Evaluate a user-supplied R code snippet and return the value of one of
# its variables (var_to_return, default "u").
#
# code_inject_before is prepended so the snippet can reference variables
# (e.g. measurement means) defined by the caller.  The injected code, the
# snippet and a trailing `return(<var_to_return>)` are joined with ";",
# then every ";", "," and "\r" is rewritten to a newline before parsing.
#
# NOTE(review): the gsub also converts commas *inside* the snippet (e.g.
# function-call arguments) to newlines, so snippets must not contain
# commas — presumably the established snippet convention; confirm.
# NOTE(review): the snippet's `return()` appears to rely on eval()
# running in this function's own frame — confirm this is intended.
solve_snippet = function(code_snippet, var_to_return = "u", code_inject_before = ""){
code_snippet <- paste(code_inject_before, ";", code_snippet, "; return(" ,var_to_return, ")" );
eval(parse(text=gsub("[;,\r]","\n",code_snippet)));
}
# Get uncertainty from model or reclassification, depending on which exists
# Remove NULL entries from a list (e.g. the output of lapply), keeping
# the order and names of the remaining elements.
compact = function(x) {
  keep <- !vapply(x, is.null, logical(1))
  x[keep]
}
# Get the type B uncertainty components for every variable of `object`.
#
# For each variable the source of the components is either its
# reclassification range (preferred, when present) or — for the unit
# under test (is_uut) only — the measurement range's own uncertainty
# list.  A component is kept only when flagged use_on_evaluation or
# always_evaluate; skipped components (NULL) are filtered out.
#
# `code` is an R snippet injected before each uncertainty function is
# solved (see solve_snippet), so those functions can reference the
# measured values.
#
# Returns a list (one entry per variable) of lists of component records
# with fields: var_name, meas, name, value, distribution.
get_typeB_unc = function(object, code){
  # Build one component record from an uncertainty definition.
  get_unc = function(u, var_name){
    list(
      var_name = var_name,
      meas = "-",
      name = paste(var_name, " (", u$name , ")", sep = ""),
      value = solve_snippet(u$`function`, "u", code),
      distribution = as.numeric(u$distribution)
    )
  }
  # Keep the component only if it is flagged for evaluation.
  always_or_use_now = function(u, var_name){
    if(u$use_on_evaluation || u$always_evaluate){
      get_unc(u, var_name)
    }else{
      NULL
    }
  }
  # Decide whether the reclassification data or the model data is used.
  choose_unc_source = function(v){
    # Default: no components.  Fix: the original left var_u undefined
    # for a non-UUT variable without reclassification data, which raised
    # "object 'var_u' not found".
    var_u <- list()
    # If a reclassification list exists, it takes precedence over the
    # model uncertainties.
    if(is.list(v$measurement_range$reclassification_range_u)){
      var_u <- lapply(v$measurement_range$reclassification_range_u,
        function(u){always_or_use_now(u, v$name)}
      )
    }else if(v$is_uut){
      # UUT variable: use the measurement range's uncertainty list.
      var_u <- lapply(v$measurement_range$uncertainties,
        function(u){always_or_use_now(u, v$name)}
      )
    }
    # Drop skipped (NULL) components uniformly for both sources (the
    # original only filtered the UUT branch).
    var_u[!vapply(var_u, is.null, logical(1))]
  }
  # Build the component list for each variable.
  lapply(object$variables, choose_unc_source)
}
# Return the numeric unit-prefix factor of a measurement range.
# A missing prefix_id means no prefix, i.e. a factor of 1; callers
# multiply measurement means by this value.
get_var_prefix = function(mrange){
  id <- mrange$prefix_id
  if (is.null(id)) 1 else as.numeric(id)
}
# Get the type A (statistical) uncertainty components.
# Build one component per variable from its repeated measurements: the
# standard uncertainty is the standard deviation of the readings divided
# by sqrt(n) (standard error of the mean, n from object$procedure$n).
# Readings are scaled by the measurement range's unit prefix.
get_typeA_unc = function(object){
  lapply(object$variables,
    function(v){
      prefix <- get_var_prefix(v$measurement_range)
      # Flatten once and scale; the original applied as.numeric() to the
      # raw list for the sd (without unlist, failing on nested lists)
      # while the mean used unlist — use the same readings for both.
      readings <- as.numeric(unlist(v$measurements))*prefix
      meas_mean <- mean(readings)
      meas_sd <- sd(readings)
      u <- meas_sd/sqrt(object$procedure$n)
      list(
        var_name = v$name,
        meas = meas_mean,
        name = paste(v$name, "(meas)"),
        value = u,
        distribution = 1
      )
    })
}
# Process user input data necessary to be used by other functions
#
# Builds an R code string that, when evaluated, defines one variable per
# measured quantity (set to its prefix-scaled measurement mean) plus one
# variable per user property.  This string is later injected before user
# snippets are solved (see solve_snippet / get_typeB_unc).
process_input_data = function(object){
# Solve every correction snippet of a variable and return the resulting
# corrections; NULL results are removed via compact().
get_correction = function(v, code_inject_before){
# Join the measurement-range corrections with any reclassification ones
corrections = c(v$measurement_range$corrections, v$measurement_range$reclassification_range_c)
compact(lapply(corrections,
function(c){
# Solve the correction function snippet, using "c" as the returned var
solve_snippet(c$`function`, "c", code_inject_before)
}))
}
# Create evaluable code that sets some variables,
# so user code snippets can access their values
# properties -> code (one "name <- value" assignment per property)
properties_code <- lapply(object$properties, function(p){paste(p$name, "<-", p$value)});
# measurements average -> code
build_variables_code = function(v){
prefix <- get_var_prefix(v$measurement_range)
# get the measurement mean, applying the unit-prefix factor
meas_mean <- mean(as.numeric(unlist(v$measurements)))*prefix;
# get the available corrections for the current var, injecting a snippet
# that defines the mean value as "pval", since the model context cannot
# know the variable's name
corrections <- get_correction(v, paste("pval <-", meas_mean))
# NOTE(review): the corrected means computed below are discarded — the
# lapply result is never assigned and var_code below uses the
# uncorrected mean.  Looks like corrections were meant to be applied;
# confirm intended behavior before removing or wiring this in.
lapply(as.numeric(unlist(corrections)),
function(correction){
meas_mean + correction
})
var_code <- paste(v$name, "<-", meas_mean);
# if the variable is the UUT variable, also define the additional
# variable pval (point value)
# NOTE(review): the UUT is detected by the hard-coded name "VI" here,
# not by v$is_uut as elsewhere — confirm this is intentional.
if(v$name == "VI"){
var_code <- paste("pval <-", var_code);
}
return(var_code);
}
variables_code <- lapply(object$variables, build_variables_code);
# Join the lists and create a single string of ";"-separated assignments
result_code <- paste(c(variables_code, properties_code), collapse = ";");
# return the code adding the trailing semicolon
return(paste(result_code, ";", sep = ""));
}
# Combine the type A and type B uncertainty components of a measurement
# and format the per-component contributions for the DataTables UI.
#
# object: full input object (procedure expression, number of readings n)
# typeA:  list of type A components, one per variable (get_typeA_unc)
# typeB:  list of lists of type B components (get_typeB_unc)
# code:   evaluable code defining the measured variables
#         (process_input_data)
solve_uncertanties = function(object, typeA, typeB, code){
n <- object$procedure$n
x <- lapply(typeA, function(v){v$value})
# Get only the right-hand side of the model expression (element 3 of the
# parsed assignment call)
expr <- parse(text=object$procedure$expression)[[1]][3]
# Create the list of sensitivity coefficients: the partial derivative of
# the model w.r.t. each variable, evaluated at the measured values
sensitivity_coefficients <- lapply(typeA,
function(v){
# Define the measured variables, then evaluate the derivative
eval(parse(text=code))
eval(D(parse(text=expr), v$var_name))
})
# Squared contribution of one component: ((u / divisor) * sensitivity)^2
# NOTE(review): get() is handed the plain, unnamed list produced by the
# lapply above as its second argument — verify this lookup actually
# resolves u$var_name at runtime; it looks fragile.
calculate_contribution = function(u){
((u$value/u$distribution)*get(u$var_name, sensitivity_coefficients))^2
}
typeA_contribution <- lapply(typeA, calculate_contribution)
# Type B comes one level nested, because each variable can carry several
# uncertainties while type A has exactly one per variable
typeB_contribution <- lapply(typeB,
function(u){
lapply(u, calculate_contribution)
})
# Unlist everything and sum the contributions (root sum of squares)
combined_uncertanty <- sqrt(sum(unlist(typeA_contribution), unlist(typeB_contribution)))
# Welch–Satterthwaite denominator
# NOTE(review): this uses the raw type A standard uncertainties x, not
# the sensitivity-weighted contributions, and ignores type B degrees of
# freedom — confirm against the intended Welch–Satterthwaite formula.
w_den <- sum((unlist(x)^4)/(n-1))
# effective degrees of freedom
veff <- (combined_uncertanty^4)/w_den
# coverage factor from the t distribution (two-sided ~95.45%)
k <- qt(0.97725,df=veff)
U <- combined_uncertanty*k
# Format veff: round, showing infinity for NaN or very large values
veff <- round(veff)
if(is.nan(veff) || veff > 9999){
veff <- "∞"
}
# Convert the nested lists to the flat row format DataTables expects:
# one row per component, its fields followed by its contribution.
to_datatables_format = function(u, contributions){
# Flatten the lists
flat_contributions <- unlist(contributions, use.names = FALSE)
flat_u <- unlist(u, use.names = FALSE)
# Get the list sizes
n <- length(flat_contributions)
# This is the length that delimits each uncertainty component
component_size <- length(flat_u)/n
# Split into equal-size chunks, one per component
component_list <- split(flat_u, ceiling(seq_along(flat_u)/component_size))
# the sequence gives the index pairing each component with its
# contribution
lapply(seq(1,n),
function(i){
return( c(component_list[[i]], flat_contributions[i]) )
})
}
# Format data to fit DataTables
typeA_datatables <- to_datatables_format(typeA, typeA_contribution)
typeB_datatables <- to_datatables_format(typeB, typeB_contribution)
# NOTE(review): k, U, veff and combined_uncertanty are computed above
# but not included in the returned list — presumably consumed elsewhere
# or dropped; confirm.
list(aaData = c(typeA_datatables,typeB_datatables), aoColumns = '[{"sTitle": "Descrição"},{"sTitle": "Estimativa"},{"sTitle": "Divisor"},{"sTitle": "Incerteza padrao"},{"sTitle": "Coef."},{"sTitle": "ci*u(xi)"},{"sTitle": "ui(y)^2"}]')
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment