Commit a2898047 authored by ye87zine

initial commit

source("renv/activate.R")
# R specific
.Rproj.user/
.Rhistory
.RData
.Ruserdata
# renv
renv/library/
renv/staging/
renv/cache/
# Data files
r_objects/
R/performance_analysis_files
R/performance_analysis.html
# General packages
library(dplyr)
library(tidyr)
library(ggplot2)
library(furrr)
# Geo packages
library(terra)
library(CoordinateCleaner)
library(sf)
# DB packages
library(Symobio)
library(DBI)
# Modeling packages
library(caret)
library(pROC)
library(cito)
source("R/utils.R")
con = db_connect()
sf::sf_use_s2(use_s2 = FALSE)
# ---------------------------------------------------------------------------#
# Prepare Geodata ####
# ---------------------------------------------------------------------------#
raster_info = tbl(con, "datasets") %>%
dplyr::filter(stringr::str_detect(name, "CHELSA")) %>%
collect()
raster_filepaths = list.files(raster_info$raw_data_path, pattern = ".tif$", full.names = T) %>%
stringr::str_sort(numeric = T)
sa_polygon = rnaturalearth::ne_countries() %>%
dplyr::filter(continent == "South America") %>%
sf::st_union()
# ---------------------------------------------------------------------------#
# Prepare Occurrence Data ####
# ---------------------------------------------------------------------------#
load("r_objects/test_species.RData")
test_species = unique(test_species$name_matched)
occs = tbl(con, "species_occurrences") %>%
dplyr::filter(species %in% test_species) %>%
dplyr::select(-year) %>%
dplyr::distinct() %>%
collect() %>%
sf::st_as_sf(coords = c("longitude", "latitude"), remove = F, crs = sf::st_crs(4326)) %>%
sf::st_filter(sa_polygon)
occs_flagged = occs %>%
dplyr::distinct(species, coordinate_id, longitude, latitude) %>%
group_by(species) %>%
group_split() %>%
purrr::map( # Loop over species individually due to bug in CoordinateCleaner
CoordinateCleaner::clean_coordinates,
lon = "longitude",
lat = "latitude",
tests = c("centroids", "gbif", "institutions", "outliers"),
outliers_method = "quantile",
verbose = F
) %>%
bind_rows() %>%
dplyr::filter(.summary == T) %>%
dplyr::select(species, coordinate_id, longitude, latitude)
env_vars = tbl(con, "raster_extracts_num") %>%
dplyr::filter(
coordinate_id %in% occs$coordinate_id,
metric == "mean"
) %>%
arrange(raster_layer_id) %>%
tidyr::pivot_wider(id_cols = coordinate_id, names_from = raster_layer_id, names_prefix = "layer_", values_from = value) %>%
collect()
occs_final = occs %>%
inner_join(occs_flagged, by = c("species", "coordinate_id", "longitude", "latitude")) %>%
inner_join(env_vars, by = "coordinate_id") %>%
dplyr::select(-coordinate_id)
save(occs_final, file = "r_objects/occs_final.RData")
# ---------------------------------------------------------------------------#
# Main loop ####
# ---------------------------------------------------------------------------#
occs_split = split(occs_final, occs_final$species)
future::plan("multisession", workers = 16)
model_results = furrr::future_map(occs_split, .options = furrr::furrr_options(seed = 123), .f = function(occs_spec){
# Initial check
if(nrow(occs_spec) < 10){
return(NULL)
}
species = occs_spec$species[1]
# ------------------------------- #
# Define model/sampling region ####
# ------------------------------- #
sample_region = tryCatch({
sf::sf_use_s2(use_s2 = FALSE)
occs_bbox = occs_spec %>%
sf::st_bbox() %>%
expand_bbox(0.25) %>%
sf::st_as_sfc() %>%
st_set_crs(st_crs(occs_spec))
sample_region = suppressMessages(
st_as_sf(st_intersection(occs_bbox, sa_polygon))
)
}, error = function(e){
glue::glue("Skipping species {species}:\n{e}")
})
if(is.character(sample_region)){ # There was an error
return(sample_region)
}
# ------------------------------- #
# Create pseudo absence ####
# ------------------------------- #
sample_points = st_as_sf(st_sample(sample_region, nrow(occs_spec))) # TODO: Vary sample size?
abs_spec = terra::rast(raster_filepaths) %>%
setNames(paste0("layer_", 1:19)) %>%
terra::extract(sample_points) %>%
dplyr::select(-ID) %>%
dplyr::mutate(
presence = "absent",
geometry = sample_points$x
) %>%
tibble()
# ------------------------------- #
# Create modeling dataset ####
# ------------------------------- #
model_data = occs_spec %>%
dplyr::mutate(presence = "present") %>%
bind_rows(abs_spec) %>%
dplyr::mutate(presence = as.factor(presence))
# Define cross-validation folds
spatial_folds = blockCV::cv_spatial(
model_data,
column = "presence",
k = 5,
progress = F, plot = F, report = F
)
model_data$fold = spatial_folds$folds_ids
model_data$geometry = NULL
# Split into train and test datasets
train_index = createDataPartition(model_data$presence, p = 0.7, list = FALSE)
train_data = model_data[train_index, ]
test_data = model_data[-train_index, ]
# Define predictor columns
predictors = paste0("layer_", 1:19)
# ------------------------------- #
# Train models ####
# ------------------------------- #
# Preparation
na_performance = list(
AUC = NA,
Accuracy = NA,
Kappa = NA,
Precision = NA,
Recall = NA,
F1 = NA
)
# Feature selection
# --> Very time consuming!
# feature_selection = rfe(
# x = dplyr::select(model_data, contains("layer")),
# y = model_data$presence,
# rfeControl = rfeControl(functions = caretFuncs,
# method = "cv",
# number = 3, # Number of folds
# verbose = T)
# )
index_train = lapply(unique(sort(train_data$fold)), function(x){
return(which(train_data$fold != x))
})
train_ctrl = trainControl(
search = "grid",
classProbs = TRUE,
index = index_train,
summaryFunction = twoClassSummary,
savePredictions = "final"
)
# Random Forest
rf_performance = tryCatch({
rf_grid = expand.grid(
mtry = c(3,7,11,15,19) # Number of randomly selected predictors
)
rf_fit = caret::train(
x = train_data[, predictors],
y = train_data$presence,
method = "rf",
metric = "ROC",
tuneGrid = rf_grid,
trControl = train_ctrl
)
evaluate_model(rf_fit, test_data)
}, error = function(e){
na_performance
})
# Gradient Boosted Machine
gbm_performance = tryCatch({
gbm_grid <- expand.grid(
n.trees = c(100, 500, 1000, 1500), # Higher number of boosting iterations
interaction.depth = c(3, 5, 7), # Maximum depth of each tree
shrinkage = c(0.01, 0.005, 0.001), # Lower learning rates
n.minobsinnode = c(10, 20) # Minimum number of observations in nodes
)
gbm_fit = train(
x = train_data[, predictors],
y = train_data$presence,
method = "gbm",
metric = "ROC",
verbose = F,
tuneGrid = gbm_grid,
trControl = train_ctrl
)
evaluate_model(gbm_fit, test_data)
}, error = function(e){
na_performance
})
# Generalized Linear Model
glm_performance = tryCatch({
glm_fit = train(
x = train_data[, predictors],
y = train_data$presence,
method = "glm",
family=binomial,
metric = "ROC",
preProcess = c("center", "scale"),
trControl = train_ctrl
)
evaluate_model(glm_fit, test_data)
}, error = function(e){
na_performance
})
# Summarize results
performance_summary = tibble(
species = species,
obs = nrow(occs_spec),
model = c("RF", "GBM", "GLM"),
auc = c(rf_performance$AUC, gbm_performance$AUC, glm_performance$AUC),
accuracy = c(rf_performance$Accuracy, gbm_performance$Accuracy, glm_performance$Accuracy),
kappa = c(rf_performance$Kappa, gbm_performance$Kappa, glm_performance$Kappa),
precision = c(rf_performance$Precision, gbm_performance$Precision, glm_performance$Precision),
recall = c(rf_performance$Recall, gbm_performance$Recall, glm_performance$Recall),
f1 = c(rf_performance$F1, gbm_performance$F1, glm_performance$F1)
)
return(performance_summary)
})
save(model_results, file = "r_objects/model_results.RData")
---
title: "sSDM Performance analysis"
format: html
editor: visual
---
```{r init, echo = FALSE, include = FALSE}
library(tidyverse)
library(Symobio)
library(sf)
library(plotly)
library(DT)
load("../r_objects/model_results.RData")
load("../r_objects/test_species.RData")
load("../r_objects/range_maps.RData")
load("../r_objects/range_maps_gridded.RData")
load("../r_objects/occs_final.RData")
sf::sf_use_s2(use_s2 = FALSE)
```
## Summary
This document summarizes the performance of three SDM algorithms (Random Forest, Gradient Boosting Machine, Generalized Linear Model) for 96 South American terrestrial mammal species. We use six metrics (AUC, F1, kappa, accuracy, precision, and recall) to evaluate model performance and look at how performance varies with five factors (number of records, range size, range occupancy, spatial dispersion, and functional group).
Modeling decisions:
- Pseudo-absences sampled randomly within the bounding box of each species' occurrence records, expanded by 25% of its extent in each direction and clipped to South America (see the sketch after this list)
- Balanced presences and absences for each species
- Predictors: all 19 CHELSA bioclim variables
- Spatial block cross-validation
- 70/30 split of training vs. test data
- Grid search hyperparameter tuning for RF and GBM
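
The sketch below condenses these decisions for a single species. It is a minimal illustration rather than standalone runnable code: it assumes the objects created in the pipeline script (`occs_spec`, an `sf` point layer with one species' records and the `layer_1`–`layer_19` predictors; `sa_polygon`; `model_data`, the combined presence/pseudo-absence data as an `sf` object) and the helper `expand_bbox()` from `R/utils.R`.

```r
library(dplyr)
library(sf)
library(caret)

# Sampling region: occurrence bounding box, expanded by 25% per side,
# clipped to the South America polygon
sample_region = occs_spec %>%
  st_bbox() %>%
  expand_bbox(0.25) %>%
  st_as_sfc() %>%
  st_set_crs(st_crs(occs_spec)) %>%
  st_intersection(sa_polygon) %>%
  st_as_sf()

# Balanced design: one random pseudo-absence per presence record
sample_points = st_as_sf(st_sample(sample_region, nrow(occs_spec)))

# Spatial block CV: blockCV fold ids are handed to caret via `index`, so every
# resample trains on four spatial blocks and validates on the held-out fifth
# (the pipeline derives the index from the 70% training split)
spatial_folds = blockCV::cv_spatial(model_data, column = "presence", k = 5)
model_data$fold = spatial_folds$folds_ids
index_train = lapply(sort(unique(model_data$fold)), function(x) which(model_data$fold != x))
train_ctrl = trainControl(
  classProbs = TRUE,
  index = index_train,
  summaryFunction = twoClassSummary,
  savePredictions = "final"
)
```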
Key findings:
- RF and GBM models generally performed better than GLM across metrics
- More occurrence records and larger range sizes tended to improve model accuracy
- Higher range occupancy correlated with better performance.
- Spatial dispersion and functional group showed some impact but were less consistent
## Analysis
The table below shows the modeling results underlying the analysis.
```{r performance, echo = FALSE, message = FALSE, warning = FALSE}
performance = model_results %>%
purrr::keep(inherits, 'data.frame') %>%
bind_rows() %>%
pivot_longer(c(auc, accuracy, kappa, precision, recall, f1), names_to = "metric") %>%
dplyr::filter(!is.na(value)) %>%
dplyr::mutate(
metric = factor(metric, levels = c("auc", "kappa", "f1", "accuracy", "precision", "recall")),
value = pmax(value, 0, na.rm = T) # Fix one weird instance of f1 < 0
)
DT::datatable(performance)
```
### Number of records
- Model performance was generally better for species with more observations
- No major improvements in model performance beyond \~500 observations
```{r number_of_records, echo = FALSE, message = FALSE, warning = FALSE}
df_plot = performance
# Create base plot
plot <- plot_ly(
data = df_plot,
x = ~obs,
y = ~value,
color = ~model,
type = 'scatter',
mode = 'markers',
name = ~model,
hoverinfo = 'text',
text = ~paste("Species:", species, "<br>Observations:", obs, "<br>Value:", round(value, 3)),
transforms = list(
list(
type = 'filter',
target = ~metric,
operation = '=',
value = 'auc' # default value
)
)
)
# Add dropdown for selecting metric
plot <- plot %>%
layout(
title = "Model Performance vs. Number of observations",
xaxis = list(title = "Number of observations"),
yaxis = list(title = "Value"),
legend = list(x = 1.1, y = 0.5), # Move legend to the right of the plot
margin = list(r = 150), # Add right margin to accommodate legend
hovermode = 'closest',
updatemenus = list(
list(
type = "dropdown",
active = 0,
buttons = list(
list(method = "restyle", args = list("transforms[0].value", "auc"), label = "AUC"),
list(method = "restyle", args = list("transforms[0].value", "kappa"), label = "Kappa"),
list(method = "restyle", args = list("transforms[0].value", "f1"), label = "F1 score"),
list(method = "restyle", args = list("transforms[0].value", "accuracy"), label = "Accuracy"),
list(method = "restyle", args = list("transforms[0].value", "precision"), label = "Precision"),
list(method = "restyle", args = list("transforms[0].value", "recall"), label = "Recall")
)
)
)
)
bslib::card(plot, full_screen = T)
```
### Range characteristics
The spatial coverage of species occurrences was evaluated on a hexagonal grid with a cell size of 1 degree in both the latitudinal and longitudinal dimension. Two complementary metrics were used to assess range coverage: *occupancy*, the proportion of grid cells within a species' range that contain at least one occurrence record, and *spatial dispersion*, the number of occupied grid cells relative to the theoretical maximum number of cells that could be occupied given the total number of observations.
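
Expressed with the per-species counts computed in the chunks below, the two metrics reduce to the following ratios (a minimal sketch using the same variable names):

```r
# cells_total:    grid cells intersecting the species' range
# cells_occupied: grid cells containing at least one occurrence record
# occs_total:     total number of occurrence records for the species
occupancy  = cells_occupied / cells_total
dispersion = cells_occupied / pmin(cells_total, occs_total)
```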
#### Range size
- Model performance was lower for species with small ranges
- No major improvements in model performance beyond a range size of \~3M km²
```{r range_size, echo = FALSE, message = FALSE, warning = FALSE}
df_join = range_maps %>%
dplyr::mutate(range_size = as.numeric(st_area(range_maps) / 1000000)) %>% # range in sqkm
sf::st_drop_geometry()
df_plot = performance %>%
inner_join(df_join, by = c("species" = "name_matched"))
# Create base plot
plot <- plot_ly(
data = df_plot,
x = ~range_size,
y = ~value,
color = ~model,
type = 'scatter',
mode = 'markers',
name = ~model,
hoverinfo = 'text',
text = ~paste("Species:", species, "<br>Range size:", round(range_size, -3), "<br>Value:", round(value, 3)),
transforms = list(
list(
type = 'filter',
target = ~metric,
operation = '=',
value = 'auc' # default value
)
)
)
# Add dropdown for selecting metric
plot <- plot %>%
layout(
title = "Model Performance vs. Range size",
xaxis = list(title = "Range size [sqkm]"),
yaxis = list(title = "Value"),
legend = list(x = 1.1, y = 0.5), # Move legend to the right of the plot
margin = list(r = 150), # Add right margin to accommodate legend
hovermode = 'closest',
updatemenus = list(
list(
type = "dropdown",
active = 0,
buttons = list(
list(method = "restyle", args = list("transforms[0].value", "auc"), label = "AUC"),
list(method = "restyle", args = list("transforms[0].value", "kappa"), label = "Kappa"),
list(method = "restyle", args = list("transforms[0].value", "f1"), label = "F1 score"),
list(method = "restyle", args = list("transforms[0].value", "accuracy"), label = "Accuracy"),
list(method = "restyle", args = list("transforms[0].value", "precision"), label = "Precision"),
list(method = "restyle", args = list("transforms[0].value", "recall"), label = "Recall")
)
)
)
)
bslib::card(plot, full_screen = T)
```
#### Occupancy
- Models for species with higher range occupancy showed slightly better performance
```{r range_occupancy, echo = FALSE, message = FALSE, warning = FALSE}
df_cells_total = range_maps_gridded %>%
dplyr::rename("species" = name_matched) %>%
group_by(species) %>%
summarise(cells_total = n()) %>%
st_drop_geometry()
df_cells_occ <- range_maps_gridded %>%
st_join(occs_final, join = st_intersects) %>%
filter(name_matched == species) %>% # Filter only intersections of the same species
group_by(species) %>%
summarise(cells_occupied = n_distinct(geometry)) %>%
st_drop_geometry()
df_join = df_cells_total %>%
dplyr::inner_join(df_cells_occ, by = "species") %>%
dplyr::mutate(occupancy = cells_occupied / cells_total) %>%
dplyr::select(species, occupancy)
df_plot = performance %>%
inner_join(df_join, by = "species")
# Create base plot
plot <- plot_ly(
data = df_plot,
x = ~occupancy,
y = ~value,
color = ~model,
type = 'scatter',
mode = 'markers',
name = ~model,
hoverinfo = 'text',
text = ~paste("Species:", species, "<br>Range occupancy:", round(occupancy, 3), "<br>Value:", round(value, 3)),
transforms = list(
list(
type = 'filter',
target = ~metric,
operation = '=',
value = 'auc' # default value
)
)
)
# Add dropdown for selecting metric
plot <- plot %>%
layout(
title = "Model Performance vs. Range occupancy",
xaxis = list(title = "Range occupancy"),
yaxis = list(title = "Value"),
legend = list(x = 1.1, y = 0.5), # Move legend to the right of the plot
margin = list(r = 150), # Add right margin to accommodate legend
hovermode = 'closest',
updatemenus = list(
list(
type = "dropdown",
active = 0,
buttons = list(
list(method = "restyle", args = list("transforms[0].value", "auc"), label = "AUC"),
list(method = "restyle", args = list("transforms[0].value", "kappa"), label = "Kappa"),
list(method = "restyle", args = list("transforms[0].value", "f1"), label = "F1 score"),
list(method = "restyle", args = list("transforms[0].value", "accuracy"), label = "Accuracy"),
list(method = "restyle", args = list("transforms[0].value", "precision"), label = "Precision"),
list(method = "restyle", args = list("transforms[0].value", "recall"), label = "Recall")
)
)
)
)
bslib::card(plot, full_screen = T)
```
#### Spatial dispersion
- Models for species with higher spatial dispersion tended to exhibit lower performance
- Large spread, weak relationship
```{r spatial_dispersion, echo = FALSE, message = FALSE, warning = FALSE}
df_occs_total = occs_final %>%
st_drop_geometry() %>%
group_by(species) %>%
summarise(occs_total = n())
df_join = df_occs_total %>%
dplyr::inner_join(df_cells_total, by = "species") %>%
dplyr::inner_join(df_cells_occ, by = "species") %>%
dplyr::mutate(dispersion = cells_occupied / pmin(cells_total, occs_total))
df_plot = performance %>%
inner_join(df_join, by = "species")
# Create base plot
plot <- plot_ly(
data = df_plot,
x = ~dispersion,
y = ~value,
color = ~model,
type = 'scatter',
mode = 'markers',
name = ~model,
hoverinfo = 'text',
text = ~paste("Species:", species, "<br>Dispersion:", round(dispersion, 3), "<br>Value:", round(value, 3)),
transforms = list(
list(
type = 'filter',
target = ~metric,
operation = '=',
value = 'auc' # default value
)
)
)
# Add dropdown for selecting metric
plot <- plot %>%
layout(
title = "Model Performance vs. Dispersion",
xaxis = list(title = "Dispersion"),
yaxis = list(title = "Value"),
legend = list(x = 1.1, y = 0.5), # Move legend to the right of the plot
margin = list(r = 150), # Add right margin to accommodate legend
hovermode = 'closest',
updatemenus = list(
list(
type = "dropdown",
active = 0,
buttons = list(
list(method = "restyle", args = list("transforms[0].value", "auc"), label = "AUC"),
list(method = "restyle", args = list("transforms[0].value", "kappa"), label = "Kappa"),
list(method = "restyle", args = list("transforms[0].value", "f1"), label = "F1 score"),
list(method = "restyle", args = list("transforms[0].value", "accuracy"), label = "Accuracy"),
list(method = "restyle", args = list("transforms[0].value", "precision"), label = "Precision"),
list(method = "restyle", args = list("transforms[0].value", "recall"), label = "Recall")
)
)
)
)
bslib::card(plot, full_screen = T)
```
### Functional group
- No major difference in model performance among functional groups
```{r functional_groups, echo = FALSE, message = FALSE, warning = FALSE}
df_join = test_species %>%
dplyr::distinct(name_matched, functional_group) %>%
dplyr::mutate(functional_group = factor(functional_group, labels = c("arboreal", "large ground-dwelling", "small ground-dwelling")))
df_plot = performance %>%
dplyr::left_join(df_join, by = c("species" = "name_matched"))
plot <- plot_ly(
data = df_plot,
x = ~functional_group,
y = ~value,
color = ~model,
type = 'box',
boxpoints = "all",
jitter = 1,
pointpos = 0,
hoverinfo = 'text',
text = ~paste("Species:", species, "<br>Functional group:", functional_group, "<br>Value:", round(value, 3)),
transforms = list(
list(
type = 'filter',
target = ~metric,
operation = '=',
value = 'auc' # default value
)
)
)
plot <- plot %>%
layout(
title = "Model Performance vs. Functional Group",
xaxis = list(title = "Functional group"),
yaxis = list(title = "Value"),
legend = list(x = 1.1, y = 0.5), # Move legend to the right of the plot
margin = list(r = 150), # Add right margin to accommodate legend
hovermode = 'closest',
boxmode = "group",
updatemenus = list(
list(
type = "dropdown",
active = 0,
buttons = list(
list(method = "restyle", args = list("transforms[0].value", "auc"), label = "AUC"),
list(method = "restyle", args = list("transforms[0].value", "kappa"), label = "Kappa"),
list(method = "restyle", args = list("transforms[0].value", "f1"), label = "F1 score"),
list(method = "restyle", args = list("transforms[0].value", "accuracy"), label = "Accuracy"),
list(method = "restyle", args = list("transforms[0].value", "precision"), label = "Precision"),
list(method = "restyle", args = list("transforms[0].value", "recall"), label = "Recall")
)
)
)
)
bslib::card(plot, full_screen = T)
```
library(tidyverse)
library(Symobio)
library(sf)
library(rnaturalearth)
load("r_objects/test_species.RData")
sf::sf_use_s2(use_s2 = FALSE)
# Load range maps
range_maps = st_read(
"~/share/groups/mas_data/Saved_Data_Dropbox_Business/Datensätze/Range Maps/IUCN_range_maps_mammals_version2016/TERRESTRIAL_MAMMALS.shp",
geometry_column = "geometry",
promote_to_multi = T
) %>%
dplyr::filter(!legend %in% c("Extinct", "Not Mapped"))
# Load South America polygon
sa_polygon = rnaturalearth::ne_countries() %>%
dplyr::filter(continent == "South America") %>%
sf::st_union()
# Match names against GBIF backbone
range_maps_names_matched = lapply(unique(range_maps$binomial), function(name){
tryCatch({
match_result = Symobio::gbif_match_name(name = name)
if(match_result$status != "ACCEPTED"){
match_result = gbif_match_name(usageKey = match_result$acceptedUsageKey)
}
name_matched = if("species" %in% names(match_result)) match_result$species else NA
data.frame(name_orig = name, name_matched = name_matched)
}, error = function(e){
return(NULL)
})
})
save(range_maps_names_matched, file = "r_objects/range_maps_names_matched.RData")
# Subset range maps to target species and focal region
names_subset = Filter(is.data.frame, range_maps_names_matched) %>%
bind_rows() %>%
dplyr::filter(name_matched %in% test_species$name_matched)
range_maps = range_maps %>%
inner_join(names_subset, by = c("binomial" = "name_orig")) %>%
group_by(name_matched) %>%
summarize(geometry = suppressMessages(st_union(geometry))) %>%
st_intersection(sa_polygon)
save(range_maps, file = "r_objects/range_maps.RData")
# Gridded range maps
range_maps_gridded = st_make_grid(sa_polygon, square = FALSE, cellsize = 1) %>%
st_sf() %>%
st_join(range_maps, st_intersects, left = F)
save(range_maps_gridded, file = "r_objects/range_maps_gridded.RData")
expand_bbox <- function(bbox, expand_factor = 0.1) {
# Get current bbox dimensions
x_range <- bbox["xmax"] - bbox["xmin"]
y_range <- bbox["ymax"] - bbox["ymin"]
# Expand the limits, adjusting both directions correctly
bbox["xmin"] <- max(bbox["xmin"] - (expand_factor * x_range), -180)
bbox["xmax"] <- min(bbox["xmax"] + (expand_factor * x_range), 180)
bbox["ymin"] <- max(bbox["ymin"] - (expand_factor * y_range), -90)
bbox["ymax"] <- min(bbox["ymax"] + (expand_factor * y_range), 90)
return(bbox)
}
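# Example (hypothetical WGS84 bbox), expanding by 25% of the range on each side:
#   bbox = sf::st_bbox(c(xmin = -60, ymin = -20, xmax = -50, ymax = -10), crs = 4326)
#   expand_bbox(bbox, expand_factor = 0.25)
#   #> xmin = -62.5, ymin = -22.5, xmax = -47.5, ymax = -7.5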
evaluate_model <- function(model, test_data, threshold = 0.5) {
# Accuracy: The proportion of correctly predicted instances (both true positives and true negatives) out of the total instances.
# Formula: Accuracy = (TP + TN) / (TP + TN + FP + FN)
# Precision: The proportion of true positives out of all instances predicted as positive.
# Formula: Precision = TP / (TP + FP)
# Recall (Sensitivity): The proportion of true positives out of all actual positive instances.
# Formula: Recall = TP / (TP + FN)
# F1 Score: The harmonic mean of Precision and Recall, balancing the two metrics.
# Formula: F1 = 2 * (Precision * Recall) / (Precision + Recall)
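# Worked example with hypothetical counts (TP = 40, TN = 35, FP = 10, FN = 15):
#   Accuracy  = (40 + 35) / 100 = 0.75
#   Precision = 40 / 50 = 0.80
#   Recall    = 40 / 55 ≈ 0.727
#   F1        = 2 * (0.80 * 0.727) / (0.80 + 0.727) ≈ 0.762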
# Predict probabilities
probs <- predict(model, test_data, type = "prob")$present
preds <- predict(model, test_data, type = "raw")
actual <- test_data$presence
# Calculate AUC
auc <- pROC::roc(actual, probs, levels = c("present", "absent"), direction = ">")$auc
# Calculate confusion matrix
cm <- caret::confusionMatrix(preds, actual, positive = "present")
# Return metrics
return(list(
AUC = auc,
Accuracy = cm$overall["Accuracy"],
Kappa = cm$overall["Kappa"],
Precision = cm$byClass["Precision"],
Recall = cm$byClass["Recall"],
F1 = cm$byClass["F1"]
))
}
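# Example usage (assuming `fit` is a caret model trained with classProbs = TRUE and
# `test_data` contains a `presence` factor with levels "absent"/"present"):
#   perf = evaluate_model(fit, test_data)
#   perf$AUC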
Version: 1.0
RestoreWorkspace: Default
SaveWorkspace: Default
AlwaysSaveHistory: Default
EnableCodeIndexing: Yes
UseSpacesForTab: Yes
NumSpacesForTab: 2
Encoding: UTF-8
RnwWeave: Sweave
LaTeX: pdfLaTeX
library/
local/
cellar/
lock/
python/
sandbox/
staging/