tune_random_machines(): Orchestrates hyperparameter tuning for multiple kernels simultaneously.
tune_random_machines(
data,
time_col = "t",
delta_col = "delta",
kernel_mix,
param_grids,
cv = 5L,
cores = parallel::detectCores(),
verbose = 0L,
...
)

data: Training data frame.
time_col: Time column name.
delta_col: Event column name.
kernel_mix: A named list of base configurations.
param_grids: A named list of grids.
cv: Number of folds (default 5).
cores: Number of parallel cores (default: parallel::detectCores()).
verbose: Verbosity level (0 or 1).
...: Additional fixed parameters.
Value: An object of class "random_machines_tune".
if (FALSE) { # \dontrun{
# Gate: the example needs the Python "sksurv" module (via reticulate) and
# the "mirai" package to be available before running.
if (reticulate::py_module_available("sksurv") && requireNamespace("mirai")) {
library(FastSurvivalSVM)
set.seed(99)  # reproducible data generation and CV fold splits
# --- 1. Prepare Data ---
# Simulated survival data set of n = 300 rows; prop_cen = 0.25 is
# presumably the censoring proportion -- confirm in data_generation() docs.
df <- data_generation(n = 300, prop_cen = 0.25)
# =========================================================================
# 2. Define Custom Kernel Functions
# =========================================================================
# Wavelet Kernel (Custom 1)
# Product across dimensions of a Morlet-style mother wavelet evaluated at
# the element-wise scaled difference (x - z) / A, where A is the dilation.
my_wavelet <- function(x, z, A) {
  scaled_diff <- (as.numeric(x) - as.numeric(z)) / A
  mother_wavelet <- cos(1.75 * scaled_diff) * exp(-0.5 * scaled_diff^2)
  prod(mother_wavelet)
}
# Polynomial Kernel (Custom 2)
# Inhomogeneous polynomial kernel: (<x, z> + coef0) ^ degree.
my_poly <- function(x, z, degree, coef0) {
  dot_xz <- sum(as.numeric(x) * as.numeric(z))
  (dot_xz + coef0)^degree
}
# =========================================================================
# 3. Define Kernel Structure (Regression Mode: rank_ratio = 0)
# =========================================================================
kernel_mix <- list(
linear_std = list(kernel = "linear", rank_ratio = 0.0),
rbf_std = list(kernel = "rbf", rank_ratio = 0.0),
# For the two custom kernels the kernel function itself is supplied via
# grid_kernel() in param_grids below, so only rank_ratio is fixed here.
wavelet_ok = list(rank_ratio = 0.0),
poly_ok = list(rank_ratio = 0.0)
)
# =========================================================================
# 4. Define Grids (4 Kernels x 4 Values per parameter)
# =========================================================================
# Names here must match the names in kernel_mix above; each entry lists the
# values to cross over during CV.
param_grids <- list(
# 1. Linear (Native): 4 alpha values
linear_std = list(
alpha = c(0.01, 0.1, 1.0, 10.0)
),
# 2. RBF (Native): 4 alpha values x 4 gamma values
rbf_std = list(
alpha = c(0.01, 0.1, 1.0, 10.0),
gamma = c(0.001, 0.01, 0.1, 1.0)
),
# 3. Wavelet (Custom): 4 variants (A) x 4 alphas
wavelet_ok = list(
kernel = grid_kernel(my_wavelet, A = c(0.5, 1.0, 1.5, 2.0)),
alpha = c(0.01, 0.1, 1.0, 10.0)
),
# 4. Polynomial (Custom): 4 variants (degree) x 4 alphas
# Note: We fix coef0=1 to reduce grid explosion, but degree varies 4 times
poly_ok = list(
kernel = grid_kernel(my_poly, degree = c(1, 2, 3, 4), coef0 = 1),
alpha = c(0.01, 0.1, 1.0, 10.0)
)
)
# =========================================================================
# 5. Run Tuning (Using ALL cores)
# =========================================================================
tune_res <- tune_random_machines(
data = df,
# NOTE(review): assumes data_generation() names its time/status columns
# "tempo" and "cens" -- confirm against that function's output.
time_col = "tempo",
delta_col = "cens",
kernel_mix = kernel_mix,
param_grids = param_grids,
cv = 3,
cores = parallel::detectCores(),
verbose = 1
)
print(tune_res)
# 6. Bridge to Training
# Convert the winning hyperparameters back into a kernel list that the
# training entry point can consume.
final_kernels <- as_kernels(tune_res, kernel_mix)
}
} # }