@article{2fe8befcec414d10a9ece271f6d3a78c,
title = "High-Performance Kernel Machines With Implicit Distributed Optimization and Randomization",
abstract = "We propose a framework for massive-scale training of kernel-based statistical models, based on combining distributed convex optimization with randomization techniques. Our approach is based on a block-splitting variant of the alternating directions method of multipliers, carefully reconfigured to handle very large random feature matrices under memory constraints, while exploiting hybrid parallelism typically found in modern clusters of multicore machines. Our high-performance implementation supports a variety of statistical learning tasks by enabling several loss functions, regularization schemes, kernels, and layers of randomized approximations for both dense and sparse datasets, in an extensible framework. We evaluate our implementation on large-scale model construction tasks and provide a comparison against existing sequential and parallel libraries. Supplementary materials for this article are available online.",
keywords = "Big data, Kernel methods, Scalability, Statistical computations",
author = "Haim Avron and Vikas Sindhwani",
note = "Publisher Copyright: {\textcopyright} 2016 American Statistical Association and the American Society for Quality.",
year = "2016",
month = jul,
day = "2",
doi = "10.1080/00401706.2015.1111261",
language = "English",
volume = "58",
pages = "341--349",
journal = "Technometrics",
issn = "0040-1706",
publisher = "Taylor and Francis Ltd.",
number = "3",
}
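
The abstract's "layers of randomized approximations" for kernels most likely refers to random-feature expansions in the spirit of Rahimi and Recht's random Fourier features, which let a linear solver (here, a block-splitting ADMM) stand in for an exact kernel machine. The following is a minimal, illustrative sketch under that assumption, not the authors' implementation: it maps inputs through random Fourier features for the Gaussian kernel and then fits a ridge-regularized linear model on the features. The function name random_fourier_features and all parameter choices are hypothetical.

import numpy as np

def random_fourier_features(X, n_features=512, gamma=1.0, seed=0):
    """Map X (n x d) to Z so that Z[i] @ Z[j] approximates
    the Gaussian kernel exp(-gamma * ||x_i - x_j||^2)."""
    rng = np.random.default_rng(seed)
    d = X.shape[1]
    # Frequencies drawn from the Fourier transform of the Gaussian
    # kernel, which is a Gaussian with variance 2 * gamma.
    W = rng.normal(scale=np.sqrt(2.0 * gamma), size=(d, n_features))
    b = rng.uniform(0.0, 2.0 * np.pi, size=n_features)
    return np.sqrt(2.0 / n_features) * np.cos(X @ W + b)

# Usage: approximate kernel ridge regression by solving a plain
# ridge problem in the random-feature space (hypothetical data).
X = np.random.rand(100, 5)
y = np.random.rand(100)
Z = random_fourier_features(X)
lam = 1e-2
w = np.linalg.solve(Z.T @ Z + lam * np.eye(Z.shape[1]), Z.T @ y)

In the paper's setting, the resulting linear problem is far larger and is solved with a distributed block-splitting ADMM under memory constraints rather than the dense direct solve shown here.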