@InProceedings{dziedzic19a,
  title     = {Band-limited Training and Inference for Convolutional Neural Networks},
  author    = {Dziedzic, Adam and Paparrizos, John and Krishnan, Sanjay and Elmore, Aaron and Franklin, Michael},
  booktitle = {Proceedings of the 36th International Conference on Machine Learning},
  pages     = {1745--1754},
  year      = {2019},
  editor    = {Chaudhuri, Kamalika and Salakhutdinov, Ruslan},
  volume    = {97},
  series    = {Proceedings of Machine Learning Research},
  address   = {Long Beach, California, USA},
  month     = {09--15 Jun},
  publisher = {PMLR},
  pdf       = {http://proceedings.mlr.press/v97/dziedzic19a/dziedzic19a.pdf},
  url       = {http://proceedings.mlr.press/v97/dziedzic19a.html},
  abstract  = {Convolutional layers are core building blocks of neural network architectures. In general, a convolutional filter applies to the entire frequency spectrum of the input data. We explore artificially constraining the frequency spectra of these filters and data during training, a technique called band-limiting. The frequency-domain constraints apply to both the feed-forward and back-propagation steps. Experimentally, we observe that Convolutional Neural Networks (CNNs) are resilient to this compression scheme, and our results suggest that CNNs learn to leverage lower-frequency components. In particular, we found that: (1) band-limited training can effectively control resource usage (GPU and memory); (2) models trained with band-limited layers retain high prediction accuracy; and (3) unlike other compression schemes, band-limited training requires no modification to existing training algorithms or neural network architectures.}
}
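
As a companion to the abstract above, here is a minimal NumPy sketch of the band-limiting idea: an FFT-based convolution whose spectrum is masked to keep only low-frequency coefficients. This is an illustration under stated assumptions, not the authors' implementation; the function name, the keep_frac parameter, and the square low-pass mask are hypothetical choices made for this sketch.

  import numpy as np

  def band_limited_conv2d(x, w, keep_frac=0.5):
      # FFT-based 2D convolution that discards high-frequency coefficients.
      # x: 2D input array; w: 2D filter; keep_frac: fraction of the
      # spectrum (per axis) retained, an illustrative knob for band-limiting.
      H, W = x.shape
      X = np.fft.fft2(x)            # input spectrum
      K = np.fft.fft2(w, s=(H, W))  # filter spectrum, zero-padded to input size
      Y = X * K                     # convolution theorem: pointwise product

      # Build a square low-pass mask. In the unshifted FFT layout, the
      # lowest frequencies sit in the four corners of the spectrum.
      h = max(1, int(H * keep_frac) // 2)
      v = max(1, int(W * keep_frac) // 2)
      mask = np.zeros((H, W))
      mask[:h, :v] = 1
      mask[:h, -v:] = 1
      mask[-h:, :v] = 1
      mask[-h:, -v:] = 1

      # Zero out high frequencies, then return to the spatial domain.
      return np.real(np.fft.ifft2(Y * mask))

Storing and multiplying only the retained coefficients (rather than masking a full-size spectrum, as done here for clarity) is what yields the memory and compute savings described in the abstract.
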