This PR relies on the C++ implementation from https://github.com/rapidsai/cuml/pull/5987
Adds Python and benchmarking code for Kernel PCA. This implementation of Kernel PCA supports fit(), transform(), and fit_transform().
The API deviates from SKlearn by not supporting options for these fields: fit_inverse_transform, random_state, n_jobs, max_iter. If a user tries to set one of them a NotImplementedError will be raised.
The Criteria of Done mentions making the class pickleable in cuml/tests/test_pickle.py. I couldn't find a PCA reference for this. Would appreciate pointers if additional work is needed.
We see an even greater speedup when we set n_components = n_samples. Setting n_components to n_samples is the same as default behavior, except zero eigenvalues aren't removed.
Manual tests
Kernel PCA with RBF kernel
code
# Demo: compare PCA and Kernel PCA (RBF) projections of the iris dataset
# across scikit-learn and cuML, plotted side by side in 3-D.
from sklearn.decomposition import PCA as skPCA, KernelPCA as skKernelPCA
from sklearn import datasets
from cuml.decomposition import PCA as cuPCA
from cuml.experimental.decomposition import KernelPCA as cuKernelPCA
import numpy as np
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt

iris = load_iris()
X = iris.data
y = iris.target

# Fit all four estimators with 3 output components each.
sk_pca = skPCA(n_components=3)
X_sk_pca = sk_pca.fit_transform(X)
cu_pca = cuPCA(n_components=3)
X_cu_pca = cu_pca.fit_transform(X)
sk_kpca = skKernelPCA(n_components=3, kernel='rbf')
X_sk_kpca = sk_kpca.fit_transform(X)
cu_kpca = cuKernelPCA(n_components=3, kernel='rbf')
X_cu_kpca = cu_kpca.fit_transform(X)

# Plot the results
fig = plt.figure(figsize=(24, 12))


def _add_3d_subplot(position, data, title, axis_labels):
    """Add one 3-D scatter subplot to `fig`, coloring points by iris class."""
    ax = fig.add_subplot(position, projection='3d')
    for target in np.unique(y):
        mask = y == target
        ax.scatter(data[mask, 0], data[mask, 1], data[mask, 2],
                   label=iris.target_names[target])
    ax.set_title(title)
    ax.set_xlabel(axis_labels[0])
    ax.set_ylabel(axis_labels[1])
    ax.set_zlabel(axis_labels[2])
    ax.legend()


feature_labels = ('Feature 1', 'Feature 2', 'Feature 3')
pc_labels = ('Principal Component 1', 'Principal Component 2',
             'Principal Component 3')

_add_3d_subplot(231, X, 'Original Data (First Three Features)', feature_labels)
_add_3d_subplot(232, X_sk_pca, 'SK PCA (3 Components)', pc_labels)
_add_3d_subplot(233, X_cu_pca, 'cuML PCA (3 Components)', pc_labels)
_add_3d_subplot(234, X_sk_kpca,
                'SK KernelPCA with RBF Kernel (3 Components)', pc_labels)
_add_3d_subplot(235, X_cu_kpca,
                'cuML KernelPCA with RBF Kernel (3 Components)', pc_labels)
plt.show()
Kernel PCA with poly kernel
code
# Demo: Kernel PCA with a polynomial kernel on synthetic classification data.
from sklearn.datasets import make_classification


def plot_3d_projection(X, y, title, elev=30, azim=30):
    """Scatter-plot the first three columns of X in 3-D, colored by label y.

    Parameters
    ----------
    X : array-like of shape (n_samples, >=3) — only columns 0..2 are plotted
    y : array-like of class labels used for grouping/legend
    title : str, plot title
    elev, azim : viewing angles passed to Axes3D.view_init
    """
    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(111, projection='3d')
    for target in np.unique(y):
        ax.scatter(X[y == target, 0], X[y == target, 1], X[y == target, 2],
                   label=str(target))
    ax.set_title(title)
    ax.set_xlabel('Component 1')
    ax.set_ylabel('Component 2')
    ax.set_zlabel('Component 3')
    ax.legend()
    ax.view_init(elev=elev, azim=azim)  # Set viewpoint
    plt.show()


X, y = make_classification(n_features=3, n_informative=3, n_redundant=0,
                           n_clusters_per_class=1, n_classes=3)
plot_3d_projection(X, y, 'Original Data', elev=30, azim=60)
poly_kpca = cuKernelPCA(n_components=3, kernel='poly', degree=5, gamma=2, coef0=2)
# BUG FIX: the original read `linear_kpca.fit_transform(X_linear)`, but neither
# `linear_kpca` nor `X_linear` is defined anywhere in this snippet — the poly
# estimator created above and the data X were clearly intended.
X_poly_kpca = poly_kpca.fit_transform(X)
plot_3d_projection(X_poly_kpca, y, 'cuML KernelPCA with Poly Kernel', elev=30, azim=120)
Projecting testing data
Case is copied from sklearn, except it uses cuML PCA and kernelPCA
code
# Demo: project held-out test data with cuML PCA vs. Kernel PCA (RBF),
# mirroring the scikit-learn make_circles projection example.
from cuml.decomposition import PCA as cuPCA
from cuml.experimental.decomposition import KernelPCA as cuKernelPCA
from sklearn.datasets import make_circles
from sklearn.model_selection import train_test_split

X, y = make_circles(n_samples=1_000, factor=0.3, noise=0.05, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, stratify=y, random_state=0
)

# Two estimators: plain PCA with 2 components, and Kernel PCA keeping all
# components (n_components=None) with an RBF kernel.
pca = cuPCA(n_components=2)
kernel_pca = cuKernelPCA(n_components=None, kernel="rbf", gamma=10, alpha=0.1)

# Fit on the training split, then project the held-out test split.
X_test_pca = pca.fit(X_train).transform(X_test)
X_test_kernel_pca = kernel_pca.fit(X_train).transform(X_test)

# Three panels: raw test data, PCA projection, Kernel PCA projection.
fig, axes = plt.subplots(ncols=3, figsize=(14, 4))
orig_data_ax, pca_proj_ax, kernel_pca_proj_ax = axes

orig_data_ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test)
orig_data_ax.set_ylabel("Feature #1")
orig_data_ax.set_xlabel("Feature #0")
orig_data_ax.set_title("Testing data")

pca_proj_ax.scatter(X_test_pca[:, 0], X_test_pca[:, 1], c=y_test)
pca_proj_ax.set_ylabel("Principal component #1")
pca_proj_ax.set_xlabel("Principal component #0")
pca_proj_ax.set_title("Projection of testing data\n using PCA")

kernel_pca_proj_ax.scatter(
    X_test_kernel_pca[:, 0], X_test_kernel_pca[:, 1], c=y_test
)
kernel_pca_proj_ax.set_ylabel("Principal component #1")
kernel_pca_proj_ax.set_xlabel("Principal component #0")
_ = kernel_pca_proj_ax.set_title("Projection of testing data\n using KernelPCA")
[x] Python class is as "near drop-in replacement" for Scikit-learn (or relevant industry standard) API as possible. This means parameters have the same names as Scikit-learn, and where differences exist, they are clearly documented in docstrings.
[x] Initial PR with the API design if there are going to be significant differences with reference APIs, or lack of a reference API, to have a discussion about it.
[ ] Python class is pickleable and a test has been added to cuml/tests/test_pickle.py
[x] APIs use input_to_cuml_array to accept flexible inputs and check their datatypes and use cumlArray.to_output() to return configurable outputs.
[x] Any internal parameters or array-based instance variables use CumlArray
Testing
[x] Pytests for wrapper functionality against Scikit-learn using relevant datasets
[x] Stress tests against reasonable inputs (e.g short-wide, tall-narrow, different numerical precision)
[x] Pytests for pickle capability
[x] Pytests to evaluate correctness against Scikit-learn on a variety of datasets
[x] Add algorithm to benchmarks package in python/cuml/benchmarks/algorithms.py and benchmarks notebook in python/cuml/notebooks/tools/cuml_benchmarks.ipynb
[x] PyTests that run in the "unit"-level marker should be quick to execute and should, in general, not significantly increase end-to-end test execution.
Description
This PR relies on the C++ implementation from https://github.com/rapidsai/cuml/pull/5987 Adds Python and benchmarking code for Kernel PCA. This implementation of Kernel PCA supports fit(), transform(), and fit_transform().
Feature request: https://github.com/rapidsai/cuml/issues/1317
Tests and benchmarks were performed on an EC2
g4dn.xlarge
instance with CUDA 12.2. Click here to see environment details
Notes for Reviewers
The API deviates from SKlearn by not supporting options for these fields: fit_inverse_transform, random_state, n_jobs, max_iter. If a user tries to set one of them a
NotImplementedError
will be raised. The Criteria of Done mentions making the class pickleable in cuml/tests/test_pickle.py
. I couldn't find a PCA reference for this. Would appreciate pointers if additional work is needed. Benchmarks
From
notebooks/tools/cuml_benchmarks.ipynb
Benchmark output
We see an even greater speedup when we set n_components = n_samples. Setting n_components to n_samples is the same as default behavior, except zero eigenvalues aren't removed.
Manual tests
Kernel PCA with RBF kernel
code
Kernel PCA with poly kernel
code
Projecting testing data
Case is copied from sklearn, except it uses cuML PCA and kernelPCA
code
Definition of Done Criteria Checklist
Python Checklist
Design
cuml/tests/test_pickle.py
input_to_cuml_array
to accept flexible inputs and check their datatypes and use CumlArray.to_output()
to return configurable outputs.CumlArray
Testing
python/cuml/benchmarks/algorithms.py
and benchmarks notebook in python/cuml/notebooks/tools/cuml_benchmarks.ipynb
Unit test results
Python Test Results