diff --git a/notebooks/03.2-Regression-Forests.ipynb b/notebooks/03.2-Regression-Forests.ipynb index 43b302d..258ef79 100644 --- a/notebooks/03.2-Regression-Forests.ipynb +++ b/notebooks/03.2-Regression-Forests.ipynb @@ -409,6 +409,13 @@ "\n", "Repeat this classification task with ``sklearn.ensemble.RandomForestClassifier``. How does the ``max_depth``, ``max_features``, and ``n_estimators`` affect the results?" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/notebooks/04.1-Dimensionality-PCA.ipynb b/notebooks/04.1-Dimensionality-PCA.ipynb index f07da6e..dc7a46f 100644 --- a/notebooks/04.1-Dimensionality-PCA.ipynb +++ b/notebooks/04.1-Dimensionality-PCA.ipynb @@ -183,7 +183,7 @@ "outputs": [], "source": [ "plt.scatter(Xproj[:, 0], Xproj[:, 1], c=y, edgecolor='none', alpha=0.5,\n", - " cmap=plt.cm.get_cmap('nipy_spectral', 10))\n", + " cmap=plt.colormaps['nipy_spectral'].resampled(10))\n", "plt.colorbar();" ] }, @@ -380,6 +380,13 @@ " \n", "Each of these has its own strengths & weaknesses, and areas of application. You can read about them on the [scikit-learn website](http://sklearn.org)." 
] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/notebooks/04.2-Clustering-KMeans.ipynb b/notebooks/04.2-Clustering-KMeans.ipynb index a1ef398..e540c99 100644 --- a/notebooks/04.2-Clustering-KMeans.ipynb +++ b/notebooks/04.2-Clustering-KMeans.ipynb @@ -81,7 +81,7 @@ "outputs": [], "source": [ "from sklearn.cluster import KMeans\n", - "est = KMeans(4) # 4 clusters\n", + "est = KMeans(4, n_init='auto') # 4 clusters\n", "est.fit(X)\n", "y_kmeans = est.predict(X)\n", "plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=50, cmap='rainbow');" @@ -164,7 +164,7 @@ "metadata": {}, "outputs": [], "source": [ - "est = KMeans(n_clusters=10)\n", + "est = KMeans(n_clusters=10, n_init='auto')\n", "clusters = est.fit_predict(digits.data)\n", "est.cluster_centers_.shape" ] @@ -208,7 +208,7 @@ "labels = np.zeros_like(clusters)\n", "for i in range(10):\n", " mask = (clusters == i)\n", - " labels[mask] = mode(digits.target[mask])[0]" + " labels[mask] = mode(digits.target[mask], keepdims=False)[0]" ] }, { @@ -228,7 +228,7 @@ "\n", "X = PCA(2).fit_transform(digits.data)\n", "\n", - "kwargs = dict(cmap = plt.cm.get_cmap('rainbow', 10),\n", + "kwargs = dict(cmap=plt.colormaps['rainbow'].resampled(10),\n", " edgecolor='none', alpha=0.6)\n", "fig, ax = plt.subplots(1, 2, figsize=(8, 4))\n", "ax[0].scatter(X[:, 0], X[:, 1], c=labels, **kwargs)\n", @@ -368,7 +368,7 @@ "\n", "X = (china / 255.0).reshape(-1, 3)\n", " \n", - "model = MiniBatchKMeans(n_colors)\n", + "model = MiniBatchKMeans(n_colors, n_init='auto')\n", "labels = model.fit_predict(X)\n", "colors = model.cluster_centers_\n", "new_image = colors[labels].reshape(china.shape)\n", diff --git a/notebooks/05-Validation.ipynb b/notebooks/05-Validation.ipynb index c351764..78488f4 100644 --- a/notebooks/05-Validation.ipynb +++ b/notebooks/05-Validation.ipynb @@ -825,6 +825,13 @@ " \n", "These tools are powerful means of evaluating your model on your data." 
] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/notebooks/06.1-Practice-Penguins.ipynb b/notebooks/06.1-Practice-Penguins.ipynb index 1cfc165..3f8b300 100644 --- a/notebooks/06.1-Practice-Penguins.ipynb +++ b/notebooks/06.1-Practice-Penguins.ipynb @@ -297,6 +297,14 @@ "source": [ "# create a confusion matrix for cross-validated predictions\n" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3488c51c-4e0d-45ec-92aa-c791d14915d4", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": {