diff --git a/Feature_Importance_Classification.ipynb b/Feature_Importance_Classification.ipynb
index 2ea9743..12ac34e 100644
--- a/Feature_Importance_Classification.ipynb
+++ b/Feature_Importance_Classification.ipynb
@@ -456,7 +456,7 @@
"source": [
"from sklearn.ensemble import RandomForestClassifier\n",
"\n",
- "# Create decision tree classifer object\n",
+ "# Create decision tree classifier object\n",
"clf = RandomForestClassifier(random_state=0, n_jobs=-1)\n",
"\n",
"# Train model\n",
diff --git a/Feature_Importance_Continuous.ipynb b/Feature_Importance_Continuous.ipynb
index 18e6676..5891305 100644
--- a/Feature_Importance_Continuous.ipynb
+++ b/Feature_Importance_Continuous.ipynb
@@ -456,7 +456,7 @@
"source": [
"from sklearn.ensemble import RandomForestRegressor\n",
"\n",
- "# Create decision tree classifer object\n",
+ "# Create decision tree classifier object\n",
"clf = RandomForestRegressor(random_state=0, n_jobs=-1)\n",
"\n",
"# Train model\n",
diff --git a/Stock_Algorithms/Bernoulli_Restricted_Boltzmann_Machine.ipynb b/Stock_Algorithms/Bernoulli_Restricted_Boltzmann_Machine.ipynb
index 648da00..4428830 100644
--- a/Stock_Algorithms/Bernoulli_Restricted_Boltzmann_Machine.ipynb
+++ b/Stock_Algorithms/Bernoulli_Restricted_Boltzmann_Machine.ipynb
@@ -547,7 +547,7 @@
{
"cell_type": "markdown",
"source": [
- "Using higher random state, it increase the accuracy. However, changing n_compoents to 0, 1, 2, or more, it changes nothing."
+ "Using higher random state, it increase the accuracy. However, changing n_components to 0, 1, 2, or more, it changes nothing."
],
"metadata": {}
}
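The claim above is worth testing directly: `random_state` is only a seed, so accuracy differences across seeds are run-to-run variance rather than a tunable trend, and `n_components=0` would raise an error (it must be at least 1). A hedged sketch of how one might check both knobs on synthetic data (the RBM-plus-logistic-regression pipeline is an assumption about the notebook's setup):

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler

X, y = make_classification(n_samples=600, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

for n_components in (2, 8, 32):          # 0 is invalid; must be >= 1
    for seed in (0, 1, 2):               # the seed controls variance, not quality
        model = Pipeline([
            ("scale", MinMaxScaler()),   # BernoulliRBM expects values in [0, 1]
            ("rbm", BernoulliRBM(n_components=n_components, random_state=seed)),
            ("clf", LogisticRegression(max_iter=1000)),
        ])
        model.fit(X_train, y_train)
        print(n_components, seed, round(model.score(X_test, y_test), 3))
```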
diff --git a/Stock_Algorithms/Classification_Cluster_2.ipynb b/Stock_Algorithms/Classification_Cluster_2.ipynb
index aa0ac5b..6004826 100644
--- a/Stock_Algorithms/Classification_Cluster_2.ipynb
+++ b/Stock_Algorithms/Classification_Cluster_2.ipynb
@@ -55,7 +55,7 @@
" 'Johnson & Johnson': 'JNJ',\n",
" 'Toyota': 'TM',\n",
" 'Honda': 'HMC',\n",
- " 'Mistubishi': 'MSBHY',\n",
+ " 'Mitsubishi': 'MSBHY',\n",
" 'Sony': 'SNE',\n",
" 'Exxon': 'XOM',\n",
" 'Chevron': 'CVX',\n",
@@ -230,7 +230,7 @@
"Cluster 6: Advanced Micro Devices\n",
"Cluster 7: Vale\n",
"Cluster 8: Exxon, Chevron, IBM\n",
- "Cluster 9: Pepsi, Coca Cola, Mistubishi, Valero Energy\n",
+ "Cluster 9: Pepsi, Coca Cola, Mitsubishi, Valero Energy\n",
"Cluster 10: Walgreen\n",
"Cluster 11: Petrobras\n",
"Cluster 12: Ford\n"
@@ -377,7 +377,7 @@
"25 Vale 0\n",
"24 Valero Energy 0\n",
"22 General Electrics 0\n",
- "20 Mistubishi 0\n",
+ "20 Mitsubishi 0\n",
"19 IBM 0\n",
"18 Walgreen 0\n",
"29 Johnson & Johnson 0\n",
@@ -496,7 +496,7 @@
"3 Lockheed Martin 9\n",
"23 American Express 9\n",
"7 Intel 9\n",
- "20 Mistubishi 9\n",
+ "20 Mitsubishi 9\n",
"9 McDonalds 9\n",
"18 Walgreen 9\n",
"10 Pepsi 9\n",
diff --git a/Stock_Algorithms/Classification_Cluster_3.ipynb b/Stock_Algorithms/Classification_Cluster_3.ipynb
index 0547012..ec058be 100644
--- a/Stock_Algorithms/Classification_Cluster_3.ipynb
+++ b/Stock_Algorithms/Classification_Cluster_3.ipynb
@@ -55,7 +55,7 @@
" 'Johnson & Johnson': 'JNJ',\n",
" 'Toyota': 'TM',\n",
" 'Honda': 'HMC',\n",
- " 'Mistubishi': 'MSBHY',\n",
+ " 'Mitsubishi': 'MSBHY',\n",
" 'Sony': 'SNE',\n",
" 'Exxon': 'XOM',\n",
" 'Chevron': 'CVX',\n",
@@ -244,7 +244,7 @@
"text": [
"Cluster 1: Microsoft, McDonalds, MasterCard, Symantec, Texas Instruments\n",
"Cluster 2: Lockheed Martin, Northrop Grumman, Boeing\n",
- "Cluster 3: Honda, Ford, Exxon, Intel, Chevron, Pepsi, Coca Cola, Toyota, Walgreen, IBM, Mistubishi, General Electrics, Valero Energy, Johnson & Johnson\n",
+ "Cluster 3: Honda, Ford, Exxon, Intel, Chevron, Pepsi, Coca Cola, Toyota, Walgreen, IBM, Mitsubishi, General Electrics, Valero Energy, Johnson & Johnson\n",
"Cluster 4: Navistar\n",
"Cluster 5: Advanced Micro Devices\n",
"Cluster 6: Bank of America, American Express\n",
diff --git a/Stock_Algorithms/Decision_Tree_Classifier_Visualize.ipynb b/Stock_Algorithms/Decision_Tree_Classifier_Visualize.ipynb
index 9772856..cc7cff2 100644
--- a/Stock_Algorithms/Decision_Tree_Classifier_Visualize.ipynb
+++ b/Stock_Algorithms/Decision_Tree_Classifier_Visualize.ipynb
@@ -260,7 +260,7 @@
{
"cell_type": "code",
"source": [
- "# Create decision tree classifer object\n",
+ "# Create decision tree classifier object\n",
"clf = DecisionTreeClassifier(random_state=0)\n",
"\n",
"# Train model\n",
diff --git a/Stock_Algorithms/Decision_Trees_Classification_Explained.ipynb b/Stock_Algorithms/Decision_Trees_Classification_Explained.ipynb
index 5e136b5..5271e17 100644
--- a/Stock_Algorithms/Decision_Trees_Classification_Explained.ipynb
+++ b/Stock_Algorithms/Decision_Trees_Classification_Explained.ipynb
@@ -51,7 +51,7 @@
{
"cell_type": "markdown",
"source": [
- "#### Getting the histocial data from yahoo finance"
+ "#### Getting the historical data from yahoo finance"
],
"metadata": {
"nteract": {
@@ -534,8 +534,8 @@
"cell_type": "code",
"source": [
"# Accuracy Score without Sklearn\n",
- "boolian = (y_test==prediction)\n",
- "accuracy = sum(boolian)/y_test.size\n",
+ "boolean = (y_test==prediction)\n",
+ "accuracy = sum(boolean)/y_test.size\n",
"accuracy"
],
"outputs": [
diff --git a/Stock_Algorithms/Decision_Trees_Classification_Part2.ipynb b/Stock_Algorithms/Decision_Trees_Classification_Part2.ipynb
index ec9ac77..db4f3e8 100644
--- a/Stock_Algorithms/Decision_Trees_Classification_Part2.ipynb
+++ b/Stock_Algorithms/Decision_Trees_Classification_Part2.ipynb
@@ -779,8 +779,8 @@
"cell_type": "code",
"source": [
"# Accuracy Score without Sklearn\n",
- "boolian = (y_test==predTree)\n",
- "accuracy = sum(boolian)/y_test.size\n",
+ "boolean = (y_test==predTree)\n",
+ "accuracy = sum(boolean)/y_test.size\n",
"accuracy"
],
"outputs": [
diff --git a/Stock_Algorithms/Decision_Trees_Classification_Part4.ipynb b/Stock_Algorithms/Decision_Trees_Classification_Part4.ipynb
index e7d1297..72f5ccd 100644
--- a/Stock_Algorithms/Decision_Trees_Classification_Part4.ipynb
+++ b/Stock_Algorithms/Decision_Trees_Classification_Part4.ipynb
@@ -173,7 +173,7 @@
{
"cell_type": "code",
"source": [
- "#Spilitting the dataset\n",
+ "#Splitting the dataset\n",
"removed =[0,50,100]\n",
"new_target = np.delete(y,removed)\n",
"new_data = np.delete(X,removed, axis=0) "
diff --git a/Stock_Algorithms/Decision_Trees_Classification_Part5.ipynb b/Stock_Algorithms/Decision_Trees_Classification_Part5.ipynb
index ee08a29..063fb80 100644
--- a/Stock_Algorithms/Decision_Trees_Classification_Part5.ipynb
+++ b/Stock_Algorithms/Decision_Trees_Classification_Part5.ipynb
@@ -348,8 +348,8 @@
"cell_type": "code",
"source": [
"# Accuracy Score without Sklearn\n",
- "boolian = (y_test==predTree)\n",
- "accuracy = sum(boolian)/y_test.size\n",
+ "boolean = (y_test==predTree)\n",
+ "accuracy = sum(boolean)/y_test.size\n",
"accuracy"
],
"outputs": [
diff --git a/Stock_Algorithms/Gradient_Ascent.ipynb b/Stock_Algorithms/Gradient_Ascent.ipynb
index 7cfffca..67ce5af 100644
--- a/Stock_Algorithms/Gradient_Ascent.ipynb
+++ b/Stock_Algorithms/Gradient_Ascent.ipynb
@@ -170,7 +170,7 @@
"source": [
"plt.title('Scatter Plot of X & y')\n",
"plt.xlabel('Independent Variable')\n",
- "plt.ylabel('Depdent Variable')\n",
+ "plt.ylabel('Dependent Variable')\n",
"plt.scatter(X, y)"
],
"outputs": [
@@ -299,12 +299,12 @@
"# Gradient rising demand\n",
"extreme_point = 0 # Initial value\n",
"alpha = 0.1 # Step, that is the learning rate\n",
- "presision = 0.001 # Range of tolerance\n",
+ "precision = 0.001 # Range of tolerance\n",
"while True:\n",
" plt.scatter(extreme_point, origin(extreme_point)) # Visualization\n",
" error = alpha * derivative(extreme_point) # Climbing pace\n",
" extreme_point += error # Climbing\n",
- " if abs(error) < presision:\n",
+ " if abs(error) < precision:\n",
" break # Exit iterative error is small\n",
"plt.show()"
],
@@ -352,12 +352,12 @@
"derivative = lambda X: 2 - 2 * X # (2-x ** 2) derivative\n",
"extreme_point = 0 # Initial value\n",
"alpha = 0.1 # Step, that is the learning rate\n",
- "presision = 0.0001 # Range of tolerance\n",
+ "precision = 0.0001 # Range of tolerance\n",
"while True:\n",
" error = alpha * derivative(extreme_point) # Climbing pace\n",
" extreme_point += error # Climbing\n",
" print(extreme_point)\n",
- " if abs(error) < presision:\n",
+ " if abs(error) < precision:\n",
" break"
],
"outputs": [
diff --git a/Stock_Algorithms/Implementing_Logistic_Regression.ipynb b/Stock_Algorithms/Implementing_Logistic_Regression.ipynb
index 8cd9d04..e3adf94 100644
--- a/Stock_Algorithms/Implementing_Logistic_Regression.ipynb
+++ b/Stock_Algorithms/Implementing_Logistic_Regression.ipynb
@@ -266,7 +266,7 @@
{
"cell_type": "markdown",
"source": [
- "Step 3: Predection"
+ "Step 3: Prediction"
],
"metadata": {}
},
@@ -642,7 +642,7 @@
{
"cell_type": "markdown",
"source": [
- "Step 4: Evaluating The Predection"
+ "Step 4: Evaluating The Prediction"
],
"metadata": {}
},
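The corrected step headers cover prediction and evaluation; a compact sketch of both steps with scikit-learn on synthetic data:

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=300, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = LogisticRegression(max_iter=1000).fit(X_train, y_train)

# Step 3: Prediction
prediction = model.predict(X_test)

# Step 4: Evaluating The Prediction
print("Accuracy:", accuracy_score(y_test, prediction))
print(confusion_matrix(y_test, prediction))
```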
diff --git a/Stock_Algorithms/K_Means.ipynb b/Stock_Algorithms/K_Means.ipynb
index 1eebfde..96daa5a 100644
--- a/Stock_Algorithms/K_Means.ipynb
+++ b/Stock_Algorithms/K_Means.ipynb
@@ -10,7 +10,7 @@
{
"cell_type": "markdown",
"source": [
- "K-means is used for clustering such as unlabled data. \n",
+ "K-means is used for clustering such as unlabeled data. \n",
"\n\nk-means clustering is a method of vector quantization, originally from signal processing, that is popular for cluster analysis in data mining. (Wikipedia)"
],
"metadata": {}
diff --git a/Stock_Algorithms/K_Nearest_Neighbors_Part2.ipynb b/Stock_Algorithms/K_Nearest_Neighbors_Part2.ipynb
index bf4fccd..2c8c950 100644
--- a/Stock_Algorithms/K_Nearest_Neighbors_Part2.ipynb
+++ b/Stock_Algorithms/K_Nearest_Neighbors_Part2.ipynb
@@ -498,7 +498,7 @@
"Ks = 10\n",
"mean_acc = np.zeros((Ks-1))\n",
"std_acc = np.zeros((Ks-1))\n",
- "ConfustionMx = [];\n",
+ "ConfusionMx = [];\n",
"for n in range(1,Ks):\n",
" \n",
" #Train Model and Predict \n",
@@ -538,7 +538,7 @@
"plt.fill_between(range(1,Ks),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.10)\n",
"plt.legend(('Accuracy ', '+/- 3xstd'))\n",
"plt.ylabel('Accuracy ')\n",
- "plt.xlabel('Number of Nabors (K)')\n",
+ "plt.xlabel('Number of Neighbors (K)')\n",
"plt.tight_layout()\n",
"plt.show()"
],
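The loop above scans K from 1 to Ks-1 and plots mean accuracy; a compact runnable sketch of the same K-selection idea on synthetic data:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = make_classification(n_samples=400, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)

Ks = 10
mean_acc = np.zeros(Ks - 1)
for n in range(1, Ks):
    # Train Model and Predict
    neigh = KNeighborsClassifier(n_neighbors=n).fit(X_train, y_train)
    mean_acc[n - 1] = accuracy_score(y_test, neigh.predict(X_test))

print("Best accuracy %.3f at K = %d" % (mean_acc.max(), mean_acc.argmax() + 1))
```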
diff --git a/Stock_Algorithms/LSTM_Neural_Networks.ipynb b/Stock_Algorithms/LSTM_Neural_Networks.ipynb
index d750fe9..b3ccf47 100644
--- a/Stock_Algorithms/LSTM_Neural_Networks.ipynb
+++ b/Stock_Algorithms/LSTM_Neural_Networks.ipynb
@@ -3,7 +3,7 @@
{
"cell_type": "markdown",
"source": [
- "# LTSM Neural Networks"
+ "# LSTM Neural Networks"
],
"metadata": {
"nteract": {
@@ -1388,7 +1388,7 @@
{
"cell_type": "code",
"source": [
- "# Calculate root mean squred error\n",
+ "# Calculate root mean squared error\n",
"trainScore = mean_squared_error([x[0][0] for x in Xtrain], trainPredict, squared=False)\n",
"print(\"Train Score: %.2f RMSE\" % (trainScore))\n",
"\n",
diff --git a/Stock_Algorithms/Lasso_Regression_Alpha_Levels.ipynb b/Stock_Algorithms/Lasso_Regression_Alpha_Levels.ipynb
index 323fe9a..a86b62e 100644
--- a/Stock_Algorithms/Lasso_Regression_Alpha_Levels.ipynb
+++ b/Stock_Algorithms/Lasso_Regression_Alpha_Levels.ipynb
@@ -286,7 +286,7 @@
" # Create a column of coefficient values\n",
" df[column_name] = lasso.coef_\n",
" \n",
- " # Return the datafram \n",
+ " # Return the dataframe\n",
" return df"
],
"outputs": [],
@@ -382,4 +382,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
-}
\ No newline at end of file
+}
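The edited helper builds one coefficient column per alpha; a self-contained sketch of that pattern (feature and column names are illustrative):

```python
import pandas as pd
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso

X, y = make_regression(n_samples=200, n_features=5, noise=1.0, random_state=0)

df = pd.DataFrame({"feature": [f"x{i}" for i in range(X.shape[1])]})
for alpha in (0.1, 1.0, 10.0):
    lasso = Lasso(alpha=alpha).fit(X, y)
    # Create a column of coefficient values for this alpha
    df[f"alpha_{alpha}"] = lasso.coef_

# Return the dataframe
print(df)
```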
diff --git a/Stock_Algorithms/Lasso_Regression_Part2.ipynb b/Stock_Algorithms/Lasso_Regression_Part2.ipynb
index 6a58717..91ea2d7 100644
--- a/Stock_Algorithms/Lasso_Regression_Part2.ipynb
+++ b/Stock_Algorithms/Lasso_Regression_Part2.ipynb
@@ -278,7 +278,7 @@
"r_squared = model.score(X_test, y_test)\n",
"n_ignored_features = sum(model.coef_ == 0)\n",
"\n",
- "# Print peformance stats \n",
+ "# Print performance stats \n",
"print(\"The model can predict {0:.1%} of the variance in the test set.\".format(r_squared))\n",
"print(\"{} out of {} features were ignored.\".format(n_ignored_features, len( model.coef_)))"
],
diff --git a/Stock_Algorithms/Least_Squares_Regression.ipynb b/Stock_Algorithms/Least_Squares_Regression.ipynb
index 9c1196e..8d1f088 100644
--- a/Stock_Algorithms/Least_Squares_Regression.ipynb
+++ b/Stock_Algorithms/Least_Squares_Regression.ipynb
@@ -444,9 +444,9 @@
"X = np.linspace(min_x, max_x, len(x))\n",
"Y = c + m * x\n",
" \n",
- "# Ploting Line\n",
+ "# Plotting Line\n",
"plt.plot(X, Y, color='green', label='Regression Line')\n",
- "# Ploting Scatter Points\n",
+ "# Plotting Scatter Points\n",
"plt.scatter(x, y, c='red', label='Scatter Plot')\n",
" \n",
"plt.xlabel('Original Stock Price')\n",
diff --git a/Stock_Algorithms/Linear_Discriminat_Analysis.ipynb b/Stock_Algorithms/Linear_Discriminant_Analysis.ipynb
similarity index 100%
rename from Stock_Algorithms/Linear_Discriminat_Analysis.ipynb
rename to Stock_Algorithms/Linear_Discriminant_Analysis.ipynb
diff --git a/Stock_Algorithms/Linear_Discriminat_Analysis_Classification.ipynb b/Stock_Algorithms/Linear_Discriminant_Analysis_Classification.ipynb
similarity index 99%
rename from Stock_Algorithms/Linear_Discriminat_Analysis_Classification.ipynb
rename to Stock_Algorithms/Linear_Discriminant_Analysis_Classification.ipynb
index 016e1f5..02ab527 100644
--- a/Stock_Algorithms/Linear_Discriminat_Analysis_Classification.ipynb
+++ b/Stock_Algorithms/Linear_Discriminant_Analysis_Classification.ipynb
@@ -3,7 +3,7 @@
{
"cell_type": "markdown",
"source": [
- "# Linear Discriminat Analysis Classification"
+ "# Linear Discriminant Analysis Classification"
],
"metadata": {}
},
@@ -510,4 +510,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
-}
\ No newline at end of file
+}
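Alongside the renamed files, a minimal sketch of the classifier they cover, on synthetic data:

```python
from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=300, n_informative=3, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Linear Discriminant Analysis as a classifier
lda = LinearDiscriminantAnalysis().fit(X_train, y_train)
print("Test accuracy:", lda.score(X_test, y_test))
```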
diff --git a/Stock_Algorithms/Linear_Regression_with_Normalize_Data.ipynb b/Stock_Algorithms/Linear_Regression_with_Normalize_Data.ipynb
index f15b711..9ba64d6 100644
--- a/Stock_Algorithms/Linear_Regression_with_Normalize_Data.ipynb
+++ b/Stock_Algorithms/Linear_Regression_with_Normalize_Data.ipynb
@@ -3,7 +3,7 @@
{
"cell_type": "markdown",
"source": [
- "# Linear Regression with Nomalize Data"
+ "# Linear Regression with Normalized Data"
],
"metadata": {}
},
diff --git a/Stock_Algorithms/Locally_Estimated_Scatterplot_Smoothing.ipynb b/Stock_Algorithms/Locally_Estimated_Scatterplot_Smoothing.ipynb
index d9820c8..eaee772 100644
--- a/Stock_Algorithms/Locally_Estimated_Scatterplot_Smoothing.ipynb
+++ b/Stock_Algorithms/Locally_Estimated_Scatterplot_Smoothing.ipynb
@@ -293,7 +293,7 @@
"\n",
" evalDF = pd.concat([evalDF, iterDF2])\n",
"\n",
- " # Reset indicies for returned DataFrames.\n",
+ " # Reset indices for returned DataFrames.\n",
" locsDF.reset_index(inplace=True)\n",
" locsDF.drop('index', axis=1, inplace=True)\n",
" locsDF['est'] = 0; evalDF['est'] = 0\n",
diff --git a/Stock_Algorithms/Logistic_Regression.ipynb b/Stock_Algorithms/Logistic_Regression.ipynb
index 94d6d5b..de31827 100644
--- a/Stock_Algorithms/Logistic_Regression.ipynb
+++ b/Stock_Algorithms/Logistic_Regression.ipynb
@@ -845,7 +845,7 @@
{
"cell_type": "markdown",
"source": [
- "Step 4: Evaluating The Predection"
+ "Step 4: Evaluating The Prediction"
],
"metadata": {}
},
diff --git a/Stock_Algorithms/Logistic_Regression_Large_Data.ipynb b/Stock_Algorithms/Logistic_Regression_Large_Data.ipynb
index 992ad63..6e4c660 100644
--- a/Stock_Algorithms/Logistic_Regression_Large_Data.ipynb
+++ b/Stock_Algorithms/Logistic_Regression_Large_Data.ipynb
@@ -257,7 +257,7 @@
{
"cell_type": "code",
"source": [
- "# Standarize features\n",
+ "# Standardize features\n",
"scaler = StandardScaler()\n",
"X_std = scaler.fit_transform(X)"
],
diff --git a/Stock_Algorithms/Mini-Batch_k-Means_Clustering.ipynb b/Stock_Algorithms/Mini-Batch_k-Means_Clustering.ipynb
index bde78cf..652d596 100644
--- a/Stock_Algorithms/Mini-Batch_k-Means_Clustering.ipynb
+++ b/Stock_Algorithms/Mini-Batch_k-Means_Clustering.ipynb
@@ -223,7 +223,7 @@
{
"cell_type": "code",
"source": [
- "# Standarize features\n",
+ "# Standardize features\n",
"scaler = StandardScaler()\n",
"X_std = scaler.fit_transform(X)"
],
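For context around the standardization fix, a minimal sketch of mini-batch k-means on synthetic data; it updates centroids from small random batches, trading a little accuracy for speed on large datasets:

```python
from sklearn.cluster import MiniBatchKMeans
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler

X, _ = make_blobs(n_samples=1000, centers=3, random_state=0)

# Standardize features
scaler = StandardScaler()
X_std = scaler.fit_transform(X)

model = MiniBatchKMeans(n_clusters=3, batch_size=100, random_state=0)
labels = model.fit_predict(X_std)
print(labels[:10])
```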
diff --git a/Stock_Algorithms/Model_Selection.ipynb b/Stock_Algorithms/Model_Selection.ipynb
index 3b911f7..8938c9e 100644
--- a/Stock_Algorithms/Model_Selection.ipynb
+++ b/Stock_Algorithms/Model_Selection.ipynb
@@ -400,7 +400,7 @@
"cell_type": "code",
"source": [
"# View best hyperparameters\n",
- "print('Best Number Of Princpal Components:', best_model.best_estimator_.get_params()['preprocess__pca__n_components'])\n",
+ "print('Best Number Of Principal Components:', best_model.best_estimator_.get_params()['preprocess__pca__n_components'])\n",
"print('Best Penalty:', best_model.best_estimator_.get_params()['classifier__penalty'])\n",
"print('Best C:', best_model.best_estimator_.get_params()['classifier__C'])"
],
@@ -409,7 +409,7 @@
"output_type": "stream",
"name": "stdout",
"text": [
- "Best Number Of Princpal Components: 1\n",
+ "Best Number Of Principal Components: 1\n",
"Best Penalty: l1\n",
"Best C: 1.0\n"
]
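The printed keys imply a pipeline with `preprocess` and `classifier` steps searched by GridSearchCV; a hedged reconstruction on synthetic data (step names and grids are inferred from the output, not copied from the notebook):

```python
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_classification(n_samples=300, n_features=10, random_state=0)

pipe = Pipeline([
    ("preprocess", Pipeline([("sc", StandardScaler()), ("pca", PCA())])),
    ("classifier", LogisticRegression(solver="liblinear")),  # liblinear supports l1
])
param_grid = {
    "preprocess__pca__n_components": [1, 2, 5],
    "classifier__penalty": ["l1", "l2"],
    "classifier__C": [0.1, 1.0, 10.0],
}
best_model = GridSearchCV(pipe, param_grid, cv=5).fit(X, y)

# View best hyperparameters
print("Best Number Of Principal Components:",
      best_model.best_estimator_.get_params()["preprocess__pca__n_components"])
print("Best Penalty:", best_model.best_estimator_.get_params()["classifier__penalty"])
print("Best C:", best_model.best_estimator_.get_params()["classifier__C"])
```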
diff --git a/Stock_Algorithms/Multiple_Linear_Regression.ipynb b/Stock_Algorithms/Multiple_Linear_Regression.ipynb
index 88f2599..bbe6891 100644
--- a/Stock_Algorithms/Multiple_Linear_Regression.ipynb
+++ b/Stock_Algorithms/Multiple_Linear_Regression.ipynb
@@ -229,7 +229,7 @@
{
"cell_type": "code",
"source": [
- "# Acoiding Dummy Variable Trap\n",
+ "# Avoiding Dummy Variable Trap\n",
"X = X[: , 1:]"
],
"outputs": [],
@@ -1311,7 +1311,7 @@
"print(\"\\nRetrieving manually the parameter estimates:\")\n",
"print(model._results.params)\n",
"\n",
- "# Peform analysis of variance on fitted linear model\n",
+ "# Perform analysis of variance on fitted linear model\n",
"anova_results = anova_lm(model)\n",
"\n",
"print('\\nANOVA results')\n",
diff --git a/Stock_Algorithms/Optimization_Parameters.ipynb b/Stock_Algorithms/Optimization_Parameters.ipynb
index c85de70..05b9eac 100644
--- a/Stock_Algorithms/Optimization_Parameters.ipynb
+++ b/Stock_Algorithms/Optimization_Parameters.ipynb
@@ -271,7 +271,7 @@
"\n",
"# Create a pipeline in three steps. \n",
"# 1. Standardize the data.\n",
- "# 2. Tranform the data with PCA.\n",
+ "# 2. Transform the data with PCA.\n",
"# 3. Train a logistic regression on the data.\n",
"pipe = Pipeline(steps=[('sc', sc), \n",
" ('pca', pca), \n",
diff --git a/Stock_Algorithms/Polynomial_Regression.ipynb b/Stock_Algorithms/Polynomial_Regression.ipynb
index df5896c..6ddca9b 100644
--- a/Stock_Algorithms/Polynomial_Regression.ipynb
+++ b/Stock_Algorithms/Polynomial_Regression.ipynb
@@ -10,9 +10,9 @@
{
"cell_type": "markdown",
"source": [
- "Polynomial Regression is a technique that is used for a nonlinear equation byt taking polynomial functions of indepedent variable. \n",
+ "Polynomial Regression is a technique that is used for a nonlinear equation byt taking polynomial functions of independent variable. \n",
"\n",
- "Transform the data to polynomail. Polynomial regression is for special case of the general linear regression model. It is useful for describing curvilinear relationships. Curvilinear relationships have by squaring or setting higher-order terms of the predictor variables.\n",
+ "Transform the data to polynomial. Polynomial regression is for special case of the general linear regression model. It is useful for describing curvilinear relationships. Curvilinear relationships have by squaring or setting higher-order terms of the predictor variables.\n",
"\n",
"Quadratic - 2nd order \n",
"Cubic - 3rd order \n",
diff --git a/Stock_Algorithms/PyCaret_Stock_Prediction.ipynb b/Stock_Algorithms/PyCaret_Stock_Prediction.ipynb
index 9f14750..257688e 100644
--- a/Stock_Algorithms/PyCaret_Stock_Prediction.ipynb
+++ b/Stock_Algorithms/PyCaret_Stock_Prediction.ipynb
@@ -184,7 +184,7 @@
{
"output_type": "display_data",
"data": {
- "text/html": "\n
\n \n \n | \n Description | \n Value | \n
\n \n \n \n 0 | \n session_id | \n 123 | \n
\n \n 1 | \n Target | \n Adj Close | \n
\n \n 2 | \n Original Data | \n (454, 6) | \n
\n \n 3 | \n Missing Values | \n False | \n
\n \n 4 | \n Numeric Features | \n 5 | \n
\n \n 5 | \n Categorical Features | \n 0 | \n
\n \n 6 | \n Ordinal Features | \n False | \n
\n \n 7 | \n High Cardinality Features | \n False | \n
\n \n 8 | \n High Cardinality Method | \n None | \n
\n \n 9 | \n Transformed Train Set | \n (317, 1) | \n
\n \n 10 | \n Transformed Test Set | \n (137, 1) | \n
\n \n 11 | \n Shuffle Train-Test | \n True | \n
\n \n 12 | \n Stratify Train-Test | \n False | \n
\n \n 13 | \n Fold Generator | \n KFold | \n
\n \n 14 | \n Fold Number | \n 10 | \n
\n \n 15 | \n CPU Jobs | \n -1 | \n
\n \n 16 | \n Use GPU | \n True | \n
\n \n 17 | \n Log Experiment | \n False | \n
\n \n 18 | \n Experiment Name | \n reg-default-name | \n
\n \n 19 | \n USI | \n 21a0 | \n
\n \n 20 | \n Imputation Type | \n simple | \n
\n \n 21 | \n Iterative Imputation Iteration | \n None | \n
\n \n 22 | \n Numeric Imputer | \n mean | \n
\n \n 23 | \n Iterative Imputation Numeric Model | \n None | \n
\n \n 24 | \n Categorical Imputer | \n constant | \n
\n \n 25 | \n Iterative Imputation Categorical Model | \n None | \n
\n \n 26 | \n Unknown Categoricals Handling | \n least_frequent | \n
\n \n 27 | \n Normalize | \n False | \n
\n \n 28 | \n Normalize Method | \n None | \n
\n \n 29 | \n Transformation | \n False | \n
\n \n 30 | \n Transformation Method | \n None | \n
\n \n 31 | \n PCA | \n False | \n
\n \n 32 | \n PCA Method | \n None | \n
\n \n 33 | \n PCA Components | \n None | \n
\n \n 34 | \n Ignore Low Variance | \n False | \n
\n \n 35 | \n Combine Rare Levels | \n False | \n
\n \n 36 | \n Rare Level Threshold | \n None | \n
\n \n 37 | \n Numeric Binning | \n False | \n
\n \n 38 | \n Remove Outliers | \n False | \n
\n \n 39 | \n Outliers Threshold | \n None | \n
\n \n 40 | \n Remove Multicollinearity | \n False | \n
\n \n 41 | \n Multicollinearity Threshold | \n None | \n
\n \n 42 | \n Remove Perfect Collinearity | \n True | \n
\n \n 43 | \n Clustering | \n False | \n
\n \n 44 | \n Clustering Iteration | \n None | \n
\n \n 45 | \n Polynomial Features | \n False | \n
\n \n 46 | \n Polynomial Degree | \n None | \n
\n \n 47 | \n Trignometry Features | \n False | \n
\n \n 48 | \n Polynomial Threshold | \n None | \n
\n \n 49 | \n Group Features | \n False | \n
\n \n 50 | \n Feature Selection | \n False | \n
\n \n 51 | \n Feature Selection Method | \n classic | \n
\n \n 52 | \n Features Selection Threshold | \n None | \n
\n \n 53 | \n Feature Interaction | \n False | \n
\n \n 54 | \n Feature Ratio | \n False | \n
\n \n 55 | \n Interaction Threshold | \n None | \n
\n \n 56 | \n Transform Target | \n False | \n
\n \n 57 | \n Transform Target Method | \n box-cox | \n
\n \n
\n",
+ "text/html": "\n\n \n \n | \n Description | \n Value | \n
\n \n \n \n 0 | \n session_id | \n 123 | \n
\n \n 1 | \n Target | \n Adj Close | \n
\n \n 2 | \n Original Data | \n (454, 6) | \n
\n \n 3 | \n Missing Values | \n False | \n
\n \n 4 | \n Numeric Features | \n 5 | \n
\n \n 5 | \n Categorical Features | \n 0 | \n
\n \n 6 | \n Ordinal Features | \n False | \n
\n \n 7 | \n High Cardinality Features | \n False | \n
\n \n 8 | \n High Cardinality Method | \n None | \n
\n \n 9 | \n Transformed Train Set | \n (317, 1) | \n
\n \n 10 | \n Transformed Test Set | \n (137, 1) | \n
\n \n 11 | \n Shuffle Train-Test | \n True | \n
\n \n 12 | \n Stratify Train-Test | \n False | \n
\n \n 13 | \n Fold Generator | \n KFold | \n
\n \n 14 | \n Fold Number | \n 10 | \n
\n \n 15 | \n CPU Jobs | \n -1 | \n
\n \n 16 | \n Use GPU | \n True | \n
\n \n 17 | \n Log Experiment | \n False | \n
\n \n 18 | \n Experiment Name | \n reg-default-name | \n
\n \n 19 | \n USI | \n 21a0 | \n
\n \n 20 | \n Imputation Type | \n simple | \n
\n \n 21 | \n Iterative Imputation Iteration | \n None | \n
\n \n 22 | \n Numeric Imputer | \n mean | \n
\n \n 23 | \n Iterative Imputation Numeric Model | \n None | \n
\n \n 24 | \n Categorical Imputer | \n constant | \n
\n \n 25 | \n Iterative Imputation Categorical Model | \n None | \n
\n \n 26 | \n Unknown Categoricals Handling | \n least_frequent | \n
\n \n 27 | \n Normalize | \n False | \n
\n \n 28 | \n Normalize Method | \n None | \n
\n \n 29 | \n Transformation | \n False | \n
\n \n 30 | \n Transformation Method | \n None | \n
\n \n 31 | \n PCA | \n False | \n
\n \n 32 | \n PCA Method | \n None | \n
\n \n 33 | \n PCA Components | \n None | \n
\n \n 34 | \n Ignore Low Variance | \n False | \n
\n \n 35 | \n Combine Rare Levels | \n False | \n
\n \n 36 | \n Rare Level Threshold | \n None | \n
\n \n 37 | \n Numeric Binning | \n False | \n
\n \n 38 | \n Remove Outliers | \n False | \n
\n \n 39 | \n Outliers Threshold | \n None | \n
\n \n 40 | \n Remove Multicollinearity | \n False | \n
\n \n 41 | \n Multicollinearity Threshold | \n None | \n
\n \n 42 | \n Remove Perfect Collinearity | \n True | \n
\n \n 43 | \n Clustering | \n False | \n
\n \n 44 | \n Clustering Iteration | \n None | \n
\n \n 45 | \n Polynomial Features | \n False | \n
\n \n 46 | \n Polynomial Degree | \n None | \n
\n \n 47 | \n Trigonometry Features | \n False | \n
\n \n 48 | \n Polynomial Threshold | \n None | \n
\n \n 49 | \n Group Features | \n False | \n
\n \n 50 | \n Feature Selection | \n False | \n
\n \n 51 | \n Feature Selection Method | \n classic | \n
\n \n 52 | \n Features Selection Threshold | \n None | \n
\n \n 53 | \n Feature Interaction | \n False | \n
\n \n 54 | \n Feature Ratio | \n False | \n
\n \n 55 | \n Interaction Threshold | \n None | \n
\n \n 56 | \n Transform Target | \n False | \n
\n \n 57 | \n Transform Target Method | \n box-cox | \n
\n \n
\n",
"text/plain": ""
},
"metadata": {}
diff --git a/Stock_Algorithms/PyCaret_Stock_Prediction_Part2.ipynb b/Stock_Algorithms/PyCaret_Stock_Prediction_Part2.ipynb
index bdc1087..b634811 100644
--- a/Stock_Algorithms/PyCaret_Stock_Prediction_Part2.ipynb
+++ b/Stock_Algorithms/PyCaret_Stock_Prediction_Part2.ipynb
@@ -205,7 +205,7 @@
{
"output_type": "display_data",
"data": {
- "text/html": "\n\n \n \n | \n Description | \n Value | \n
\n \n \n \n 0 | \n session_id | \n 123 | \n
\n \n 1 | \n Target | \n Adj Close | \n
\n \n 2 | \n Original Data | \n (454, 6) | \n
\n \n 3 | \n Missing Values | \n False | \n
\n \n 4 | \n Numeric Features | \n 5 | \n
\n \n 5 | \n Categorical Features | \n 0 | \n
\n \n 6 | \n Ordinal Features | \n False | \n
\n \n 7 | \n High Cardinality Features | \n False | \n
\n \n 8 | \n High Cardinality Method | \n None | \n
\n \n 9 | \n Transformed Train Set | \n (317, 1) | \n
\n \n 10 | \n Transformed Test Set | \n (137, 1) | \n
\n \n 11 | \n Shuffle Train-Test | \n True | \n
\n \n 12 | \n Stratify Train-Test | \n False | \n
\n \n 13 | \n Fold Generator | \n KFold | \n
\n \n 14 | \n Fold Number | \n 10 | \n
\n \n 15 | \n CPU Jobs | \n -1 | \n
\n \n 16 | \n Use GPU | \n False | \n
\n \n 17 | \n Log Experiment | \n False | \n
\n \n 18 | \n Experiment Name | \n reg-default-name | \n
\n \n 19 | \n USI | \n 6901 | \n
\n \n 20 | \n Imputation Type | \n simple | \n
\n \n 21 | \n Iterative Imputation Iteration | \n None | \n
\n \n 22 | \n Numeric Imputer | \n mean | \n
\n \n 23 | \n Iterative Imputation Numeric Model | \n None | \n
\n \n 24 | \n Categorical Imputer | \n constant | \n
\n \n 25 | \n Iterative Imputation Categorical Model | \n None | \n
\n \n 26 | \n Unknown Categoricals Handling | \n least_frequent | \n
\n \n 27 | \n Normalize | \n False | \n
\n \n 28 | \n Normalize Method | \n None | \n
\n \n 29 | \n Transformation | \n False | \n
\n \n 30 | \n Transformation Method | \n None | \n
\n \n 31 | \n PCA | \n False | \n
\n \n 32 | \n PCA Method | \n None | \n
\n \n 33 | \n PCA Components | \n None | \n
\n \n 34 | \n Ignore Low Variance | \n False | \n
\n \n 35 | \n Combine Rare Levels | \n False | \n
\n \n 36 | \n Rare Level Threshold | \n None | \n
\n \n 37 | \n Numeric Binning | \n False | \n
\n \n 38 | \n Remove Outliers | \n False | \n
\n \n 39 | \n Outliers Threshold | \n None | \n
\n \n 40 | \n Remove Multicollinearity | \n False | \n
\n \n 41 | \n Multicollinearity Threshold | \n None | \n
\n \n 42 | \n Remove Perfect Collinearity | \n True | \n
\n \n 43 | \n Clustering | \n False | \n
\n \n 44 | \n Clustering Iteration | \n None | \n
\n \n 45 | \n Polynomial Features | \n False | \n
\n \n 46 | \n Polynomial Degree | \n None | \n
\n \n 47 | \n Trignometry Features | \n False | \n
\n \n 48 | \n Polynomial Threshold | \n None | \n
\n \n 49 | \n Group Features | \n False | \n
\n \n 50 | \n Feature Selection | \n False | \n
\n \n 51 | \n Feature Selection Method | \n classic | \n
\n \n 52 | \n Features Selection Threshold | \n None | \n
\n \n 53 | \n Feature Interaction | \n False | \n
\n \n 54 | \n Feature Ratio | \n False | \n
\n \n 55 | \n Interaction Threshold | \n None | \n
\n \n 56 | \n Transform Target | \n False | \n
\n \n 57 | \n Transform Target Method | \n box-cox | \n
\n \n
\n",
+ "text/html": "\n\n \n \n | \n Description | \n Value | \n
\n \n \n \n 0 | \n session_id | \n 123 | \n
\n \n 1 | \n Target | \n Adj Close | \n
\n \n 2 | \n Original Data | \n (454, 6) | \n
\n \n 3 | \n Missing Values | \n False | \n
\n \n 4 | \n Numeric Features | \n 5 | \n
\n \n 5 | \n Categorical Features | \n 0 | \n
\n \n 6 | \n Ordinal Features | \n False | \n
\n \n 7 | \n High Cardinality Features | \n False | \n
\n \n 8 | \n High Cardinality Method | \n None | \n
\n \n 9 | \n Transformed Train Set | \n (317, 1) | \n
\n \n 10 | \n Transformed Test Set | \n (137, 1) | \n
\n \n 11 | \n Shuffle Train-Test | \n True | \n
\n \n 12 | \n Stratify Train-Test | \n False | \n
\n \n 13 | \n Fold Generator | \n KFold | \n
\n \n 14 | \n Fold Number | \n 10 | \n
\n \n 15 | \n CPU Jobs | \n -1 | \n
\n \n 16 | \n Use GPU | \n False | \n
\n \n 17 | \n Log Experiment | \n False | \n
\n \n 18 | \n Experiment Name | \n reg-default-name | \n
\n \n 19 | \n USI | \n 6901 | \n
\n \n 20 | \n Imputation Type | \n simple | \n
\n \n 21 | \n Iterative Imputation Iteration | \n None | \n
\n \n 22 | \n Numeric Imputer | \n mean | \n
\n \n 23 | \n Iterative Imputation Numeric Model | \n None | \n
\n \n 24 | \n Categorical Imputer | \n constant | \n
\n \n 25 | \n Iterative Imputation Categorical Model | \n None | \n
\n \n 26 | \n Unknown Categoricals Handling | \n least_frequent | \n
\n \n 27 | \n Normalize | \n False | \n
\n \n 28 | \n Normalize Method | \n None | \n
\n \n 29 | \n Transformation | \n False | \n
\n \n 30 | \n Transformation Method | \n None | \n
\n \n 31 | \n PCA | \n False | \n
\n \n 32 | \n PCA Method | \n None | \n
\n \n 33 | \n PCA Components | \n None | \n
\n \n 34 | \n Ignore Low Variance | \n False | \n
\n \n 35 | \n Combine Rare Levels | \n False | \n
\n \n 36 | \n Rare Level Threshold | \n None | \n
\n \n 37 | \n Numeric Binning | \n False | \n
\n \n 38 | \n Remove Outliers | \n False | \n
\n \n 39 | \n Outliers Threshold | \n None | \n
\n \n 40 | \n Remove Multicollinearity | \n False | \n
\n \n 41 | \n Multicollinearity Threshold | \n None | \n
\n \n 42 | \n Remove Perfect Collinearity | \n True | \n
\n \n 43 | \n Clustering | \n False | \n
\n \n 44 | \n Clustering Iteration | \n None | \n
\n \n 45 | \n Polynomial Features | \n False | \n
\n \n 46 | \n Polynomial Degree | \n None | \n
\n \n 47 | \n Trigonometry Features | \n False | \n
\n \n 48 | \n Polynomial Threshold | \n None | \n
\n \n 49 | \n Group Features | \n False | \n
\n \n 50 | \n Feature Selection | \n False | \n
\n \n 51 | \n Feature Selection Method | \n classic | \n
\n \n 52 | \n Features Selection Threshold | \n None | \n
\n \n 53 | \n Feature Interaction | \n False | \n
\n \n 54 | \n Feature Ratio | \n False | \n
\n \n 55 | \n Interaction Threshold | \n None | \n
\n \n 56 | \n Transform Target | \n False | \n
\n \n 57 | \n Transform Target Method | \n box-cox | \n
\n \n
\n",
"text/plain": ""
},
"metadata": {}
diff --git a/Stock_Algorithms/Quantile_Regression.ipynb b/Stock_Algorithms/Quantile_Regression.ipynb
index f168664..d487043 100644
--- a/Stock_Algorithms/Quantile_Regression.ipynb
+++ b/Stock_Algorithms/Quantile_Regression.ipynb
@@ -10,7 +10,7 @@
{
"cell_type": "markdown",
"source": [
- "Quantile Regression is estimating either the condintional median or other quantiles of the response variable. This regression is used for when outliers, high skeweness and heteroscedasticity exist in the data. However, quantile regression predicts a quantile (or percentile) for a given indpedent variables.\n",
+ "Quantile Regression is estimating either the conditional median or other quantiles of the response variable. This regression is used for when outliers, high skeweness and heteroscedasticity exist in the data. However, quantile regression predicts a quantile (or percentile) for a given indpedent variables.\n",
"\n",
"\n",
" 1. Quite beneficial when heteroscedasticity is present in the data.\n",
diff --git a/Stock_Algorithms/Ridge_Regression.ipynb b/Stock_Algorithms/Ridge_Regression.ipynb
index 8c3680e..65b67cd 100644
--- a/Stock_Algorithms/Ridge_Regression.ipynb
+++ b/Stock_Algorithms/Ridge_Regression.ipynb
@@ -13,7 +13,7 @@
"Ridge Regression is a technique for analyzing multiple regression that suffer from multicollinearity. However, multicollinearity occurs, least squares estimates are unbiased, but their variances are large so they may be far from the true value.\n",
"\n",
"\n",
- "Regularization is for solving over-fitting problem which implies model performing well on training data. However, it performing poorly on validation test data. In addition, regularization solves problem by adding a penatly term to the objective function and control the model complexity using that penalty term.\n",
+ "Regularization is for solving over-fitting problem which implies model performing well on training data. However, it performing poorly on validation test data. In addition, regularization solves problem by adding a penalty term to the objective function and control the model complexity using that penalty term.\n",
"\n",
"1. Large number of variables\n",
"2. Low ratio of number observations to number of variables\n",
@@ -24,7 +24,7 @@
"\n",
"L1 Loss function or L1 Regularization is minimizing the objective function by adding a penalty term to the sum of the absolute values of coefficients. This is called least absolute deviations method.\n",
"\n",
- "L2 Loss function or L2 Regularization is to minimize the objective function by adding a penalty term to the sum of the squares of cofficients. "
+ "L2 Loss function or L2 Regularization is to minimize the objective function by adding a penalty term to the sum of the squares of coefficients. "
],
"metadata": {}
},
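A small contrast of the two penalties just described: L2 (Ridge) shrinks coefficients toward zero, while L1 (Lasso) can set them exactly to zero (synthetic data):

```python
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso, Ridge

X, y = make_regression(n_samples=100, n_features=8, n_informative=3,
                       noise=1.0, random_state=0)

ridge = Ridge(alpha=10.0).fit(X, y)   # L2 penalty: sum of squared coefficients
lasso = Lasso(alpha=10.0).fit(X, y)   # L1 penalty: sum of absolute coefficients

print("Ridge coefficients set to zero:", sum(ridge.coef_ == 0))  # typically 0
print("Lasso coefficients set to zero:", sum(lasso.coef_ == 0))  # typically > 0
```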
diff --git a/Stock_Algorithms/SVC_Predicted_Probabilities.ipynb b/Stock_Algorithms/SVC_Predicted_Probabilities.ipynb
index 861daf6..9c49198 100644
--- a/Stock_Algorithms/SVC_Predicted_Probabilities.ipynb
+++ b/Stock_Algorithms/SVC_Predicted_Probabilities.ipynb
@@ -223,7 +223,7 @@
{
"cell_type": "code",
"source": [
- "# Standarize features\n",
+ "# Standardize features\n",
"scaler = StandardScaler()\n",
"X_std = scaler.fit_transform(X)"
],
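One detail worth noting alongside the standardization fix: `SVC` only exposes `predict_proba` when constructed with `probability=True`. A minimal sketch on synthetic data:

```python
from sklearn.datasets import make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, random_state=0)

# Standardize features
scaler = StandardScaler()
X_std = scaler.fit_transform(X)

model = SVC(kernel="linear", probability=True, random_state=0).fit(X_std, y)
print(model.predict_proba(X_std[:3]))  # per-class probabilities
```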
diff --git a/Stock_Algorithms/Simple_Linear_Regression.ipynb b/Stock_Algorithms/Simple_Linear_Regression.ipynb
index 293f31a..22574fa 100644
--- a/Stock_Algorithms/Simple_Linear_Regression.ipynb
+++ b/Stock_Algorithms/Simple_Linear_Regression.ipynb
@@ -10,7 +10,7 @@
{
"cell_type": "markdown",
"source": [
- "Linear Regression is the simplest form of regression. The dependent variable is continuous; however, the relationship between the dependent variable and indepedent variables is assumed to be linear. \n",
+ "Linear Regression is the simplest form of regression. The dependent variable is continuous; however, the relationship between the dependent variable and independent variables is assumed to be linear. \n",
"\n",
"# Equation: y = mx + b\n",
"\n",
diff --git a/Stock_Algorithms/Simple_Linear_Regression_Part2.ipynb b/Stock_Algorithms/Simple_Linear_Regression_Part2.ipynb
index 0ada3d8..871fcd0 100644
--- a/Stock_Algorithms/Simple_Linear_Regression_Part2.ipynb
+++ b/Stock_Algorithms/Simple_Linear_Regression_Part2.ipynb
@@ -10,7 +10,7 @@
{
"cell_type": "markdown",
"source": [
- "Linear Regression is the simplest form of regression. The dependent variable is continuous; however, the relationship between the dependent variable and indepedent variables is assumed to be linear. \n",
+ "Linear Regression is the simplest form of regression. The dependent variable is continuous; however, the relationship between the dependent variable and independent variables is assumed to be linear. \n",
"\n",
"# Equation: y = mx + b\n",
"\n",
diff --git a/Stock_Algorithms/Simple_Linear_Regression_with_Normalize_Data.ipynb b/Stock_Algorithms/Simple_Linear_Regression_with_Normalize_Data.ipynb
index e3d0dc8..05a0eab 100644
--- a/Stock_Algorithms/Simple_Linear_Regression_with_Normalize_Data.ipynb
+++ b/Stock_Algorithms/Simple_Linear_Regression_with_Normalize_Data.ipynb
@@ -3,7 +3,7 @@
{
"cell_type": "markdown",
"source": [
- "# Linear Regression with Nomalize Data"
+ "# Linear Regression with Normalized Data"
],
"metadata": {}
},
diff --git a/Stock_Algorithms/Stochastic_Gradient_Descent_Classification.ipynb b/Stock_Algorithms/Stochastic_Gradient_Descent_Classification.ipynb
index 39a1d9c..868bd93 100644
--- a/Stock_Algorithms/Stochastic_Gradient_Descent_Classification.ipynb
+++ b/Stock_Algorithms/Stochastic_Gradient_Descent_Classification.ipynb
@@ -598,7 +598,7 @@
{
"cell_type": "code",
"source": [
- "from sklearn.metrics import r2_score # Continous and Binary\n",
+ "from sklearn.metrics import r2_score # Continuous and Binary\n",
"r2_score(X,Y)"
],
"outputs": [
diff --git a/Stock_Algorithms/Stochastic_Gradient_Descent_Regression_Part2.ipynb b/Stock_Algorithms/Stochastic_Gradient_Descent_Regression_Part2.ipynb
index 6a7e0e0..eec0056 100644
--- a/Stock_Algorithms/Stochastic_Gradient_Descent_Regression_Part2.ipynb
+++ b/Stock_Algorithms/Stochastic_Gradient_Descent_Regression_Part2.ipynb
@@ -273,7 +273,7 @@
{
"cell_type": "code",
"source": [
- "from sklearn.metrics import r2_score # Continous and Binary\n",
+ "from sklearn.metrics import r2_score # Continuous and Binary\n",
"r2_score(X,Y)"
],
"outputs": [
diff --git a/Stock_Algorithms/Support_Vector_Machine.ipynb b/Stock_Algorithms/Support_Vector_Machine.ipynb
index 1733154..31e3da4 100644
--- a/Stock_Algorithms/Support_Vector_Machine.ipynb
+++ b/Stock_Algorithms/Support_Vector_Machine.ipynb
@@ -400,7 +400,7 @@
{
"cell_type": "code",
"source": [
- "# Clheck for nan\n",
+ "# Check for nan\n",
"np.isnan(price)"
],
"outputs": [
diff --git a/Stock_Algorithms/TensorFlow_LinearRegression2.ipynb b/Stock_Algorithms/TensorFlow_LinearRegression2.ipynb
index 10b1956..e0f8e8c 100644
--- a/Stock_Algorithms/TensorFlow_LinearRegression2.ipynb
+++ b/Stock_Algorithms/TensorFlow_LinearRegression2.ipynb
@@ -392,7 +392,7 @@
"source": [
"batch_size = 32\n",
"learning_rate = 0.003\n",
- "n_epoches = 6000"
+ "n_epochs = 6000"
],
"outputs": [],
"execution_count": 30,
@@ -420,7 +420,7 @@
{
"cell_type": "code",
"source": [
- "# converting train and test dataframes to matices to creat datasets\n",
+ "# converting train and test dataframes to matrices to create datasets\n",
"train_d = train_d.astype('float32').as_matrix()\n",
"test_d = test_d.astype('float32').as_matrix()"
],
@@ -553,7 +553,7 @@
"sess = tf.Session()\n",
"sess.run(tf.global_variables_initializer())\n",
"\n",
- "for epoch in range(n_epoches):\n",
+ "for epoch in range(n_epochs):\n",
" sess.run(train_init)\n",
" try:\n",
" # Loop until all elements have been consumed\n",
@@ -683,4 +683,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
-}
\ No newline at end of file
+}
diff --git a/Stock_Algorithms/Time_Series_Decomposition_Random_Walks.ipynb b/Stock_Algorithms/Time_Series_Decomposition_Random_Walks.ipynb
index 0bb9ce3..b407384 100644
--- a/Stock_Algorithms/Time_Series_Decomposition_Random_Walks.ipynb
+++ b/Stock_Algorithms/Time_Series_Decomposition_Random_Walks.ipynb
@@ -379,7 +379,7 @@
"import statsmodels.api as sm\n",
"\n",
"rcParams['figure.figsize'] = 11, 9\n",
- "decomposed_volume = sm.tsa.seasonal_decompose(dataset[\"High\"],freq=360) # The frequncy is annual\n",
+ "decomposed_volume = sm.tsa.seasonal_decompose(dataset[\"High\"],freq=360) # The frequency is annual\n",
"figure = decomposed_volume.plot()\n",
"plt.show()"
],
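A portability note for the corrected cell: newer statsmodels versions renamed the `freq=` argument of `seasonal_decompose` to `period=`. A self-contained sketch with synthetic daily data and an annual (360-day) cycle:

```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm

idx = pd.date_range("2015-01-01", periods=1080, freq="D")
high = pd.Series(
    np.linspace(50, 80, 1080) + 5 * np.sin(2 * np.pi * np.arange(1080) / 360),
    index=idx,
)

# The frequency/period is annual
decomposed_volume = sm.tsa.seasonal_decompose(high, period=360)
figure = decomposed_volume.plot()
plt.show()
```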