
Dev #2

Open
wants to merge 20 commits into base: main
597 changes: 597 additions & 0 deletions .ipynb_checkpoints/CALCE_ed-checkpoint.ipynb

Large diffs are not rendered by default.

1,126 changes: 1,126 additions & 0 deletions .ipynb_checkpoints/MLP-checkpoint.ipynb

Large diffs are not rendered by default.

1,172 changes: 1,172 additions & 0 deletions .ipynb_checkpoints/MLP_ed-checkpoint.ipynb

Large diffs are not rendered by default.

842 changes: 842 additions & 0 deletions .ipynb_checkpoints/RNN & LSTM-checkpoint.ipynb

Large diffs are not rendered by default.

112 changes: 112 additions & 0 deletions .ipynb_checkpoints/RNN & LSTM_ed-checkpoint.ipynb
@@ -0,0 +1,112 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "1100232a",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import random\n",
"import math\n",
"import os\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.cm as cm\n",
"import pandas as pd\n",
"import glob\n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import torchvision\n",
"%matplotlib inline\n",
"\n",
"from math import sqrt\n",
"from sklearn.metrics import mean_absolute_error\n",
"from sklearn.metrics import mean_squared_error"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "1e76422e",
"metadata": {},
"outputs": [],
"source": [
"def drop_outlier(array,count,bins):\n",
" index = []\n",
" range_ = np.arange(1,count,bins)\n",
" for i in range_[:-1]:\n",
" array_lim = array[i:i+bins]\n",
" sigma = np.std(array_lim)\n",
" mean = np.mean(array_lim)\n",
" th_max,th_min = mean + sigma*2, mean - sigma*2\n",
" idx = np.where((array_lim < th_max) & (array_lim > th_min))\n",
" idx = idx[0] + i\n",
" index.extend(list(idx))\n",
" return np.array(index)\n",
"\n",
"def build_sequences(text, window_size):\n",
" #text:list of capacity\n",
" x, y = [],[]\n",
" for i in range(len(text) - window_size):\n",
" sequence = text[i:i+window_size]\n",
" target = text[i+1:i+1+window_size]\n",
"\n",
" x.append(sequence)\n",
" y.append(target)\n",
"\n",
" return np.array(x), np.array(y)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "02188eb3",
"metadata": {},
"outputs": [],
"source": [
"# 留一评估:一组数据为测试集,其他所有数据全部拿来训练\n",
"def get_train_test(data_dict, name, window_size=8):\n",
" data_sequence=data_dict[name]['capacity']\n",
" train_data, test_data = data_sequence[:window_size+1], data_sequence[window_size+1:]\n",
" train_x, train_y = build_sequences(text=train_data, window_size=window_size)\n",
" for k, v in data_dict.items():\n",
" if k != name:\n",
" data_x, data_y = build_sequences(text=v['capacity'], window_size=window_size)\n",
" train_x, train_y = np.r_[train_x, data_x], np.r_[train_y, data_y]\n",
" \n",
" return train_x, train_y, list(train_data), list(test_data)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fd3cafbb",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
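
For context on how the helpers defined in this checkpoint fit together, here is a minimal usage sketch (not part of the diff). It assumes drop_outlier, build_sequences, and get_train_test from the cells above are in scope, and uses a hypothetical data_dict with synthetic capacity curves; in the notebooks the dictionary would be built from the CALCE capacity data instead.

import numpy as np

# Hypothetical example input: two cells with synthetic, linearly fading capacity.
data_dict = {
    'CS2_35': {'capacity': np.linspace(1.10, 0.70, 200)},
    'CS2_36': {'capacity': np.linspace(1.10, 0.75, 180)},
}

# Optional cleaning step: keep only indices whose values lie within 2 sigma of the per-bin mean.
keep_idx = drop_outlier(data_dict['CS2_35']['capacity'], count=200, bins=40)

# Leave-one-out split: only the first window_size+1 points of 'CS2_35' seed the training
# windows, the rest of 'CS2_35' is held out, and every other cell is used for training in full.
train_x, train_y, train_data, test_data = get_train_test(data_dict, name='CS2_35', window_size=8)

print(train_x.shape, train_y.shape)     # (num_windows, 8) inputs and one-step-shifted targets
print(len(train_data), len(test_data))  # 9 seed points vs. 191 held-out points for 'CS2_35'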