Skip to content
This repository has been archived by the owner on Oct 3, 2020. It is now read-only.

Commit

Permalink
Refactor and add pre-commit config (#142)
Browse files Browse the repository at this point in the history
* add pre-commit config

* fix linting errors

* refactor into modules

* refactor into functions

* refactor into modules

* #139 test preemptible node mapping
  • Loading branch information
hjacobs authored Apr 10, 2020
1 parent fc8fd44 commit 8305141
Show file tree
Hide file tree
Showing 34 changed files with 1,359 additions and 943 deletions.
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
.*.swp
*.egg*
__pycache__/
output/
.pytest_cache/
.coverage
.idea/
venv/
.vscode/
.mypy_cache
125 changes: 125 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
minimum_pre_commit_version: 1.21.0
repos:
  # meta hooks: sanity-check this pre-commit configuration itself

  - repo: meta
    hooks:
      - id: check-hooks-apply
      - id: check-useless-excludes

  - repo: https://codeberg.org/hjacobs/kube-manifest-lint
    rev: 0.2.0
    hooks:
      - id: kube-manifest-lint
        # Helm templates are not valid manifests before rendering
        exclude: ^chart/kube-resource-report/templates/.*$

  # formatters
  - repo: https://github.com/asottile/reorder_python_imports
    rev: v2.1.0
    hooks:
      - id: reorder-python-imports

  - repo: https://github.com/ambv/black
    rev: 19.10b0
    hooks:
      - id: black

  - repo: https://github.com/asottile/pyupgrade
    rev: v2.1.0
    hooks:
      - id: pyupgrade
        stages: [push]

  # linters
  - repo: https://github.com/PyCQA/bandit
    rev: 1.6.2
    hooks:
      - id: bandit
        args: ["-x", "tests"]
        stages: [push]

  - repo: https://github.com/PyCQA/pydocstyle
    rev: 5.0.2
    hooks:
      - id: pydocstyle
        args: ["--ignore=D10,D21,D202"]

  - repo: local
    hooks:

      - id: safety
        name: safety
        entry: safety
        language: system
        pass_filenames: false
        args: ["check", "--bare"]
        stages: [push]

      - id: poetry
        name: poetry
        description: Validates the structure of the pyproject.toml file
        entry: poetry check
        language: system
        pass_filenames: false
        files: ^pyproject.toml$
        stages: [push]

  - repo: https://github.com/adrienverge/yamllint
    rev: v1.21.0
    hooks:
      - id: yamllint
        args: ["--strict", "-d", "{rules: {line-length: {max: 180}}}"]
        # Helm templates are not valid YAML before rendering
        exclude: >
          (?x)^(
            ^chart/kube-resource-report/templates/.*$
          )

  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v0.770
    hooks:
      - id: mypy

  - repo: https://github.com/pryorda/dockerfilelint-precommit-hooks
    rev: v0.1.0
    hooks:
      - id: dockerfilelint
        stages: [commit]  # required

  # miscellaneous

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.5.0
    hooks:
      - id: check-added-large-files
      - id: check-docstring-first
      - id: debug-statements
      - id: end-of-file-fixer
      - id: flake8
        additional_dependencies: ["flake8-bugbear"]
      - id: trailing-whitespace
      - id: check-ast
      - id: check-builtin-literals
      - id: detect-private-key
      - id: mixed-line-ending
      - id: name-tests-test
        args: ["--django"]

  - repo: https://github.com/pre-commit/pygrep-hooks
    rev: v1.5.1
    hooks:
      # - id: rst-backticks
      - id: python-use-type-annotations
      - id: python-no-log-warn
      - id: python-no-eval
      - id: python-check-mock-methods
      - id: python-check-blanket-noqa

  # commit-msg
  # http://jorisroovers.com/gitlint/#using-gitlint-through-pre-commit

  - repo: https://github.com/jorisroovers/gitlint
    rev: v0.13.1
    hooks:
      - id: gitlint
6 changes: 2 additions & 4 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -12,16 +12,14 @@ install:

.PHONY:
lint: install
poetry run black --check kube_resource_report tests
poetry run flake8
poetry run mypy --ignore-missing-imports kube_resource_report/
poetry run pre-commit run --all-files

.PHONY:
test: install lint
poetry run coverage run --source=kube_resource_report -m py.test
poetry run coverage report

docker:
docker:
docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" .
@echo 'Docker image $(IMAGE):$(TAG) can now be used.'

Expand Down
19 changes: 10 additions & 9 deletions kube_resource_report/cluster_discovery.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,15 @@
import re
import time
from pathlib import Path
from urllib.parse import urljoin

from typing import List
from urllib.parse import urljoin

import requests
import tokens
from pykube import HTTPClient
from pykube import KubeConfig
from requests.auth import AuthBase

from pykube import HTTPClient, KubeConfig

# default URL points to kubectl proxy
DEFAULT_CLUSTERS = "http://localhost:8001/"
CLUSTER_ID_INVALID_CHARS = re.compile("[^a-z0-9:-]")
Expand All @@ -22,16 +21,16 @@


def generate_cluster_id(url: str) -> str:
    """Generate some "cluster ID" from the given API server URL.

    The URL scheme is stripped and every character outside [a-z0-9:-]
    is replaced with "-" (see CLUSTER_ID_INVALID_CHARS), so the result
    is safe to use as an identifier.
    """
    for prefix in ("https://", "http://"):
        if url.startswith(prefix):
            url = url[len(prefix) :]
    return CLUSTER_ID_INVALID_CHARS.sub("-", url.lower()).strip("-")


class OAuthTokenAuth(AuthBase):
"""Dynamic authentication using the "tokens" library to load OAuth tokens from file
(potentially mounted from a Kubernetes secret)"""

"""Dynamic authentication using the "tokens" library to load OAuth tokens from file (potentially mounted from a Kubernetes secret)."""

def __init__(self, token_name):
self.token_name = token_name
Expand Down Expand Up @@ -118,8 +117,10 @@ def refresh(self):
)
self._clusters = clusters
self._last_cache_refresh = time.time()
except:
logger.exception(f"Failed to refresh from cluster registry {self._url}")
except Exception as e:
logger.exception(
f"Failed to refresh from cluster registry {self._url}: {e}"
)

def get_clusters(self):
now = time.time()
Expand Down
12 changes: 5 additions & 7 deletions kube_resource_report/main.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import click
from .cluster_discovery import DEFAULT_CLUSTERS
from pathlib import Path
import logging
import os
import time
from pathlib import Path

import click

from .cluster_discovery import DEFAULT_CLUSTERS
from .report import generate_report


Expand Down Expand Up @@ -135,10 +136,7 @@ def main(
node_labels,
debug,
):
"""Kubernetes Resource Report
Generate a static HTML report to OUTPUT_DIR for all clusters in ~/.kube/config or Cluster Registry.
"""
"""Kubernetes Resource Report generates a static HTML report to OUTPUT_DIR for all clusters in ~/.kube/config or Cluster Registry."""

if debug:
logging.basicConfig(level=logging.DEBUG)
Expand Down
102 changes: 102 additions & 0 deletions kube_resource_report/metrics.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
import collections
import logging

import pykube
from pykube.objects import APIObject
from pykube.objects import NamespacedAPIObject

from .utils import parse_resource

logger = logging.getLogger(__name__)


class NodeMetrics(APIObject):

    """
    Kubernetes API object for Node metrics (metrics.k8s.io/v1beta1, "nodes" endpoint).

    Served by the Resource Metrics API, see
    https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/resource-metrics-api.md
    """

    version = "metrics.k8s.io/v1beta1"
    endpoint = "nodes"
    kind = "NodeMetrics"


class PodMetrics(NamespacedAPIObject):

    """
    Kubernetes API object for Pod metrics (metrics.k8s.io/v1beta1, "pods" endpoint, namespaced).

    Served by the Resource Metrics API, see
    https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/resource-metrics-api.md
    """

    version = "metrics.k8s.io/v1beta1"
    endpoint = "pods"
    kind = "PodMetrics"


def get_ema(curr_value: float, prev_value: Optional[float], alpha: float = 1.0) -> float:
    """
    Calculate the Exponential Moving Average of the current and previous value.

    More info about EMA: https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average

    The coefficient alpha represents the degree of weighting decrease, a constant
    smoothing factor between 0 and 1. A higher alpha discounts older observations faster:

    * alpha 1.0 - only the current observation counts
    * alpha 0.0 - only the previous observation counts

    Choosing the initial smoothed value:
    https://en.wikipedia.org/wiki/Exponential_smoothing#Choosing_the_initial_smoothed_value
    """
    if prev_value is None:
        # first run: there is no history to smooth against
        return curr_value

    return prev_value + alpha * (curr_value - prev_value)


def get_node_usage(cluster, nodes: dict, prev_nodes: dict, alpha_ema: float):
    """Update entries of "nodes" in place with EMA-smoothed resource usage.

    Usage is read from the NodeMetrics API of the given cluster; each value is
    smoothed against the previous run's value from "prev_nodes" via get_ema
    with the given alpha. Failures are logged, never raised.
    """
    try:
        for node_metrics in NodeMetrics.objects(cluster.client):
            name = node_metrics.name
            node = nodes.get(name)
            if not node:
                continue

            prev_usage = prev_nodes.get(name, {}).get("usage", {})
            usage: dict = collections.defaultdict(float)

            for resource, raw_value in node_metrics.obj.get("usage", {}).items():
                usage[resource] = get_ema(
                    parse_resource(raw_value), prev_usage.get(resource), alpha_ema
                )
            node["usage"] = usage
    except Exception:
        logger.exception("Failed to get node usage metrics")


def get_pod_usage(cluster, pods: dict, prev_pods: dict, alpha_ema: float):
    """Update entries of "pods" in place with EMA-smoothed resource usage.

    Per-container usage from the PodMetrics API is summed per resource,
    then each sum is smoothed against the previous run's value from
    "prev_pods" via get_ema. Keys of "pods" are (namespace, name) tuples.
    Failures are logged, never raised.
    """
    try:
        for pod_metrics in PodMetrics.objects(cluster.client, namespace=pykube.all):
            key = (pod_metrics.namespace, pod_metrics.name)
            pod = pods.get(key)
            if not pod:
                continue

            prev_usage = prev_pods.get(key, {}).get("usage", {})
            usage: dict = collections.defaultdict(float)

            # sum usage across all containers of the pod
            for container in pod_metrics.obj["containers"]:
                for resource, raw_value in container.get("usage", {}).items():
                    usage[resource] += parse_resource(raw_value)

            # smooth the per-resource totals against the previous run
            for resource, total in usage.items():
                usage[resource] = get_ema(total, prev_usage.get(resource), alpha_ema)

            pod["usage"] = usage
    except Exception:
        logger.exception("Failed to get pod usage metrics")
6 changes: 4 additions & 2 deletions kube_resource_report/output.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
import logging
import shutil

from pathlib import Path
from jinja2 import Environment, FileSystemLoader, select_autoescape

from jinja2 import Environment
from jinja2 import FileSystemLoader
from jinja2 import select_autoescape

from kube_resource_report import filters

Expand Down
4 changes: 2 additions & 2 deletions kube_resource_report/pricing.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,8 +258,8 @@ def generate_ec2_price_list():
and sw == "NA"
and "BoxUsage:" in usagetype
):
for k, v in entry["terms"]["OnDemand"].items():
for k_, v_ in v["priceDimensions"].items():
for _k, v in entry["terms"]["OnDemand"].items():
for _, v_ in v["priceDimensions"].items():
if v_["unit"] == "Hrs":
price = float(v_["pricePerUnit"]["USD"])
if price == 0:
Expand Down
Loading

0 comments on commit 8305141

Please sign in to comment.