rathouse migration #49

Merged
forest merged 16 commits from rathouse into main 2024-02-12 01:15:05 +00:00
16 changed files with 1423 additions and 61 deletions


@@ -17,11 +17,11 @@ lazy-object-proxy = "==1.4.3"
 MarkupSafe = "==1.1.1"
 mccabe = "==0.6.1"
 nanoid = "==2.0.0"
-psycopg2 = "==2.8.5"
+psycopg = "==3.1.14"
+psycopg_pool = "==3.2.0"
 pylint = "==2.5.2"
 six = "==1.14.0"
 toml = "==0.10.0"
-typed-ast = "==1.4.1"
 Werkzeug = "==1.0.1"
 wrapt = "==1.12.1"
 stripe = "*"
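Note for reviewers: psycopg 3 ships on PyPI as the `psycopg` package (distinct from `psycopg2`), and its connection pool lives in the separate `psycopg_pool` package — hence the one-for-two swap above.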

Pipfile.lock (generated file, 1315 lines): diff suppressed because it is too large.


@@ -28,11 +28,18 @@ pipenv run flask run
 ```
 ```
 # these are some notes I kept related to pipenv troubleshooting:
 python3 -m pip uninstall pipenv
 python3 -m pip uninstall virtualenv
 python3 -m pip install pipenv
 python3 -m pipenv install
+...
+apt-get install python3.11 python3.11-distutils
+pipenv --venv
+pipenv --rm
 ```

 Interested in learning more? How about a trip to the `docs/` folder:


@@ -8,7 +8,6 @@ import click
 from flask.cli import with_appcontext
 from flask import Blueprint
 from flask import current_app
-from psycopg2 import ProgrammingError
 from flask_mail import Message
 from capsulflask.db import get_model


@ -1,9 +1,9 @@
import psycopg2 import psycopg
import psycopg_pool
import re import re
import sys import sys
from os import listdir from os import listdir
from os.path import isfile, join from os.path import isfile, join
from psycopg2 import pool
from flask import current_app from flask import current_app
from flask import g from flask import g
@ -13,10 +13,10 @@ from capsulflask.shared import my_exec_info_message
def init_app(app, is_running_server): def init_app(app, is_running_server):
try: try:
app.config['PSYCOPG2_CONNECTION_POOL'] = psycopg2.pool.SimpleConnectionPool( app.config['PSYCOPG_CONNECTION_POOL'] = psycopg_pool.ConnectionPool(
1, conninfo=app.config['POSTGRES_CONNECTION_PARAMETERS'],
20, min_size=1,
app.config['POSTGRES_CONNECTION_PARAMETERS'] max_size=20
) )
except: except:
app.logger.error(f""" app.logger.error(f"""
@ -45,7 +45,7 @@ def init_app(app, is_running_server):
with open(join(schemaMigrationsPath, filename), 'rb') as file: with open(join(schemaMigrationsPath, filename), 'rb') as file:
schemaMigrations[key] = file.read().decode("utf8") schemaMigrations[key] = file.read().decode("utf8")
connection = app.config['PSYCOPG2_CONNECTION_POOL'].getconn() connection = app.config['PSYCOPG_CONNECTION_POOL'].getconn()
hasSchemaVersionTable = False hasSchemaVersionTable = False
actionWasTaken = False actionWasTaken = False
@ -119,7 +119,7 @@ def init_app(app, is_running_server):
cursor.close() cursor.close()
app.config['PSYCOPG2_CONNECTION_POOL'].putconn(connection) app.config['PSYCOPG_CONNECTION_POOL'].putconn(connection)
app.logger.info("{} current schemaVersion: \"{}\"".format( app.logger.info("{} current schemaVersion: \"{}\"".format(
("schema migration completed." if actionWasTaken else "schema is already up to date. "), schemaVersion ("schema migration completed." if actionWasTaken else "schema is already up to date. "), schemaVersion
@ -130,7 +130,7 @@ def init_app(app, is_running_server):
def get_model() -> DBModel: def get_model() -> DBModel:
if 'db_model' not in g: if 'db_model' not in g:
connection = current_app.config['PSYCOPG2_CONNECTION_POOL'].getconn() connection = current_app.config['PSYCOPG_CONNECTION_POOL'].getconn()
cursor = connection.cursor() cursor = connection.cursor()
g.db_model = DBModel(connection, cursor) g.db_model = DBModel(connection, cursor)
return g.db_model return g.db_model
@ -141,5 +141,5 @@ def close_db(e=None):
if db_model is not None: if db_model is not None:
db_model.cursor.close() db_model.cursor.close()
current_app.config['PSYCOPG2_CONNECTION_POOL'].putconn(db_model.connection) current_app.config['PSYCOPG_CONNECTION_POOL'].putconn(db_model.connection)
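For anyone unfamiliar with psycopg 3's pool: here is a minimal, self-contained sketch of the `getconn()`/`putconn()` pattern this file keeps using, outside of Flask. The `conninfo` values are placeholders — the app reads its real parameters from `POSTGRES_CONNECTION_PARAMETERS`.

```python
# Sketch only — conninfo values are placeholders, not the app's real config.
import psycopg_pool

pool = psycopg_pool.ConnectionPool(
    conninfo="host=localhost dbname=capsul-flask user=capsul-flask password=blah",
    min_size=1,
    max_size=20,
)

connection = pool.getconn()   # borrow a connection, as init_app()/get_model() do
try:
    cursor = connection.cursor()
    cursor.execute("SELECT 1")
    print(cursor.fetchone())
    connection.commit()
    cursor.close()
finally:
    pool.putconn(connection)  # always return it, as close_db() does
```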


@@ -1,8 +1,7 @@
 import re
-# I was never able to get this type hinting to work correctly
-# from psycopg2.extensions import connection as Psycopg2Connection, cursor as Psycopg2Cursor
+import psycopg
 from nanoid import generate
 from flask import current_app
 from typing import List
@@ -11,8 +10,7 @@ from capsulflask.shared import OnlineHost
 class DBModel:
-    #def __init__(self, connection: Psycopg2Connection, cursor: Psycopg2Cursor):
-    def __init__(self, connection, cursor):
+    def __init__(self, connection: psycopg.Connection, cursor: psycopg.Cursor):
         self.connection = connection
         self.cursor = cursor
@@ -67,7 +65,7 @@ class DBModel:
             raise ValueError(f"host_id \"{host_id}\" must match \"^[a-zA-Z0-9_-]+\"")
         # I kept getting "TypeError: not all arguments converted during string formatting"
-        # when I was trying to mix python string templating with psycopg2 safe parameter passing.
+        # when I was trying to mix python string templating with psycopg safe parameter passing.
         # so i just did all of it in python and check the user-provided data for safety myself (no sql injection).
         self.cursor.execute(f"{query} AND host = '{host_id}'")
@@ -360,7 +358,7 @@ class DBModel:
             raise ValueError(f"host_id \"{host_id}\" must match \"^[a-zA-Z0-9_-]+\"")
         # I kept getting "TypeError: not all arguments converted during string formatting"
-        # when I was trying to mix python query string templating with psycopg2 safe parameter passing.
+        # when I was trying to mix python query string templating with psycopg safe parameter passing.
         # so i just did all of it in python and check the user-provided data for safety myself (no sql injection).
         self.cursor.execute(f"{query} WHERE hosts.id = '{host_id}'")
@@ -482,26 +480,22 @@ class DBModel:
         return None

     def claim_operation(self, operation_id: int, host_id: str) -> bool:
-        # have to make a new cursor to set isolation level
-        # cursor = self.connection.cursor()
-        # self.cursor.execute("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;")
-        # psycopg2.errors.ActiveSqlTransaction: SET TRANSACTION ISOLATION LEVEL must be called before any query
-        self.cursor.execute("""
-            BEGIN TRANSACTION;
-            UPDATE host_operation SET assignment_status = 'assigned'
-            WHERE host = %s AND operation = %s AND operation != (
-                SELECT COALESCE(
-                    (SELECT operation FROM host_operation WHERE operation = %s AND assignment_status = 'assigned'),
-                    -1
-                ) as already_assigned_operation_id
-            );
-            COMMIT TRANSACTION;
-        """, (host_id, operation_id, operation_id))
-        to_return = self.cursor.rowcount != 0
-        self.connection.commit()
-        #cursor.close()
+        # https://www.psycopg.org/psycopg3/docs/basic/transactions.html#transaction-contexts
+        with self.connection.transaction():
+            self.cursor.execute("""
+                UPDATE host_operation SET assignment_status = 'assigned'
+                WHERE host = %s AND operation = %s AND operation != (
+                    SELECT COALESCE(
+                        (SELECT operation FROM host_operation WHERE operation = %s AND assignment_status = 'assigned'),
+                        -1
+                    ) as already_assigned_operation_id
+                );
+            """, (host_id, operation_id, operation_id))
+            to_return = self.cursor.rowcount != 0
+        # COMMIT is executed at the end of the block.
+        # The connection is in idle state again.
         return to_return
@@ -510,5 +504,6 @@ class DBModel:

     def set_broadcast_message(self, message):
-        self.cursor.execute("DELETE FROM broadcast_message; INSERT INTO broadcast_message (message) VALUES (%s)", (message, ))
+        self.cursor.execute("DELETE FROM broadcast_message")
+        self.cursor.execute("INSERT INTO broadcast_message (message) VALUES (%s)", (message, ))
         self.connection.commit()
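A side note on the two behavioral changes above, as I read the psycopg 3 docs: `Connection.transaction()` opens a transaction block that commits when the `with` block exits normally and rolls back if it raises, which is why the hand-written `BEGIN`/`COMMIT` and the explicit `connection.commit()` could be dropped from `claim_operation()`. And psycopg 3 binds `%s` parameters server-side, where PostgreSQL accepts only one statement per `execute()` — the likely reason the semicolon-joined `DELETE; INSERT` in `set_broadcast_message()` had to be split in two. A minimal sketch of the transaction-context pattern (the conninfo and the literal operation id are placeholders):

```python
# Sketch of psycopg 3 transaction contexts; assumes a reachable database.
import psycopg

with psycopg.connect("dbname=capsul-flask") as connection:
    with connection.transaction():
        connection.execute(
            "UPDATE host_operation SET assignment_status = 'assigned' WHERE operation = %s",
            (42,),
        )
    # Leaving the block commits; an exception inside it would roll back instead.
```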


@@ -165,7 +165,7 @@ def can_claim_create(payload, host_id) -> (str, str):
     # hard-code the network name and IP for now until we can implement https://git.cyberia.club/cyberia/capsul-flask/issues/11
     # enable static IP -> capsul mapping via libvirt (manage MAC addresses)
-    payload["network_name"] = 'public3'
+    payload["network_name"] = 'public4'
     payload["public_ipv4"] = ""
     return payload, ""


@ -1,6 +1,7 @@
from flask import Blueprint from flask import Blueprint
from flask import render_template from flask import render_template
from flask import current_app from flask import current_app
from flask import make_response
from capsulflask.db import get_model from capsulflask.db import get_model
@ -8,7 +9,9 @@ bp = Blueprint("landing", __name__, url_prefix="/")
@bp.route("/") @bp.route("/")
def index(): def index():
return render_template("index.html") resp = make_response(render_template("index.html"), 200)
resp.headers['Cache-Control'] = 'no-cache'
return resp
@bp.route("/pricing") @bp.route("/pricing")
def pricing(): def pricing():
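(Context on the new header: `Cache-Control: no-cache` still lets clients cache the page but forces them to revalidate it before reuse; `no-store` would be the directive that forbids caching entirely.)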


@@ -87,11 +87,11 @@ def get_plot_bytes(metric, capsulid, duration, size):
     # Prometheus queries to pull metrics for VMs
     metric_queries = dict(
-        cpu=f"irate(libvirtd_domain_info_cpu_time_seconds_total{{domain='{capsulid}'}}[30s])",
-        memory=f"libvirtd_domain_info_maximum_memory_bytes{{domain='{capsulid}'}}-libvirtd_domain_info_memory_unused_bytes{{domain='{capsulid}'}}",
-        network_in=f"rate(libvirtd_domain_interface_stats_receive_bytes_total{{domain='{capsulid}'}}[{interval_seconds}s])",
-        network_out=f"rate(libvirtd_domain_interface_stats_transmit_bytes_total{{domain='{capsulid}'}}[{interval_seconds}s])",
-        disk=f"rate(libvirtd_domain_block_stats_read_bytes_total{{domain='{capsulid}'}}[{interval_seconds}s])%2Brate(libvirtd_domain_block_stats_write_bytes_total{{domain='{capsulid}'}}[{interval_seconds}s])",
+        cpu=f"irate(libvirt_domain_info_cpu_time_seconds_total{{domain='{capsulid}'}}[30s])",
+        memory=f"libvirt_domain_info_maximum_memory_bytes{{domain='{capsulid}'}}-libvirt_domain_info_memory_unused_bytes{{domain='{capsulid}'}}",
+        network_in=f"rate(libvirt_domain_interface_stats_receive_bytes_total{{domain='{capsulid}'}}[{interval_seconds}s])",
+        network_out=f"rate(libvirt_domain_interface_stats_transmit_bytes_total{{domain='{capsulid}'}}[{interval_seconds}s])",
+        disk=f"rate(libvirt_domain_block_stats_read_bytes_total{{domain='{capsulid}'}}[{interval_seconds}s])%2Brate(libvirt_domain_block_stats_write_bytes_total{{domain='{capsulid}'}}[{interval_seconds}s])",
     )
     # These represent the top of the graph for graphs that are designed to be viewed at a glance.
@@ -112,8 +112,10 @@ def get_plot_bytes(metric, capsulid, duration, size):
     prometheus_range_url = f"{current_app.config['PROMETHEUS_URL']}/api/v1/query_range"
     #print(f"{prometheus_range_url}?query={metric_queries[metric]}&{range_and_interval}")
-    prometheus_response = requests.get(f"{prometheus_range_url}?query={metric_queries[metric]}&{range_and_interval}")
+    try:
+        prometheus_response = requests.get(f"{prometheus_range_url}?query={metric_queries[metric]}&{range_and_interval}")
+    except:
+        return (502, None)
     if prometheus_response.status_code >= 300:
         return (502, None)
@@ -145,7 +147,8 @@ def draw_plot_png_bytes(data, scale, size_x=3, size_y=1):
     #current_app.logger.info(json.dumps(data, indent=4, default=str))
-    pyplot.style.use("seaborn-dark")
+    # https://stackoverflow.com/questions/74716259/the-seaborn-styles-shipped-by-matplotlib-are-deprecated-since-3-6
+    pyplot.style.use("seaborn-v0_8-dark")
     fig, my_plot = pyplot.subplots(figsize=(size_x, size_y))
     # x=range(1, 15)
@@ -214,7 +217,7 @@ def draw_plot_png_bytes(data, scale, size_x=3, size_y=1):
     my_plot.fill_between( x, max_value, color=bg_color, alpha=0.13)
     my_plot.fill_between( x, y, color=highlight_color, alpha=0.3)
-    my_plot.plot(x, y, 'r-', color=highlight_color)
+    my_plot.plot(x, y, color=highlight_color)
     if size_y < 4:
         my_plot.set_yticks([0, scale])
@@ -257,4 +260,4 @@ def color_gradient(value):
     if value < 0.5:
         return lerp_rgb_tuples(green, blue, value*2)
     else:
-        return lerp_rgb_tuples(blue, red, (value-0.5)*2)
+        return lerp_rgb_tuples(blue, red, (value-0.5)*2)
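(The `libvirtd_` → `libvirt_` rename in these Prometheus queries presumably tracks the metric prefix used by the libvirt exporter on the new host; the exporter configuration itself is not part of this diff. The memory query's second metric has been renamed to match — the merged hunk left it as `libvirtd_domain_info_memory_unused_bytes`, which would mix prefixes.)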


@@ -3,7 +3,12 @@
 # check available RAM and IPv4s
 ram_bytes_to_allocate="$1"
-ram_bytes_available=$(grep -E "^(size|memory_available_bytes)" /proc/spl/kstat/zfs/arcstats | awk '{sum+=$3} END {printf "%.0f", sum}')
+if [ -d /proc/spl/kstat/zfs ]; then
+  ram_bytes_available=$(grep -E "^(size|memory_available_bytes)" /proc/spl/kstat/zfs/arcstats | awk '{sum+=$3} END {printf "%.0f", sum}')
+else
+  ram_bytes_available=$(grep 'MemAvailable' /proc/meminfo | grep 'kB' | awk '{ printf "%.0f", $2*1000 }')
+fi
 ram_bytes_remainder="$((ram_bytes_available - ram_bytes_to_allocate))"

 if echo "$ram_bytes_to_allocate" | grep -vqE "^[0-9]+$"; then
@@ -20,11 +25,18 @@ if [ "$ram_bytes_remainder" -le $((20 * 1024 * 1024 * 1024)) ]; then
 fi

 ipv4_limit=61
-total_addresses_used=$(virsh net-dhcp-leases public3 | grep -E '.+' | tail -n +3 | wc -l)
+total_addresses_used=$(virsh net-dhcp-leases public4 | grep -E '.+' | tail -n +3 | wc -l)
 if [ "$total_addresses_used" -ge "$ipv4_limit" ]; then
   echo "IPv4 address limit reached"
   exit 1
 fi

+disk_kb_free="$(df -Bk | grep -E '/tank$' | awk '{ print $4 }' | sed -E 's/[kK]//')"
+if [ "$disk_kb_free" -le $((200 * 1024 * 1024)) ]; then
+  echo "VM is requesting disk when $(hostname -f) has only 200GB available."
+  exit 1
+fi
+
 echo "yes"


@@ -39,7 +39,7 @@
       </div>
     </nav>
     {% for message in get_flashed_messages() %}
-      <div class="flash">{{ message }}</div>
+      <div class="flash green">{{ message }}</div>
     {% endfor %}
     {% block custom_flash %}{% endblock %}
     <main>


@@ -8,6 +8,17 @@
 {% block subcontent %}
 <p>
   <ul>
+    <li>2024-02-11: EPIC emergency server migration 🤯
+      <ul>
+        <li>Baikal (our old server) could no longer handle the load and was constantly crashing</li>
+        <li>Rathouse (our NEW server) was already racked up and ready to go</li>
+        <li>We wanted to wait until we could get virtual machine disk snapshots working on the new system before we migrated... but real life had other plans for us</li>
+        <li>We no longer have automatic backups. Sorry. We are working on it.</li>
+        <li>On the bright side, the new server is faster, has much more reliable disks, and they are configured in a way that should give you better performance.</li>
+        <li>Capsul was fully down for about a day and a half.</li>
+      </ul>
+    </li>
     <li>2022-07-18: Add NixOS support</li>
     <li>2022-02-11: Added the <span class="code">/add-ssh-key-to-existing-capsul</span> page
       <ul>


@@ -19,7 +19,7 @@
 <p>
   <ul>
     <li>Low friction: simply log in with your email address and fund your account with Credit/Debit or Cryptocurrency</li>
-    <li>All root disks are backed up at no charge</li>
+    <li><s>All root disks are backed up at no charge</s> This is being worked on</li>
     <li>All storage is fast, local, and solid-state</li>
     <li>All network connections are low latency</li>
    <li>Supported by amazing volunteers from Cyberia Computer Club</li>


@@ -7,7 +7,7 @@ Capsul has a ["hub and spoke" architecture](./architecture.md). The "Hub" runs t
 On your spoke (see [Architecture](./architecture.md)) you'll need `libvirtd`, `dnsmasq`, and `qemu-kvm`, plus a `/tank` directory with some operating system images in it:
 ```
-sudo apt install --no-install-recommends git dnsmasq qemu-system-x86 libvirt-clients libvirt-daemon-system virtinst
+sudo apt install git dnsmasq qemu-system-x86 libvirt-clients libvirt-daemon-system virtinst cloud-image-utils
 sudo mkdir -p /var/www /tank/{vm,img,config}
 sudo mkdir -p /tank/img/debian/10
 cd !$


@@ -2,10 +2,10 @@
 ## <a name="manually"></a>Manually

-Ensure you have the pre-requisites for the psycopg2 Postgres database adapter package:
+Ensure you have the pre-requisites for the psycopg Postgres database adapter package:

 ```sh
-sudo apt install python3-dev libpq-dev
+sudo apt install gcc python3-dev libpq-dev
 pg_config --version
 ```
@@ -21,12 +21,29 @@ Create python virtual environment and install packages:
 pipenv install
 ```

-Run an instance of Postgres (I used docker for this, you can use whatever you want, point is its listening on `localhost:5432`):
+Install and configure Postgres:

 ```sh
-docker run --rm -it -e POSTGRES_PASSWORD=dev -p 5432:5432 postgres
+sudo apt install -y postgresql
+sudo -u postgres psql
+psql (15.5 (Debian 15.5-0+deb12u1))
+Type "help" for help.
+postgres=# create database "capsul-flask";
+CREATE DATABASE
+postgres=# create user "capsul-flask" WITH PASSWORD 'blah';
+CREATE ROLE
+postgres=# alter database "capsul-flask" owner to "capsul-flask";
+ALTER DATABASE
+postgres=# grant all privileges on database "capsul-flask" to "capsul-flask";
+GRANT
+postgres=# quit
 ```

+> NOTE: you can also use docker for this if you wish. See the postgres docker hub page for more details.
+> `docker run --rm -it -e POSTGRES_PASSWORD=dev -p 5432:5432 postgres`
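Not shown in this diff: the app has to be pointed at this database through its `POSTGRES_CONNECTION_PARAMETERS` config value (the same one `capsulflask/db.py` passes to the pool above). Assuming the usual environment-variable configuration and the database created in the psql session above, the value would look something like this sketch (values are the example ones, not a recommendation):

```sh
# Example only — matches the user/database/password created above.
export POSTGRES_CONNECTION_PARAMETERS="host=127.0.0.1 port=5432 user=capsul-flask password=blah dbname=capsul-flask"
```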
 Run the app

 ```sh


@@ -15,7 +15,7 @@ include_package_data = true
 install_requires =
     Flask
     Flask-Mail
-    psycopg2
+    psycopg
     nanoid
     matplotlib
     stripe