# Examples

This page contains practical examples of using PyConvexity for common energy modeling tasks.

Basic Network Creation

import pyconvexity as px

# Create a new database file and install the PyConvexity schema in it
px.create_database_with_schema("my_model.db")

# Create a network; database_context yields a connection and closes it on exit
with px.database_context("my_model.db") as conn:
    # Define the network's time axis: one day at hourly resolution
    network_req = px.CreateNetworkRequest(
        name="My Energy Network",
        description="Example renewable energy system",
        start_time="2024-01-01 00:00:00",
        end_time="2024-01-02 00:00:00",
        time_resolution="H"
    )
    network_id = px.create_network(conn, network_req)

    # Create carriers (energy types) — here AC electricity
    ac_carrier = px.create_carrier(conn, network_id, "AC")

    # Create components; a BUS is the node other components attach to
    bus_id = px.create_component(
        conn, network_id, "BUS", "Main Bus",
        latitude=40.7128, longitude=-74.0060,
        carrier_id=ac_carrier
    )

    # Set component attributes; StaticValue marks it constant over time
    px.set_static_attribute(conn, bus_id, "v_nom", px.StaticValue(230.0))

    # Persist everything written in this context
    conn.commit()

print(f"✅ Created network {network_id} with bus {bus_id}")

Working with Timeseries Data

Setting Timeseries Data

import pyconvexity as px
import numpy as np

# ID of an existing generator component. The original snippet used
# `generator_id` without defining it — obtain the ID by creating or looking
# up the component first (e.g. via px.create_component(...)), then use it here.
generator_id = 123

# Generate hourly solar generation profile:
# zero at the day's edges, sinusoidal ramp peaking at 100 at midday
hours = 24
solar_profile = np.maximum(0, np.sin(np.linspace(0, np.pi, hours)) * 100)

# Set timeseries data (recommended high-level API)
px.set_timeseries(
    "my_model.db",
    component_id=generator_id,
    attribute_name="p_max_pu",
    values=solar_profile
)

Getting Timeseries Data

# Load the complete "p" series for component 123
ts = px.get_timeseries("my_model.db", component_id=123, attribute_name="p")
print(f"Length: {ts.length}, Values: {ts.values[:5]}")

# Restrict the query to an index window instead of fetching everything
window = px.get_timeseries(
    "my_model.db", 123, "p",
    start_index=100, end_index=200
)

# Down-sample very long series to a bounded number of points
sampled = px.get_timeseries(
    "my_model.db", 123, "p",
    max_points=1000
)

# Inspect length/unit without pulling the value array at all
metadata = px.get_timeseries_metadata("my_model.db", 123, "p")
print(f"Length: {metadata.length}, Unit: {metadata.unit}")

Batch Timeseries Operations

# Load multiple timeseries efficiently
requests = [
    {"component_id": 123, "attribute_name": "p"},
    {"component_id": 124, "attribute_name": "p"},
    {"component_id": 125, "attribute_name": "p", "scenario_id": 2}
]
timeseries_list = px.get_multiple_timeseries("my_model.db", requests)

NumPy Integration

# Convert to NumPy for analysis (plain ndarray of the stored values)
ts = px.get_timeseries("my_model.db", 123, "p")
arr = px.timeseries_to_numpy(ts)
print(f"Mean: {arr.mean():.2f}, Std: {arr.std():.2f}")

# Create from NumPy array
import numpy as np
arr = np.random.normal(100, 10, 8760)  # Hourly data for a year
# Wrap the raw array in a timeseries object, tagging it with a unit
ts = px.numpy_to_timeseries(arr, unit="MW")
px.set_timeseries("my_model.db", 123, "p_set", ts)

PyPSA Integration

from pyconvexity.solvers.pypsa import build_pypsa_network

# Convert PyConvexity network to PyPSA
# (assumes network_id from an earlier example — e.g. Basic Network Creation)
with px.database_context("my_model.db") as conn:
    pypsa_network = build_pypsa_network(conn, network_id)

    # Run optimization on the converted network
    pypsa_network.optimize()

    # Access results: per-generator dispatch over time
    print("Generator dispatch:")
    print(pypsa_network.generators_t.p)

Excel Import/Export

from pyconvexity.io.excel_exporter import ExcelModelExporter
from pyconvexity.io.excel_importer import ExcelModelImporter

# Write the network out to a spreadsheet
with px.database_context("my_model.db") as conn:
    ExcelModelExporter().export_network(conn, network_id, "my_model.xlsx")

# Read the spreadsheet back into a fresh database
with px.database_context("imported_model.db") as conn:
    network_id = ExcelModelImporter().import_network(conn, "my_model.xlsx")

Validation and Error Handling

# Candidate timeseries values — the original snippet used `values` without
# defining it. Length must match the network's time axis (e.g. 24 hourly
# values for a one-day network). network_id comes from an earlier example.
values = [100.0] * 24

# Validate timeseries alignment before writing to the database
result = px.validate_timeseries_alignment("my_model.db", network_id, values)
if not result["is_valid"]:
    print(f"Validation issues: {result['issues']}")

# Handle errors gracefully: missing components raise a typed exception
try:
    ts = px.get_timeseries("my_model.db", 999, "p")  # Non-existent component
except px.ComponentNotFound as e:
    print(f"Component not found: {e}")

Performance Tips

Efficient Data Loading

# Batch many lookups into one call rather than issuing 100 separate queries
requests = [
    {"component_id": cid, "attribute_name": "p"}
    for cid in range(100, 200)
]
all_timeseries = px.get_multiple_timeseries("my_model.db", requests, max_points=1000)

# For a single large series, cap the number of points returned
ts_sampled = px.get_timeseries("my_model.db", 123, "p", max_points=5000)

Database Optimization

# Optimize database performance
px.optimize_database("my_model.db")

# Check first, since vacuuming can be slow on large databases
if px.should_optimize_database("my_model.db"):
    px.vacuum_database("my_model.db")