diff --git a/src/dash_tools/layout_helper.py b/src/dash_tools/layout_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..097c3aaa1f02125c629599031f0e783ce5d67a9b
--- /dev/null
+++ b/src/dash_tools/layout_helper.py
@@ -0,0 +1,23 @@
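+"""Layout helper components for the forecast monitoring dashboard."""
+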
+from dash import html
+
+def create_collapsible_section(title, content, is_open=True):
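+    """
+    Wrap `content` in a Div with a heading and a toggle button.
+
+    The button and content carry pattern-matching ids
+    ({'type': 'collapse-button' / 'collapse-content', 'section': title}), so a
+    single MATCH callback in the app can toggle any section.
+
+    Args:
+        title: Section heading, also used as the 'section' key of the ids.
+        content: Dash component(s) rendered inside the collapsible body.
+        is_open: Whether the section starts expanded.
+
+    Returns:
+        dash.html.Div: Header row plus the collapsible content container.
+    """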
+    return html.Div([
+        html.Div([
+            html.H3(title, style={'display': 'inline-block', 'marginRight': '10px'}),
+            html.Button(
+                '▼' if is_open else '▶',
+                id={'type': 'collapse-button', 'section': title},
+                style={
+                    'border': 'none',
+                    'background': 'none',
+                    'fontSize': '20px',
+                    'cursor': 'pointer'
+                }
+            )
+        ], style={'marginBottom': '10px'}),
+        html.Div(
+            content,
+            id={'type': 'collapse-content', 'section': title},
+            style={'display': 'block' if is_open else 'none'}
+        )
+    ])
\ No newline at end of file
diff --git a/src/dash_tools/style_configs.py b/src/dash_tools/style_configs.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7dfac4ef0b76ed6111a4064caf76bc08cd5af6
--- /dev/null
+++ b/src/dash_tools/style_configs.py
@@ -0,0 +1,390 @@
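+"""Figure and table builders used by the forecast monitoring dashboard."""
+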
+from datetime import datetime, timedelta
+
+import pandas as pd
+import plotly.express as px
+import plotly.graph_objects as go
+from dash import html, dash_table
+from plotly.subplots import make_subplots
+
+def create_log_table(logs_data):
+    """
+    Creates a configured DataTable for logging display
+    
+    Args:
+        logs_data: List of dictionaries containing log data
+        
+    Returns:
+        dash_table.DataTable: Configured table for log display
+    """
+    columns = [
+        {'name': 'Time', 'id': 'timestamp'},
+        {'name': 'Level', 'id': 'level'},
+        {'name': 'Message', 'id': 'message'},
+        {'name': 'Exception', 'id': 'exception'}
+    ]
+    
+    style_data_conditional = [
+        {
+            'if': {'filter_query': '{level} = "ERROR"'},
+            'backgroundColor': '#ffebee',
+            'color': '#c62828'
+        },
+        {
+            'if': {'filter_query': '{level} = "WARNING"'},
+            'backgroundColor': '#fff3e0',
+            'color': '#ef6c00'
+        }
+    ]
+    
+    style_table = {
+        'overflowX': 'auto'
+    }
+    
+    style_cell = {
+        'textAlign': 'left',
+        'padding': '8px',
+        'whiteSpace': 'normal',
+        'height': 'auto',
+        'fontSize': '12px',
+    }
+    
+    return dash_table.DataTable(
+        columns=columns,
+        data=logs_data,
+        style_data_conditional=style_data_conditional,
+        style_table=style_table,
+        style_cell=style_cell
+    )
+
+
+def create_historical_plot(df_historical, model_name):
+    """
+    Creates a plotly figure for historical sensor data
+    
+    Args:
+        df_historical: DataFrame with sensor measurements
+        model_name: String name of the model
+    
+    Returns:
+        plotly.graph_objects.Figure: Configured plot
+    """
+    fig = go.Figure()
+    
+    # Add a trace for each sensor
+    for column in df_historical.columns:
+        fig.add_trace(
+            go.Scatter(
+                x=df_historical.index,
+                y=df_historical[column],
+                name=column,
+                mode='lines',
+                hovertemplate='%{y:.2f}<extra>%{x}</extra>'
+            )
+        )
+    
+    # Update layout
+    fig.update_layout(
+        title=f'Sensor Measurements - Last 144 Hours for {model_name}',
+        xaxis_title='Time',
+        yaxis_title='Value',
+        height=600,
+        showlegend=True,
+        legend=dict(
+            yanchor="top",
+            y=0.99,
+            xanchor="left",
+            x=1.05
+        ),
+        margin=dict(r=150)
+    )
+    
+    return fig
+
+def create_historical_table(df_historical):
+    """
+    Creates a formatted DataTable for historical sensor data
+    
+    Args:
+        df_historical: DataFrame with sensor measurements
+    
+    Returns:
+        dash.html.Div: Div containing configured DataTable
+    """
+    df_table = df_historical.reset_index()
+    df_table['tstamp'] = df_table['tstamp'].dt.strftime('%Y-%m-%d %H:%M')
+    
+    # Table styles
+    style_table = {
+        'maxHeight': '1200px',
+        'maxWidth': '1600px',
+        'overflowY': 'auto',
+        'width': '100%'
+    }
+    
+    style_cell = {
+        'textAlign': 'right',
+        'padding': '5px',
+        'minWidth': '100px',
+        'whiteSpace': 'normal',
+        'fontSize': '12px',
+    }
+    
+    style_header = {
+        'backgroundColor': '#f4f4f4',
+        'fontWeight': 'bold',
+        'textAlign': 'center',
+    }
+    
+    # Create conditional styles for blank values and timestamp
+    style_data_conditional = [
+        {
+            'if': {
+                'filter_query': '{{{}}} is blank'.format(col),
+                'column_id': col
+            },
+            'backgroundColor': '#ffebee',
+            'color': '#c62828'
+        } for col in df_table.columns if col != 'tstamp'
+    ] + [
+        {
+            'if': {'column_id': 'tstamp'},
+            'textAlign': 'left',
+            'minWidth': '150px'
+        }
+    ]
+    
+    # Create table columns configuration
+    columns = [{'name': col, 'id': col} for col in df_table.columns]
+    
+    return html.Div(
+        dash_table.DataTable(
+            id='historical-table',
+            columns=columns,
+            data=df_table.to_dict('records'),
+            style_table=style_table,
+            style_cell=style_cell,
+            style_header=style_header,
+            style_data_conditional=style_data_conditional,
+            fixed_columns={'headers': True, 'data': 1},
+            sort_action='native',
+            sort_mode='single',
+        )
+    )
+
+
+def create_inp_forecast_status_table(df_forecast):
+    """
+    Creates a status table showing availability of forecasts for each sensor at 3-hour intervals
+    
+    Args:
+        df_forecast: DataFrame with columns tstamp, sensor_name containing forecast data
+        
+    Returns:
+        dash.html.Div: Div containing configured DataTable
+    """
+    # Get unique sensor names
+    sensor_names = sorted(df_forecast['sensor_name'].unique())
+    
+    # Create index of last 48 hours at 3-hour intervals
+    last_required_hour = datetime.now().replace(minute=0, second=0, microsecond=0)
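+    # Round down to the most recent 3-hour boundary (00:00, 03:00, 06:00, ...)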
+    while last_required_hour.hour % 3 != 0:
+        last_required_hour -= timedelta(hours=1)
+
+    time_range = pd.date_range(
+        end=last_required_hour,
+        periods=17,  # 48 hours / 3 + 1
+        freq='3h'
+    )
+    
+    # Initialize result DataFrame with NaN
+    status_df = pd.DataFrame(index=time_range, columns=sensor_names)
+    
+    # For each sensor and timestamp, check if data exists
+    for sensor in sensor_names:
+        sensor_data = df_forecast[df_forecast['sensor_name'] == sensor]
+        for timestamp in time_range:
+            # TODO: make this availability check more precise
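+            # A forecast counts as available if it was issued at or before this
+            # timestamp and its 48-hour horizon still covers it.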
+            has_data = any(
+                (sensor_data['tstamp'] <= timestamp) & 
+                (sensor_data['tstamp'] + timedelta(hours=48) >= timestamp)
+            )
+            status_df.loc[timestamp, sensor] = 'OK' if has_data else 'Missing'
+    
+    # Reset index to make timestamp a column
+    status_df = status_df.reset_index()
+    status_df['index'] = status_df['index'].dt.strftime('%Y-%m-%d %H:%M')
+    
+    # Configure table styles
+    style_data_conditional = [
+        {
+            'if': {
+                'filter_query': '{{{col}}} = "Missing"'.format(col=col),
+                'column_id': col
+            },
+            'backgroundColor': '#ffebee',
+            'color': '#c62828'
+        } for col in sensor_names
+    ] + [
+        {
+            'if': {
+                'filter_query': '{{{col}}} = "OK"'.format(col=col),
+                'column_id': col
+            },
+            'backgroundColor': '#e8f5e9',
+            'color': '#2e7d32'
+        } for col in sensor_names
+    ]
+    
+    return html.Div(
+        dash_table.DataTable(
+            id='forecast-status-table',
+            columns=[
+                {'name': 'Timestamp', 'id': 'index'},
+                *[{'name': col, 'id': col} for col in sensor_names]
+            ],
+            data=status_df.to_dict('records'),
+            style_table={
+                'overflowX': 'auto',
+                'width': '100%'
+            },
+            style_cell={
+                'textAlign': 'center',
+                'padding': '5px',
+                'minWidth': '100px',
+                'fontSize': '12px',
+            },
+            style_header={
+                'backgroundColor': '#f4f4f4',
+                'fontWeight': 'bold',
+                'textAlign': 'center',
+            },
+            style_data_conditional=style_data_conditional,
+            fixed_columns={'headers': True, 'data': 1},
+            sort_action='native',
+            sort_mode='single',
+        )
+    )
+
+def create_input_forecasts_plot(df_forecast, df_historical):
+    """
+    Creates a figure with subplots for each sensor, showing forecast lines and historical data
+    
+    Args:
+        df_forecast: DataFrame with columns tstamp, sensor_name, member, h1-h48
+        df_historical: DataFrame with sensor_name columns and timestamp index
+        
+    Returns:
+        plotly.graph_objects.Figure: Figure with subplots
+    """
+    # Get unique sensors and members
+    sensors = df_forecast['sensor_name'].unique()
+    
+    # Create a color sequence for different forecast start times
+    colors = px.colors.qualitative.Set3
+    
+    # Create subplot figure
+    fig = make_subplots(
+        rows=len(sensors), 
+        cols=1,
+        subplot_titles=[f'Sensor: {sensor}' for sensor in sensors],
+        vertical_spacing=0.1
+    )
+    
+    # For each sensor
+    for sensor_idx, sensor in enumerate(sensors, 1):
+        # Add historical data
+        if sensor in df_historical.columns:
+            fig.add_trace(
+                go.Scatter(
+                    x=df_historical.index,
+                    y=df_historical[sensor],
+                    name=sensor,
+                    legendgroup=sensor,
+                    showlegend=True,  # Show in legend for all sensors
+                    line=dict(color='black', width=2),
+                    hovertemplate='Time: %{x}<br>Value: %{y:.2f}<br>Historical<extra></extra>'
+                ),
+                row=sensor_idx,
+                col=1
+            )
+        
+        sensor_data = df_forecast[df_forecast['sensor_name'] == sensor]
+        members = sensor_data['member'].unique()
+
+        
+        # Get unique forecast start times for color assignment
+        start_times = sorted(sensor_data['tstamp'].unique())
+        start_time_colors = {t: colors[i % len(colors)] for i, t in enumerate(start_times)}
+        
+        # For each member
+        for member in members:
+            member_data = sensor_data[sensor_data['member'] == member]
+            
+            # For each forecast (row)
+            for _, row in member_data.iterrows():
+                start_time = row['tstamp']
+                legend_group = f'{sensor} {start_time.strftime("%Y%m%d%H%M")}'
+                
+                # Create x values (timestamps) for this forecast
+                timestamps = [start_time + timedelta(hours=i) for i in range(1, 49)]
+                
+                # Get y values (h1 to h48)
+                values = [row[f'h{i}'] for i in range(1, 49)]
+                
+                # Add trace to the subplot
+                fig.add_trace(
+                    go.Scatter(
+                        x=timestamps,
+                        y=values,
+                        name=start_time.strftime("%Y-%m-%d %H:%M"),
+                        legendgroup=legend_group,
+                        # Only the first member of each forecast appears in the legend
+                        showlegend=bool(member == members[0]),
+                        line=dict(
+                            color=start_time_colors[start_time],
+                            width=1,
+                            dash='solid' if member == members[0] else 'dot'
+                        ),
+                        hovertemplate=(
+                            'Time: %{x}<br>'
+                            'Value: %{y:.2f}<br>'
+                            f'Member: {member}<br>'
+                            f'Start: {start_time}<extra></extra>'
+                        )
+                    ),
+                    row=sensor_idx,
+                    col=1
+                )
+    
+    # Update layout
+    fig.update_layout(
+        height=400 * len(sensors),  # Adjust height based on number of sensors
+        title='Forecast Values by Sensor with Historical Data',
+        showlegend=True,
+        legend=dict(
+            yanchor="top",
+            y=0.99,
+            xanchor="left",
+            x=1.05,
+            groupclick="togglegroup",  # Clicking a legend item toggles its whole group
+            itemsizing='constant',  # Makes legend items constant size
+            tracegroupgap=5  # Add small gap between groups
+        ),
+        margin=dict(r=150)
+    )
+    
+    # Update all x and y axes labels
+    for i in range(len(sensors)):
+        fig.update_xaxes(title_text="Time", row=i+1, col=1)
+        fig.update_yaxes(title_text="Value", row=i+1, col=1)
+    
+    return fig
\ No newline at end of file
diff --git a/src/dashboard.py b/src/dashboard.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb2daeaf0d8d55c6031f8e821a4f7ac22618501b
--- /dev/null
+++ b/src/dashboard.py
@@ -0,0 +1,472 @@
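+"""Dash application for monitoring forecast models: current status, logs,
+input forecasts and recent sensor data."""
+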
+import os
+from datetime import datetime, timedelta
+
+import oracledb
+import pandas as pd
+import plotly.express as px
+import plotly.graph_objs as go
+from dash import html, dcc, dash_table, Dash
+from dash.dependencies import Input, Output, State, MATCH
+from sqlalchemy import and_, create_engine, desc, func, select
+from sqlalchemy.orm import Session
+
+from dash_tools.layout_helper import create_collapsible_section
+from dash_tools.style_configs import (create_historical_plot, create_historical_table,
+                                      create_inp_forecast_status_table,
+                                      create_input_forecasts_plot, create_log_table)
+from utils.orm_classes import (InputForecasts, Log, Modell, ModellSensor,
+                               PegelForecasts, SensorData)
+
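+# Number of most recent forecast issue times kept per sensor/member combination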
+NUM_RECENT_INPUT_FORECASTS = 3
+
+
+class ForecastMonitor:
+    def __init__(self, username, password, dsn):
+        self.db_params = {
+            "user": username,
+            "password": password,
+            "dsn": dsn 
+        }
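+        # Open a single oracledb connection and hand it to SQLAlchemy via the
+        # creator hook, so every engine checkout reuses this one connection.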
+        self.con = oracledb.connect(**self.db_params)
+        self.engine = create_engine("oracle+oracledb://", creator=lambda: self.con)
+        
+        self.app = Dash(__name__)
+        self.setup_layout()
+        self.setup_callbacks()
+
+    def get_model_name_from_path(self, modelldatei):
+        if not modelldatei:
+            return None
+        return os.path.basename(modelldatei)
+
+    def get_active_models_status(self):
+        try:
+            now = datetime.now()
+            last_required_hour = now.replace(minute=0, second=0, microsecond=0)
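+            # Round down to the most recent 3-hour boundary; current forecasts
+            # are checked against that timestamp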
+            while last_required_hour.hour % 3 != 0:
+                last_required_hour -= timedelta(hours=1)
+            
+            with Session(self.engine) as session:
+                active_models = session.query(Modell).filter(Modell.aktiv == 1).all()
+                
+                model_status = []
+                for model in active_models:
+                    actual_model_name = self.get_model_name_from_path(model.modelldatei)
+                    if not actual_model_name:
+                        continue
+                    
+                    current_forecast = session.query(PegelForecasts).filter(
+                        and_(
+                            PegelForecasts.model == actual_model_name,
+                            PegelForecasts.tstamp == last_required_hour
+                        )
+                    ).first()
+                    
+                    last_valid_forecast = session.query(PegelForecasts).filter(
+                        and_(
+                            PegelForecasts.model == actual_model_name,
+                            PegelForecasts.h1 != None
+                        )
+                    ).order_by(PegelForecasts.tstamp.desc()).first()
+                    
+                    model_status.append({
+                        'model_name': model.modellname,
+                        'actual_model_name': actual_model_name,
+                        'model_id': model.id,  # Added for historical data lookup
+                        'sensor_name': last_valid_forecast.sensor_name if last_valid_forecast else None,
+                        'has_current_forecast': current_forecast is not None,
+                        'last_forecast_time': last_valid_forecast.tstamp if last_valid_forecast else None,
+                        'forecast_created': last_valid_forecast.created if last_valid_forecast else None,
+                    })
+            
+            return {
+                'model_status': model_status,
+                'last_check_time': now,
+                'required_timestamp': last_required_hour
+            }
+            
+        except Exception as e:
+            print(f"Error getting model status: {str(e)}")
+            return None
+
+    def get_input_forecasts(self, sensor_names):
+        """Get 3 most recent input forecasts for the given sensor names"""
+        try:
+            with Session(self.engine) as session:
+                # Subquery to rank rows by timestamp for each sensor/member combination
+                subq = (
+                    select(
+                        InputForecasts,
+                        func.row_number()
+                        .over(
+                            partition_by=[InputForecasts.sensor_name, InputForecasts.member],
+                            order_by=InputForecasts.tstamp.desc()
+                        ).label('rn')
+                    )
+                    .where(InputForecasts.sensor_name.in_(sensor_names))
+                    .subquery()
+                )
+                
+                # Main query to get only the top 3 rows
+                stmt = (
+                    select(subq)
+                    .where(subq.c.rn <= NUM_RECENT_INPUT_FORECASTS)
+                    .order_by(
+                        subq.c.sensor_name,
+                        subq.c.member,
+                        subq.c.tstamp.desc()
+                    )
+                )
+                
+                df = pd.read_sql(sql=stmt, con=self.engine)
+                df.drop(columns=['rn'], inplace=True)
+                return df
+
+        except Exception as e:
+            raise RuntimeError(f"Error getting input forecasts: {str(e)}")
+
+    
+    def get_recent_logs(self, sensor_name):
+        if sensor_name is None:
+            return []
+        try:
+            with Session(self.engine) as session:
+                logs = session.query(Log).filter(
+                    Log.gauge == sensor_name
+                ).order_by(
+                    desc(Log.created)
+                ).limit(10).all()
+                
+                return [
+                    {
+                        'timestamp': log.created.strftime('%Y-%m-%d %H:%M:%S'),
+                        'level': log.loglevelname,
+                        'sensor': log.gauge,
+                        'message': log.message,
+                        'module': log.module,
+                        'function': log.funcname,
+                        'line': log.lineno,
+                        'exception': log.exception or ''
+                    }
+                    for log in logs
+                ]
+        except Exception as e:
+            print(f"Error getting logs: {str(e)}")
+            return []
+
+    def get_historical_data(self, model_id):  # TODO: external forecast
+        """Get last 144 hours of sensor data for all sensors associated with the model"""
+        try:
+            with Session(self.engine) as session:
+                # Get all sensors for this model
+                model_sensors = session.query(ModellSensor).filter(
+                    ModellSensor.modell_id == model_id
+                ).all()
+                
+                sensor_names = [ms.sensor_name for ms in model_sensors]
+                
+                # Get last 144 hours of sensor data
+                time_threshold = datetime.now() - timedelta(hours=144)
+                time_threshold = pd.to_datetime("2024-09-13 14:00:00.000") - timedelta(hours=144)  # TODO: remove this debug override
+
+                stmt = select(SensorData).where(
+                    SensorData.tstamp >= time_threshold,
+                    SensorData.sensor_name.in_(sensor_names))
+                
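+                # Read long-format rows and pivot to one column per sensor,
+                # ordered like the model's sensor list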
+                df = pd.read_sql(sql=stmt, con=self.engine, index_col="tstamp")
+                df = df.pivot(columns="sensor_name", values="sensor_value")[sensor_names]
+                
+                return df
+                
+        except Exception as e:
+            print(f"Error getting historical data: {str(e)}")
+            return pd.DataFrame()
+            
+    def setup_layout(self):
+        self.app.layout = html.Div([
+            # Add CSS for resizable columns in the app's assets folder instead of inline
+            html.Div([
+                html.H1("Forecast Monitoring Dashboard", style={'padding': '20px'}),
+                
+                # Main container with resizable columns
+                html.Div([
+                    # Left column
+                    html.Div([
+                        html.Div([
+                            html.H3("Active Models Status"),
+                            html.Div(id='model-status-table'),
+                            dcc.Interval(
+                                id='status-update',
+                                interval=600000,  # Update every 10 minutes
+                            )
+                        ]),
+                        
+                        html.Div([
+                            html.H3("Status Summary"),
+                            dcc.Graph(id='status-summary-chart')
+                        ]),
+                    ], id='left-column', className='column', style={
+                        'width': '40%',
+                        'minWidth': '200px',
+                        'height': 'calc(100vh - 100px)',
+                        'overflow': 'auto'
+                    }),
+                    
+                    # Resizer
+                    html.Div(id='resizer', style={
+                        'cursor': 'col-resize',
+                        'width': '10px',
+                        'backgroundColor': '#f0f0f0',
+                        'transition': 'background-color 0.3s',
+                        # Note: ':hover' cannot be expressed in inline styles;
+                        # hover styling belongs in a CSS file under assets/
+                    }),
+                    # Right column
+                    html.Div([
+                        dcc.Store(id='current-sensor-names'),  # Store current sensor names
+                        
+                        create_collapsible_section(
+                            "Recent Logs",
+                            html.Div(id='log-view'),
+                            is_open=True
+                        ),
+                        create_collapsible_section(
+                            "Input Forecasts",
+                            html.Div(id='inp-fcst-view'),
+                            is_open=True
+                        ),
+                        create_collapsible_section(
+                            "Historical Data",
+                            html.Div(id='historical-view'),
+                            is_open=True
+                        ),
+                        
+                    ], id='right-column', className='column', style={
+                        'width': '60%',
+                        'minWidth': '400px',
+                        'height': 'calc(100vh - 100px)',
+                        'overflow': 'auto'
+                    }),
+
+                ], style={
+                    'display': 'flex',
+                    'flexDirection': 'row',
+                    'width': '100%',
+                    'height': 'calc(100vh - 100px)'
+                }),
+                
+                # Add JavaScript for column resizing using dcc.Store to trigger clientside callback
+                dcc.Store(id='column-widths', data={'left': 40, 'right': 60}),
+            ]),
+        ])
+
+        # Add clientside callback for resizing
+        self.app.clientside_callback(
+            """
+            function(trigger) {
+                if (!window.resizeInitialized) {
+                    const resizer = document.getElementById('resizer');
+                    const leftColumn = document.getElementById('left-column');
+                    const rightColumn = document.getElementById('right-column');
+                    
+                    let x = 0;
+                    let leftWidth = 0;
+                    
+                    const mouseDownHandler = function(e) {
+                        x = e.clientX;
+                        leftWidth = leftColumn.getBoundingClientRect().width;
+                        
+                        document.addEventListener('mousemove', mouseMoveHandler);
+                        document.addEventListener('mouseup', mouseUpHandler);
+                    };
+                    
+                    const mouseMoveHandler = function(e) {
+                        const dx = e.clientX - x;
+                        const newLeftWidth = ((leftWidth + dx) / resizer.parentNode.getBoundingClientRect().width) * 100;
+                        
+                        if (newLeftWidth > 20 && newLeftWidth < 80) {
+                            leftColumn.style.width = `${newLeftWidth}%`;
+                            rightColumn.style.width = `${100 - newLeftWidth}%`;
+                        }
+                    };
+                    
+                    const mouseUpHandler = function() {
+                        document.removeEventListener('mousemove', mouseMoveHandler);
+                        document.removeEventListener('mouseup', mouseUpHandler);
+                    };
+                    
+                    resizer.addEventListener('mousedown', mouseDownHandler);
+                    window.resizeInitialized = true;
+                }
+                return window.dash_clientside.no_update;
+            }
+            """,
+            Output('column-widths', 'data'),
+            Input('column-widths', 'data'),
+        )
+
+
+    def setup_callbacks(self):
+        @self.app.callback(
+            [Output('model-status-table', 'children'),
+             Output('status-summary-chart', 'figure')],
+            Input('status-update', 'n_intervals')
+        )
+        def update_dashboard(n):
+            status = self.get_active_models_status()
+            if not status:
+                return html.Div("Error fetching data"), go.Figure()
+            
+            header = html.Div([
+                html.H4(f"Status as of {status['last_check_time'].strftime('%Y-%m-%d %H:%M:%S')}"),
+                html.P(f"Checking forecasts for timestamp: {status['required_timestamp'].strftime('%Y-%m-%d %H:%M:00')}")
+            ])
+            
+            table = dash_table.DataTable(
+                id='status-table',
+                columns=[
+                    {'name': 'Model', 'id': 'model_name'},
+                    {'name': 'Status', 'id': 'has_current_forecast'},
+                    {'name': 'Last Valid', 'id': 'last_forecast_time'},
+                    {'name': 'Created', 'id': 'forecast_created'},
+                    {'name': 'Target Sensor', 'id': 'sensor_name'},
+                    {'name': 'model_id', 'id': 'model_id', 'hideable': True}
+                ],
+                data=[{
+                    'model_name': row['model_name'],
+                    'has_current_forecast': '✓' if row['has_current_forecast'] else '✗',
+                    'last_forecast_time': row['last_forecast_time'].strftime('%Y-%m-%d %H:%M:%S') if row['last_forecast_time'] else 'No valid forecast',
+                    'forecast_created': row['forecast_created'].strftime('%Y-%m-%d %H:%M:%S') if row['forecast_created'] else 'N/A',
+                    'sensor_name': row['sensor_name'],
+                    'model_id': row['model_id']
+                } for row in status['model_status']],
+                style_data_conditional=[
+                    {
+                        'if': {'filter_query': '{has_current_forecast} = "✓"', "column_id": "has_current_forecast"},
+                        'color': 'green'
+                    },
+                    {
+                        'if': {'filter_query': '{has_current_forecast} = "✗"', "column_id": "has_current_forecast"},
+                        'color': 'red'
+                    }
+                ],
+                style_table={'overflowX': 'auto'},
+                style_cell={
+                    'textAlign': 'left',
+                    'padding': '8px',
+                    'whiteSpace': 'normal',
+                    'height': 'auto',
+                    'fontSize': '14px',
+                },
+                style_header={
+                    'backgroundColor': '#f4f4f4',
+                    'fontWeight': 'bold'
+                },
+                row_selectable='single',
+                selected_rows=[],
+            )
+            
+            df_status = pd.DataFrame(status['model_status'])
+            fig = px.bar(
+                df_status.groupby('model_name')['has_current_forecast'].apply(lambda x: (x.sum()/len(x))*100).reset_index(),
+                x='model_name',
+                y='has_current_forecast',
+                title='Forecast Completion Rate by Model (%)',
+                labels={'has_current_forecast': 'Completion Rate (%)', 'model_name': 'Model Name'}
+            )
+            fig.update_layout(yaxis_range=[0, 100])
+            
+            return html.Div([header, table]), fig
+        
+
+
+        @self.app.callback(
+            [Output('log-view', 'children'),
+            Output('historical-view', 'children'),
+            Output('inp-fcst-view', 'children'),
+            Output('current-sensor-names', 'data')],  # sensor names of the selected model
+            [Input('status-table', 'selected_rows')],
+            [State('status-table', 'data')]
+        )
+        def update_right_column(selected_rows, table_data):
+            if not selected_rows:
+                return (html.Div("Select a model to view logs"),
+                        html.Div("Select a model to view Input Forecasts"),
+                        html.Div("Select a model to view historical data"),
+                        None)  # Removed input forecasts return
+            
+            selected_row = table_data[selected_rows[0]]
+            sensor_name = selected_row['sensor_name']
+            model_id = selected_row['model_id']
+            model_name = selected_row['model_name']
+            
+            # Get logs
+            logs = self.get_recent_logs(sensor_name)
+            log_table = create_log_table(logs)
+            log_view = html.Div([
+                html.H4(f"Recent Logs for {model_name}"),
+                log_table
+            ])
+
+            # Get historical data
+            df_historical = self.get_historical_data(model_id)
+            sensor_names = list(df_historical.columns)
+            
+            if not df_historical.empty:
+                fig = create_historical_plot(df_historical, model_name)
+                historical_table = create_historical_table(df_historical)
+                historical_view = html.Div([
+                    html.H4(f"Historical Data for {model_name}"),
+                    dcc.Graph(figure=fig),
+                    html.H4("Sensor Data Table", style={'marginTop': '20px', 'marginBottom': '10px'}),
+                    html.Div(historical_table, style={'width': '100%', 'padding': '10px'})
+                ])
+            else:
+                historical_view = html.Div("No historical data available")
+
+            # Get Input Forecasts
+            df_inp_fcst = self.get_input_forecasts(sensor_names)
+            if not df_inp_fcst.empty:
+                fig_fcst = create_input_forecasts_plot(df_inp_fcst, df_historical)
+                inp_fcst_table = create_inp_forecast_status_table(df_inp_fcst)
+                inp_fcst_view = html.Div([
+                    html.H4(f"Input Forecasts for {model_name}"),
+                    dcc.Graph(figure=fig_fcst),
+                    html.H4("Input Forecast Status", style={'marginTop': '20px', 'marginBottom': '10px'}),
+                    html.Div(inp_fcst_table, style={'width': '100%', 'padding': '10px'})
+                ])
+            else:
+                inp_fcst_view = html.Div("No input forecasts available")
+            
+            return (log_view,
+                    historical_view,
+                    inp_fcst_view,
+                    sensor_names)
+
+
+
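+        # Pattern-matching callback: a single registration toggles every
+        # collapsible section created by create_collapsible_section (MATCH on 'section')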
+        @self.app.callback(
+            Output({'type': 'collapse-content', 'section': MATCH}, 'style'),
+            Output({'type': 'collapse-button', 'section': MATCH}, 'children'),
+            Input({'type': 'collapse-button', 'section': MATCH}, 'n_clicks'),
+            State({'type': 'collapse-content', 'section': MATCH}, 'style'),
+            prevent_initial_call=True
+        )
+        def toggle_collapse(n_clicks, current_style):
+            if current_style is None:
+                current_style = {}
+            
+            if current_style.get('display') == 'none':
+                return {'display': 'block'}, '▼'
+            else:
+                return {'display': 'none'}, '▶'
+    
+    def run(self, host='0.0.0.0', port=8050, debug=True):
+        self.app.run_server(host=host, port=port, debug=debug)
+
+if __name__ == '__main__':
+    monitor = ForecastMonitor(
+        username="c##mspils",
+        password="cobalt_deviancy",
+        dsn="localhost/XE"
+    )
+    monitor.run()
\ No newline at end of file
diff --git a/src/utils/db_tools.py b/src/utils/db_tools.py
index 043c2e2a911345b5259d004765c3b857ec222a9d..151ae09efae3fdcf44fda9b10465280e05a3151a 100644
--- a/src/utils/db_tools.py
+++ b/src/utils/db_tools.py
@@ -5,7 +5,6 @@ A collection of classes and functions for interacting with the oracle based Wavo
 import logging
 from datetime import datetime
 from pathlib import Path
-from re import I
 from typing import List
 import warnings