Updated Documentation with MCP Setup and Data Integration Steps
Our project combines three main components:
- Database location: D:\\Projects\\LegalTextAnalysis\\data\\legal_text.db
- Verified table structure: legal_cases, text_metrics, tf_idf_scores
We successfully configured MCP after troubleshooting:
{ "mcpServers": { "sqlite": { "command": "uvx", "args": ["mcp-server-sqlite", "--db-path", "./data/legal_text.db"], "cwd": "D:/Projects/LegalTextAnalysis" } }}
# Verify database existence
Test-Path "D:\\Projects\\LegalTextAnalysis\\data\\legal_text.db"

# Start MCP server
uvx mcp-server-sqlite --db-path "./data/legal_text.db"
To import your legal_text_classification.csv into the database:
import pandas as pd
import sqlite3
def load_legal_data(csv_path='legal_text_classification.csv',
                    db_path='D:/Projects/LegalTextAnalysis/data/legal_text.db'):
    """Append the rows of a legal-text CSV into the legal_cases table.

    Parameters
    ----------
    csv_path : str
        Path to the CSV file to import (default matches the project layout).
    db_path : str
        Path to the SQLite database file (default matches the project layout).

    Returns
    -------
    str
        A success message once the rows have been written.
    """
    # Read the CSV file
    df = pd.read_csv(csv_path)
    # Connect to the database
    conn = sqlite3.connect(db_path)
    try:
        # Load into legal_cases table; 'append' preserves existing rows,
        # so repeated calls accumulate data rather than replacing it.
        df.to_sql('legal_cases', conn, if_exists='append', index=False)
    finally:
        # sqlite3 connections are not closed by garbage collection promptly;
        # close explicitly so the db file is released.
        conn.close()
    return "Data loaded successfully!"
Your Jupyter notebooks likely contain valuable analysis. We'll consolidate it into a reusable integration module:
# analysis_integration.py
import sqlite3

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
class LegalTextIntegration:
    """Load legal-text CSV data into SQLite and record derived text metrics.

    Tables touched (per the project schema documented above):
    legal_cases (fully replaced on each import) and text_metrics
    (appended to on each update).
    """

    def __init__(self, db_path):
        # Path to the SQLite database file (a filesystem path or ':memory:').
        self.db_path = db_path

    def connect_db(self):
        """Open and return a new connection to the configured database.

        Callers are responsible for closing the returned connection.
        """
        return sqlite3.connect(self.db_path)

    def import_csv(self, csv_path):
        """Replace the legal_cases table with the contents of *csv_path*.

        Uses if_exists='replace', so any previously imported rows are
        dropped — this is a full re-import, not an incremental load.
        """
        df = pd.read_csv(csv_path)
        conn = self.connect_db()
        try:
            # `with conn:` scopes a transaction (commit on success,
            # rollback on error) but does NOT close the connection —
            # hence the explicit close() in the finally clause.
            with conn:
                df.to_sql('legal_cases', conn,
                          if_exists='replace',
                          index=False)
        finally:
            conn.close()

    def update_metrics(self, metrics_df):
        """Append the rows of *metrics_df* to the text_metrics table."""
        conn = self.connect_db()
        try:
            with conn:
                metrics_df.to_sql('text_metrics', conn,
                                  if_exists='append',
                                  index=False)
        finally:
            conn.close()