10 changes: 9 additions & 1 deletion EssentialCSharp.Chat.Shared/Models/BookContentChunk.cs
@@ -32,11 +32,19 @@ public sealed class BookContentChunk
public string ChunkText { get; set; } = string.Empty;

/// <summary>
/// Chapter number extracted from filename (e.g., "Chapter01.md" -> 1)
/// Chapter number extracted from filename (e.g., "Chapter01.md" -> 1).
/// Null for files that do not follow the ChapterNN naming pattern.
/// </summary>
[VectorStoreData]
public int? ChapterNumber { get; set; }

/// <summary>
/// Zero-based ordinal of this chunk within its source file.
/// Together with FileName, forms the basis for the deterministic Id.
/// </summary>
[VectorStoreData]
public int ChunkIndex { get; set; }

/// <summary>
/// SHA256 hash of the chunk content for change detection
/// </summary>
26 changes: 22 additions & 4 deletions EssentialCSharp.Chat.Shared/Services/AISearchService.cs
@@ -1,4 +1,4 @@
using System.Diagnostics;
using EssentialCSharp.Chat.Common.Models;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.VectorData;
@@ -35,11 +35,29 @@ public async Task<IReadOnlyList<VectorSearchResult<BookContentChunk>>> ExecuteVe
{
try
{
var results = new List<VectorSearchResult<BookContentChunk>>();
await foreach (var result in collection.SearchAsync(searchVector, options: vectorSearchOptions, top: top, cancellationToken: cancellationToken))
// Fetch more candidates than needed so we can deduplicate by heading.
// Multiple chunks from the same section share the same Heading; without dedup
// all top-N results could come from one long section, reducing context diversity.
int candidates = top * 3;

var candidatesList = new List<VectorSearchResult<BookContentChunk>>();
await foreach (var result in collection.SearchAsync(searchVector, options: vectorSearchOptions, top: candidates, cancellationToken: cancellationToken))
{
results.Add(result);
candidatesList.Add(result);
}

// Keep only the highest-scoring chunk per unique heading, then take the globally
// top-N by score. GroupBy on a materialized list preserves insertion (score desc)
// order, but we make the ordering explicit via OrderByDescending so the result
// is correct regardless of provider sort guarantees.
// MaxBy on a non-empty IGrouping never returns null; ! asserts this invariant.
var results = candidatesList
.GroupBy(r => r.Record.Heading)
.Select(g => g.MaxBy(r => r.Score)!)
.OrderByDescending(r => r.Score)
.Take(top)
.ToList();

return results;
}
catch (PostgresException ex) when (ex.SqlState == "28000" && attempt == 0)
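As an aside, the dedup-then-rank step above can be seen in isolation with made-up headings and scores (illustrative values only, not real search results):

```csharp
using System;
using System.Collections.Generic;
using System.Linq;

var hits = new List<(string Heading, double Score)>
{
    ("Boxing", 0.91), ("Boxing", 0.89), ("Generics", 0.88),
    ("Boxing", 0.84), ("Delegates", 0.80),
};

int top = 2;
var picked = hits
    .GroupBy(h => h.Heading)            // one group per section heading
    .Select(g => g.MaxBy(h => h.Score)) // best-scoring chunk per section
    .OrderByDescending(h => h.Score)    // explicit global ordering
    .Take(top)
    .ToList();

foreach (var (heading, score) in picked)
    Console.WriteLine($"{heading}: {score}"); // Boxing: 0.91, Generics: 0.88
```

Without the GroupBy, the top two hits would both come from the "Boxing" section; with it, the results span two sections.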
38 changes: 17 additions & 21 deletions EssentialCSharp.Chat.Shared/Services/ChunkingResultExtensions.cs
@@ -1,27 +1,35 @@
using System.Security.Cryptography;
using System.Text;
using System.Linq;
using EssentialCSharp.Chat.Common.Models;

namespace EssentialCSharp.Chat.Common.Services;

public static partial class ChunkingResultExtensions
{
/// <summary>
/// Converts a <see cref="FileChunkingResult"/> into a list of <see cref="BookContentChunk"/> records
/// ready for embedding and vector store upload.
/// </summary>
/// <remarks>
/// <see cref="BookContentChunk.ChapterNumber"/> is set to null for files that do not match
/// the <c>ChapterNN</c> naming pattern (e.g. appendix or non-chapter markdown files).
/// </remarks>
public static List<BookContentChunk> ToBookContentChunks(this FileChunkingResult result)
{
int? chapterNumber = ExtractChapterNumber(result.FileName);

var chunks = result.Chunks
.Select(chunkText =>
.Select((markdownChunk, index) =>
{
var contentHash = ComputeSha256Hash(chunkText);
var contentHash = ComputeSha256Hash(markdownChunk.ChunkText);
return new BookContentChunk
{
Id = Guid.NewGuid().ToString(),
Id = $"{result.FileName}_{index}",
FileName = result.FileName,
Heading = ExtractHeading(chunkText),
ChunkText = chunkText,
Heading = markdownChunk.Heading,
ChunkText = markdownChunk.ChunkText,
ChapterNumber = chapterNumber,
ChunkIndex = index,
ContentHash = contentHash
};
})
@@ -30,25 +38,13 @@ public static List<BookContentChunk> ToBookContentChunks(this FileChunkingResult
return chunks;
}

private static string ExtractHeading(string chunkText)
private static int? ExtractChapterNumber(string fileName)
{
// get characters until the first " - " or newline
var firstLine = chunkText.Split(["\r\n", "\r", "\n"], StringSplitOptions.None)[0];
var headingParts = firstLine.Split([" - "], StringSplitOptions.None);
return headingParts.Length > 0 ? headingParts[0].Trim() : string.Empty;
}

private static int ExtractChapterNumber(string fileName)
{
// Example: "Chapter01.md" -> 1
// Regex: Chapter(?<ChapterNumber>[0-9]{2})
// Example: "Chapter01.md" -> 1; non-chapter files return null.
var match = ChapterNumberRegex().Match(fileName);
if (match.Success && int.TryParse(match.Groups["ChapterNumber"].Value, out int chapterNumber))
{
return chapterNumber;
}
throw new InvalidOperationException($"File name '{fileName}' does not contain a valid chapter number in the expected format.");
return null;
}

private static string ComputeSha256Hash(string text)
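The body of ComputeSha256Hash is collapsed in this diff. A conventional implementation consistent with the System.Security.Cryptography and System.Text usings at the top of the file would be (a sketch only; the PR's actual body is not shown):

```csharp
private static string ComputeSha256Hash(string text)
{
    // SHA256.HashData (static, .NET 5+) avoids allocating a hasher per call.
    byte[] hash = SHA256.HashData(Encoding.UTF8.GetBytes(text));
    return Convert.ToHexString(hash);
}
```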
142 changes: 122 additions & 20 deletions EssentialCSharp.Chat.Shared/Services/EmbeddingService.cs
@@ -1,51 +1,153 @@
using System.Text.RegularExpressions;
using EssentialCSharp.Chat.Common.Models;
using Microsoft.Extensions.AI;
using Microsoft.Extensions.VectorData;
using Npgsql;

namespace EssentialCSharp.Chat.Common.Services;

/// <summary>
/// Service for generating embeddings for markdown chunks using Azure OpenAI
/// Service for generating embeddings for markdown chunks using Azure OpenAI and uploading
/// them to a PostgreSQL vector store via a staging-then-swap pattern to avoid downtime.
/// </summary>
public class EmbeddingService(VectorStore vectorStore, IEmbeddingGenerator<string, Embedding<float>> embeddingGenerator)
public class EmbeddingService(
VectorStore vectorStore,
IEmbeddingGenerator<string, Embedding<float>> embeddingGenerator,
NpgsqlDataSource dataSource)
{
public static string CollectionName { get; } = "markdown_chunks";

/// <summary>
/// Maximum number of inputs per Azure OpenAI embedding batch call.
/// </summary>
private const int EmbeddingBatchSize = 2048;

// Only allow simple identifiers: letters, digits, and underscores, starting with a letter or underscore.
private static readonly Regex _safeIdentifierRegex = new(@"^[a-zA-Z_][a-zA-Z0-9_]*$", RegexOptions.Compiled);

/// <summary>
/// Generate an embedding for the given text.
/// </summary>
/// <param name="text">The text to generate an embedding for.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>A search vector as ReadOnlyMemory&lt;float&gt;.</returns>
public async Task<ReadOnlyMemory<float>> GenerateEmbeddingAsync(string text, CancellationToken cancellationToken = default)
{
var embedding = await embeddingGenerator.GenerateAsync(text, cancellationToken: cancellationToken);
return embedding.Vector;
}

/// <summary>
/// Generate an embedding for each text paragraph and upload it to the specified collection.
/// Generate embeddings for all chunks in batches and upload them to the vector store
/// using a staging-then-atomic-swap pattern so the live collection stays queryable
/// throughout the rebuild.
///
/// Steps:
/// 1. Create a staging collection ({collectionName}_staging).
/// 2. Embed all chunks in batches of <see cref="EmbeddingBatchSize"/> (Azure OpenAI limit).
/// 3. Batch-upsert all chunks into staging.
/// 4. Atomically swap tables in a single transaction using two SQL RENAME operations
/// (live → old, staging → live). PostgreSQL ALTER TABLE acquires
/// AccessExclusiveLock automatically; no explicit LOCK TABLE is needed. The
/// transaction ensures no reader sees an intermediate state.
/// 5. Drop the old live backup table with DROP TABLE.
///
/// If an error occurs before the swap, only the staging table is affected — the live
/// collection is untouched.
/// </summary>
/// <param name="collectionName">The name of the collection to upload the chunks to.</param>
/// <returns>An async task.</returns>
public async Task GenerateBookContentEmbeddingsAndUploadToVectorStore(IEnumerable<BookContentChunk> bookContents, CancellationToken cancellationToken, string? collectionName = null)
public async Task GenerateBookContentEmbeddingsAndUploadToVectorStore(
IEnumerable<BookContentChunk> bookContents,
CancellationToken cancellationToken,
string? collectionName = null)
{
collectionName ??= CollectionName;

var collection = vectorStore.GetCollection<string, BookContentChunk>(collectionName);
await collection.EnsureCollectionDeletedAsync(cancellationToken);
await collection.EnsureCollectionExistsAsync(cancellationToken);
if (!_safeIdentifierRegex.IsMatch(collectionName))
throw new ArgumentException(
$"Collection name '{collectionName}' contains unsafe characters. Use only letters, digits, and underscores.",
nameof(collectionName));

string stagingName = $"{collectionName}_staging";
string oldName = $"{collectionName}_old";

// ── Step 1: Prepare staging collection ────────────────────────────────────────
var staging = vectorStore.GetCollection<string, BookContentChunk>(stagingName);
await staging.EnsureCollectionDeletedAsync(cancellationToken);
await staging.EnsureCollectionExistsAsync(cancellationToken);

// ── Step 2: Batch-embed all chunks ────────────────────────────────────────────
// Azure OpenAI supports at most EmbeddingBatchSize inputs per GenerateAsync call.
var chunkList = bookContents.ToList();
var texts = chunkList.Select(c => c.ChunkText).ToList();

int uploadedCount = 0;
var allEmbeddings = new List<Embedding<float>>(chunkList.Count);
foreach (var batch in texts.Chunk(EmbeddingBatchSize))
{
var batchEmbeddings = await embeddingGenerator.GenerateAsync(batch, cancellationToken: cancellationToken);
allEmbeddings.AddRange(batchEmbeddings);
}

foreach (var chunk in bookContents)
if (allEmbeddings.Count != chunkList.Count)
throw new InvalidOperationException(
$"Embedding count mismatch: expected {chunkList.Count}, got {allEmbeddings.Count}.");

for (int i = 0; i < chunkList.Count; i++)
{
cancellationToken.ThrowIfCancellationRequested();
chunk.TextEmbedding = await GenerateEmbeddingAsync(chunk.ChunkText, cancellationToken);
await collection.UpsertAsync(chunk, cancellationToken);
Console.WriteLine($"Uploaded chunk '{chunk.Id}' to collection '{collectionName}' for file '{chunk.FileName}' with heading '{chunk.Heading}'.");
uploadedCount++;
chunkList[i].TextEmbedding = allEmbeddings[i].Vector;
}
Console.WriteLine($"Successfully generated embeddings and uploaded {uploadedCount} chunks to collection '{collectionName}'.");

// ── Step 3: Batch-upsert all chunks into staging ──────────────────────────────
try
{
await staging.UpsertAsync(chunkList, cancellationToken);
Console.WriteLine($"Uploaded {chunkList.Count} chunks to staging collection '{stagingName}'.");
}
catch
{
// Best-effort cleanup: drop the partially-populated staging table so the
// next run starts clean. Do not let this secondary failure mask the original.
try
{
await staging.EnsureCollectionDeletedAsync(cancellationToken);
}
catch (Exception cleanupEx) when (cleanupEx is not OperationCanceledException)
{
Console.Error.WriteLine($"Warning: failed to clean up staging collection '{stagingName}' after upsert failure: {cleanupEx.Message}");
}
throw;
}

// ── Step 4: Atomic swap — staging → live ──────────────────────────────────────
// Two ALTER TABLE RENAME operations in one transaction (live → old, staging → live).
// Each RENAME auto-acquires AccessExclusiveLock on its table; the transaction
// guarantees both renames are visible atomically to other sessions.
await using var conn = await dataSource.OpenConnectionAsync(cancellationToken);
await using var tx = await conn.BeginTransactionAsync(cancellationToken);

await using (var cmd = conn.CreateCommand())
{
cmd.Transaction = tx;

// Drop any leftover backup from a previous run
cmd.CommandText = $"DROP TABLE IF EXISTS \"{oldName}\"";
await cmd.ExecuteNonQueryAsync(cancellationToken);

// Rename live → old. IF EXISTS is a no-op on first run when no live table exists.
cmd.CommandText = $"ALTER TABLE IF EXISTS \"{collectionName}\" RENAME TO \"{oldName}\"";
await cmd.ExecuteNonQueryAsync(cancellationToken);

// Rename staging → live
cmd.CommandText = $"ALTER TABLE \"{stagingName}\" RENAME TO \"{collectionName}\"";
await cmd.ExecuteNonQueryAsync(cancellationToken);
Copilot AI commented on lines +128 to +138 (Apr 26, 2026):

These SQL statements interpolate collectionName/derived names directly into identifier-quoted SQL. If collectionName can be influenced outside trusted code, this becomes identifier-injection (quotes can be escaped/broken). Consider restricting collectionName to a safe identifier regex (e.g., letters/digits/underscore) before composing SQL, and use Npgsql's identifier-quoting helper to build the final identifiers consistently.
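A minimal sketch of the reviewer's suggestion (not part of the PR): validate the name against the same safe-identifier regex the class already uses, then let Npgsql quote it rather than interpolating raw strings. The helper name here is hypothetical.

```csharp
using System.Text.RegularExpressions;
using Npgsql;

static class IdentifierGuard // hypothetical helper, not in the PR
{
    private static readonly Regex SafeIdentifier =
        new(@"^[a-zA-Z_][a-zA-Z0-9_]*$", RegexOptions.Compiled);

    public static string QuoteOrThrow(string name)
    {
        if (!SafeIdentifier.IsMatch(name))
            throw new ArgumentException($"Unsafe identifier: '{name}'", nameof(name));
        // NpgsqlCommandBuilder.QuoteIdentifier wraps the name in double quotes and
        // doubles any embedded quotes, so it cannot break out of the identifier.
        return new NpgsqlCommandBuilder().QuoteIdentifier(name);
    }
}

// Usage inside the swap (names as in the PR):
// cmd.CommandText = $"ALTER TABLE {IdentifierGuard.QuoteOrThrow(stagingName)} " +
//                   $"RENAME TO {IdentifierGuard.QuoteOrThrow(collectionName)}";
```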
}

await tx.CommitAsync(cancellationToken);
Console.WriteLine($"Swapped '{stagingName}' → '{collectionName}' atomically.");

// ── Step 5: Drop the old backup ───────────────────────────────────────────────
await using (var cmd = conn.CreateCommand())
{
cmd.CommandText = $"DROP TABLE IF EXISTS \"{oldName}\"";
await cmd.ExecuteNonQueryAsync(cancellationToken);
}

Console.WriteLine($"Successfully generated embeddings and uploaded {chunkList.Count} chunks to collection '{collectionName}'.");
}
}
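For reference on step 2's batching: Enumerable.Chunk yields consecutive fixed-size batches with a smaller final batch, which is what keeps each GenerateAsync call under the Azure OpenAI input limit. A quick illustration with hypothetical counts:

```csharp
using System;
using System.Linq;

// 5000 inputs with a 2048-item limit produce batches of 2048, 2048, and 904.
var texts = Enumerable.Range(0, 5000).Select(i => $"chunk {i}").ToList();
foreach (var batch in texts.Chunk(2048))
    Console.WriteLine(batch.Length); // 2048, 2048, 904
```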
9 changes: 8 additions & 1 deletion EssentialCSharp.Chat.Shared/Services/FileChunkingResult.cs
@@ -1,5 +1,12 @@
namespace EssentialCSharp.Chat.Common.Services;

/// <summary>
/// A single chunk from a markdown file, paired with the section heading it belongs to.
/// </summary>
/// <param name="Heading">Full breadcrumb heading for the section (e.g. "Chapter: 1: Intro: Summary").</param>
/// <param name="ChunkText">The raw chunk text, including the "Heading - " prefix prepended by TextChunker.</param>
public record MarkdownChunk(string Heading, string ChunkText);

/// <summary>
/// Data structure to hold chunking results for a single file
/// </summary>
@@ -9,6 +16,6 @@ public class FileChunkingResult
public string FilePath { get; set; } = string.Empty;
public int OriginalCharCount { get; set; }
public int ChunkCount { get; set; }
public List<string> Chunks { get; set; } = [];
public List<MarkdownChunk> Chunks { get; set; } = [];
public int TotalChunkCharacters { get; set; }
}
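A quick sketch of how these records flow into ToBookContentChunks and the deterministic Id scheme (illustrative file name and chunk text only):

```csharp
// Illustrative values; assumes the ToBookContentChunks extension from this PR.
var result = new FileChunkingResult
{
    FileName = "Chapter01.md",
    Chunks =
    [
        new MarkdownChunk("Chapter 1: Intro", "Chapter 1: Intro - first chunk text"),
        new MarkdownChunk("Chapter 1: Intro", "Chapter 1: Intro - second chunk text"),
    ],
};

var chunks = result.ToBookContentChunks();
// chunks[0].Id == "Chapter01.md_0", chunks[1].Id == "Chapter01.md_1"
// chunks[0].ChapterNumber == 1  (parsed from the ChapterNN pattern)
// chunks[0].ChunkIndex == 0, chunks[1].ChunkIndex == 1
```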
37 changes: 26 additions & 11 deletions EssentialCSharp.Chat.Shared/Services/MarkdownChunkingService.cs
@@ -61,15 +61,24 @@ public async Task<List<FileChunkingResult>> ProcessMarkdownFilesAsync(
public FileChunkingResult ProcessSingleMarkdownFile(
string[] fileContent, string fileName, string filePath)
{
// Remove all multiple empty lines so there is no more than one empty line between paragraphs
string[] lines = [.. fileContent
.Select(line => line.Trim())
.Where(line => !string.IsNullOrWhiteSpace(line))];

// Collapse consecutive blank lines to at most one blank line. Single blank lines must
// be preserved because TextChunker.SplitMarkdownParagraphs uses them as paragraph
// separators — stripping all blanks defeats paragraph-aware chunking.
var normalizedLines = new List<string>(fileContent.Length);
bool lastWasBlank = false;
foreach (var raw in fileContent)
{
var line = raw.Trim();
var isBlank = string.IsNullOrWhiteSpace(line);
if (!isBlank || !lastWasBlank)
normalizedLines.Add(line);
lastWasBlank = isBlank;
}
string[] lines = [.. normalizedLines];
string content = string.Join(Environment.NewLine, lines);

var sections = MarkdownContentToHeadersAndSection(content);
var allChunks = new List<string>();
var allChunks = new List<MarkdownChunk>();
int totalChunkCharacters = 0;
int chunkCount = 0;

@@ -83,7 +92,7 @@ public FileChunkingResult ProcessSingleMarkdownFile(
chunkHeader: Header + " - "
);
#pragma warning restore SKEXP0050
allChunks.AddRange(chunks);
allChunks.AddRange(chunks.Select(c => new MarkdownChunk(Header, c)));
chunkCount += chunks.Count;
totalChunkCharacters += chunks.Sum(c => c.Length);
}
@@ -155,18 +164,24 @@ public FileChunkingResult ProcessSingleMarkdownFile(
}
i++;

// Collect content until next header
// Collect content until next header, preserving blank lines as paragraph separators
// for TextChunker.SplitMarkdownParagraphs.
var contentLines = new List<string>();
while (i < lines.Length && !headerRegex.IsMatch(lines[i]))
{
if (!string.IsNullOrWhiteSpace(lines[i]))
contentLines.Add(lines[i]);
contentLines.Add(lines[i]);
i++;
}

// Strip leading and trailing blank lines; keep internal blanks for paragraph detection.
while (contentLines.Count > 0 && string.IsNullOrWhiteSpace(contentLines[0]))
contentLines.RemoveAt(0);
while (contentLines.Count > 0 && string.IsNullOrWhiteSpace(contentLines[^1]))
contentLines.RemoveAt(contentLines.Count - 1);

// Compose full header context
var fullHeader = string.Join(": ", headerStack.Select(h => h.Text));
if (contentLines.Count > 0)
if (contentLines.Any(l => !string.IsNullOrWhiteSpace(l)))
sections.Add((fullHeader, contentLines));
}
return sections;
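To make the blank-line normalization in ProcessSingleMarkdownFile concrete, here is a standalone run of the same loop with made-up input:

```csharp
using System;
using System.Collections.Generic;

// Same loop as the PR: collapse runs of blank lines to a single blank line.
string[] fileContent = ["# Heading", "", "", "", "Para one.", "", "Para two."];
var normalizedLines = new List<string>(fileContent.Length);
bool lastWasBlank = false;
foreach (var raw in fileContent)
{
    var line = raw.Trim();
    var isBlank = string.IsNullOrWhiteSpace(line);
    if (!isBlank || !lastWasBlank)
        normalizedLines.Add(line);
    lastWasBlank = isBlank;
}
Console.WriteLine(string.Join("|", normalizedLines));
// Output: # Heading||Para one.||Para two.
```

The single surviving blank between paragraphs is what lets TextChunker.SplitMarkdownParagraphs keep its paragraph boundaries.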