Parsing database logs from Dynamics F&O
Update 2026-04-10 SQL
This is an update to the Power Query method shown below. It is a big improvement: it pushes all of the heavy lifting upstream to the SQL engine, rather than making Power BI process it all locally. The main idea is exactly the same: split the update string on the delimiter, batch the tokens into groups of three, number the positions within each group, and call them "Field", "Value", and "PreviousValue".
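To make the shape concrete, here is a minimal sketch against an invented Update value (the sample string, field names, and values are all made up; real F&O log entries will vary, but the triplet structure is the same):

-- Demo only: an invented Update value holding two changes
DECLARE @u nvarchar(max) = N'BankAccountÿþ12345ÿþ99999ÿþVendorNameÿþContoso LtdÿþContoso';
SELECT
(ordinal - 1) / 3 AS GroupNum, -- 0 = the BankAccount change, 1 = the VendorName change
(ordinal - 1) % 3 AS Position, -- 0 = field, 1 = new value, 2 = previous value
TRIM(value) AS Token
FROM STRING_SPLIT(REPLACE(@u, N'ÿþ', N'|'), N'|', 1);

The full query against the log table follows.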
-- Claude Opus 4.5 (claude-opus-4-5-20251101) - 2026-04-10
-- Parse F&O Database Log entries into Field/Value/PreviousValue columns
WITH
-- First, assign a unique row number to each log entry
NumberedRows AS (
SELECT
ROW_NUMBER() OVER (ORDER BY [Created date and time], [Record Identification]) AS RowId,
[Table],
[TableName],
UPPER([Company]) AS [Company],
[Record Identification],
[Type of Change],
[Update],
[Created date and time],
[Change Originated By]
FROM F.VendorsDatabaseLog
WHERE [Created date and time] > DATEADD(year, -1, GETDATE())
),
-- Split the Update column on the delimiter, one row per token (e.g. one Update value can break out into 52 rows)
Tokenised AS (
SELECT
RowId,
ordinal AS TokenIndex, -- take STRING_SPLIT's ordinal column; ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) gave no ordering guarantee
TRIM(value) AS Token,
[Table],
[TableName],
[Company],
[Record Identification],
[Type of Change],
[Created date and time],
[Change Originated By]
FROM NumberedRows
CROSS APPLY STRING_SPLIT(REPLACE([Update], N'ÿþ', N'|'), N'|', 1) -- enable_ordinal = 1 requires SQL Server 2022+ or Azure SQL
),
-- The updates are logged in groups of three
Grouped AS (
SELECT
RowId,
(TokenIndex - 1) / 3 AS GroupNum, -- This is the group (e.g. the Bank Account change)
(TokenIndex - 1) % 3 AS Position, -- This is the position (i.e. field, value, previousvalue)
TRIM(REPLACE(Token, ',', '')) AS Token, -- Strip out commas
[Table],
[TableName],
[Company],
[Record Identification],
[Type of Change],
[Created date and time],
[Change Originated By]
FROM Tokenised
WHERE Token IS NOT NULL AND Token <> '' -- Drop empty tokens; Token is already trimmed above
),
-- Pivot each group of three tokens back into one row per change
Fields AS (
SELECT
[Table],
[TableName],
[Company],
[Record Identification],
[Type of Change],
[Created date and time],
[Change Originated By],
MAX(CASE WHEN Position = 0 THEN Token END) AS Field, -- each Position is non-NULL on exactly one row per group; MAX collapses them
MAX(CASE WHEN Position = 1 THEN Token END) AS Value,
MAX(CASE WHEN Position = 2 THEN Token END) AS PreviousValue
FROM Grouped
GROUP BY RowId, [Table], [TableName], [Company], [Record Identification], [Type of Change], [Created date and time], [Change Originated By], GroupNum
)
SELECT * FROM Fields
WHERE
[Value] <> [PreviousValue] -- Only pick up the values that actually did change
AND [Field] NOT IN (
'RecVersion' -- Don't care about version changes
, 'ModifiedDateTime'
, 'ModifiedBy'
)
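One caveat: the final argument to STRING_SPLIT (enable_ordinal) needs SQL Server 2022 or Azure SQL. On older versions STRING_SPLIT gives no ordering guarantee at all, so a different order-preserving split is needed. Here is a sketch using OPENJSON, whose [key] column is the zero-based array index (the sample string is invented):

-- Order-preserving split for SQL Server 2017-2019 (sketch)
DECLARE @s nvarchar(max) = N'AccountNumÿþ12345ÿþ67890';
SELECT
CAST([key] AS int) + 1 AS TokenIndex, -- [key] holds the array position, so order is reliable
TRIM(value) AS Token
FROM OPENJSON(N'["' + REPLACE(STRING_ESCAPE(@s, 'json'), N'ÿþ', N'","') + N'"]');

Swap the OPENJSON call in for STRING_SPLIT in the Tokenised CTE, use CAST([key] AS int) + 1 in place of ordinal, and the rest of the query works unchanged.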
Original 2025-10-02 Power Query
parseDatabaseLogUpdate = (updateText as text) as table =>
let
// define what the delimiter character is
delim = "ÿþ",
split = List.Transform(
Text.Split(updateText, delim),
Text.Trim
),
#"Converted to Table" = Table.FromList(split, Splitter.SplitByNothing(), null, null, ExtraValues.Error),
index = Table.AddIndexColumn(#"Converted to Table", "Index", 0, 1, Int64.Type),
// the changes happen in sets of three. This adds a "batch index" so that the rows can be batched up into sets of three.
addBatchIndex = Table.TransformColumns(
index,
{
{
"Index",
each Number.IntegerDivide(_, 3),
type number
}
}
),
groupByBatch = Table.Group(
addBatchIndex,
{"Index"},
{{"Table", each [Column1], type list}}
),
// the first element in each batch is the field name; the second is the new value; the third is the previous value
createRecord = Table.TransformColumns(
groupByBatch,
{
{
"Table",
each [
Field = _{0}?,
Value1 = _{1}?,
Value2 = _{2}?
]
}
}
),
#"Expanded Table" = Table.ExpandRecordColumn(
createRecord,
"Table",
{"Field", "Value1", "Value2"},
{"Field", "Value1", "Value2"}
),
// only interested in records where there is a change from Value1 to Value2
changesOnly = Table.SelectRows(
#"Expanded Table",
each ([Value1] <> [Value2])
),
cleanUpCommas = Table.ReplaceValue(changesOnly,",","",Replacer.ReplaceText,{"Field"})
in
cleanUpCommas
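A sketch of how the function might be wired up in a query. The source table and the [Update] column name are assumptions carried over from the SQL version above:

let
Source = VendorsDatabaseLog,
addParsed = Table.AddColumn(Source, "Parsed", each parseDatabaseLogUpdate([Update]), type table),
expanded = Table.ExpandTableColumn(addParsed, "Parsed", {"Field", "Value1", "Value2"})
in
expanded

Each log row gets a nested table of parsed changes, and Table.ExpandTableColumn flattens that back out to one row per changed field.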