合并图像处理库,删除图像lib库

This commit is contained in:
李伟
2026-04-13 13:40:37 +08:00
parent 2a762396d5
commit c7ce4ea6a1
105 changed files with 16341 additions and 133 deletions
@@ -0,0 +1,257 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: ColorLayerProcessor.cs
// 描述: 色彩分层算子,将灰度图像按亮度区间分层
// 功能:
// - 将灰度图像按指定层数均匀分层
// - 支持自定义分层数(2~16层)
// - 支持均匀分层和基于 Otsu 的自适应分层
// - 可选保留原始灰度或映射为等间距灰度
// 算法: 灰度量化 / 多阈值分割
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
namespace ImageProcessing.Processors;
/// <summary>
/// Color-layering processor: partitions a grayscale image into a configurable
/// number of brightness bands. Each band is remapped to a representative gray
/// value, or a single band can be extracted as a binary mask.
/// </summary>
public class ColorLayerProcessor : ImageProcessorBase
{
    private static readonly ILogger _logger = Log.ForContext<ColorLayerProcessor>();

    /// <summary>Resolves the localized display name and description.</summary>
    public ColorLayerProcessor()
    {
        Name = LocalizationHelper.GetString("ColorLayerProcessor_Name");
        Description = LocalizationHelper.GetString("ColorLayerProcessor_Description");
    }

    /// <summary>
    /// Registers parameters:
    /// Layers (2..16) - number of brightness bands;
    /// Method - "Uniform" (equal-width bands) or "Otsu" (recursive multi-threshold);
    /// OutputMode - "EqualSpaced" gray levels or "MidValue" band midpoints;
    /// TargetLayer - 0 outputs all bands, 1..N outputs a binary mask of band N.
    /// </summary>
    protected override void InitializeParameters()
    {
        Parameters.Add("Layers", new ProcessorParameter(
            "Layers",
            LocalizationHelper.GetString("ColorLayerProcessor_Layers"),
            typeof(int),
            4,
            2,
            16,
            LocalizationHelper.GetString("ColorLayerProcessor_Layers_Desc")));
        Parameters.Add("Method", new ProcessorParameter(
            "Method",
            LocalizationHelper.GetString("ColorLayerProcessor_Method"),
            typeof(string),
            "Uniform",
            null,
            null,
            LocalizationHelper.GetString("ColorLayerProcessor_Method_Desc"),
            new string[] { "Uniform", "Otsu" }));
        Parameters.Add("OutputMode", new ProcessorParameter(
            "OutputMode",
            LocalizationHelper.GetString("ColorLayerProcessor_OutputMode"),
            typeof(string),
            "EqualSpaced",
            null,
            null,
            LocalizationHelper.GetString("ColorLayerProcessor_OutputMode_Desc"),
            new string[] { "EqualSpaced", "MidValue" }));
        Parameters.Add("TargetLayer", new ProcessorParameter(
            "TargetLayer",
            LocalizationHelper.GetString("ColorLayerProcessor_TargetLayer"),
            typeof(int),
            0,
            0,
            16,
            LocalizationHelper.GetString("ColorLayerProcessor_TargetLayer_Desc")));
        _logger.Debug("InitializeParameters");
    }

    /// <summary>
    /// Applies the layering: computes thresholds, then maps every pixel to its
    /// band's output gray value - or to 255/0 when a single TargetLayer is selected.
    /// </summary>
    public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
    {
        int layers = GetParameter<int>("Layers");
        string method = GetParameter<string>("Method");
        string outputMode = GetParameter<string>("OutputMode");
        int targetLayer = GetParameter<int>("TargetLayer");
        // Out-of-range TargetLayer falls back to "all bands".
        if (targetLayer < 0 || targetLayer > layers)
            targetLayer = 0;
        _logger.Debug("Process: Layers={Layers}, Method={Method}, OutputMode={OutputMode}, TargetLayer={TargetLayer}",
            layers, method, outputMode, targetLayer);
        // Band thresholds.
        byte[] thresholds = method == "Otsu"
            ? ComputeOtsuMultiThresholds(inputImage, layers)
            : ComputeUniformThresholds(layers);
        // FIX: recursive Otsu can legally return fewer than (layers - 1) thresholds
        // on degenerate histograms (an empty sub-range stops the recursion early),
        // which previously made ComputeLayerValues index past the array in
        // "MidValue" mode. Derive the effective band count from the thresholds
        // actually produced.
        int effectiveLayers = thresholds.Length + 1;
        if (targetLayer > effectiveLayers)
            targetLayer = 0;
        // Output gray value per band.
        byte[] layerValues = ComputeLayerValues(thresholds, effectiveLayers, outputMode);
        int width = inputImage.Width;
        int height = inputImage.Height;
        var result = new Image<Gray, byte>(width, height);
        var srcData = inputImage.Data;
        var dstData = result.Data;
        if (targetLayer == 0)
        {
            // Output all bands.
            Parallel.For(0, height, y =>
            {
                for (int x = 0; x < width; x++)
                {
                    byte pixel = srcData[y, x, 0];
                    int layerIdx = GetLayerIndex(pixel, thresholds);
                    dstData[y, x, 0] = layerValues[layerIdx];
                }
            });
        }
        else
        {
            // Single-band mask: selected band -> 255 (white), everything else -> 0 (black).
            int target = targetLayer - 1; // parameter is 1-based, internal index is 0-based
            Parallel.For(0, height, y =>
            {
                for (int x = 0; x < width; x++)
                {
                    byte pixel = srcData[y, x, 0];
                    int layerIdx = GetLayerIndex(pixel, thresholds);
                    dstData[y, x, 0] = (layerIdx == target) ? (byte)255 : (byte)0;
                }
            });
        }
        _logger.Debug("Process completed: {Layers} layers, target={TargetLayer}", layers, targetLayer);
        return result;
    }

    /// <summary>
    /// Uniform thresholds: divides [0, 255] into equal-width bands,
    /// producing (layers - 1) cut points.
    /// </summary>
    private static byte[] ComputeUniformThresholds(int layers)
    {
        var thresholds = new byte[layers - 1];
        double step = 256.0 / layers;
        for (int i = 0; i < layers - 1; i++)
            thresholds[i] = (byte)Math.Clamp((int)((i + 1) * step), 0, 255);
        return thresholds;
    }

    /// <summary>
    /// Otsu-based multi-thresholding via recursive bisection of the histogram.
    /// May return fewer than (layers - 1) thresholds for degenerate histograms.
    /// </summary>
    private static byte[] ComputeOtsuMultiThresholds(Image<Gray, byte> image, int layers)
    {
        // Histogram of the whole image.
        int[] histogram = new int[256];
        var data = image.Data;
        int h = image.Height, w = image.Width;
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
                histogram[data[y, x, 0]]++;
        // Recursive Otsu splitting.
        var thresholds = new List<byte>();
        RecursiveOtsu(histogram, 0, 255, layers, thresholds);
        thresholds.Sort();
        return thresholds.ToArray();
    }

    /// <summary>
    /// Finds the Otsu threshold (maximum between-class variance) inside
    /// [low, high], then recursively splits both halves until the requested
    /// layer budget is exhausted or a sub-range is empty.
    /// </summary>
    private static void RecursiveOtsu(int[] histogram, int low, int high, int layers, List<byte> thresholds)
    {
        if (layers <= 1 || low >= high)
            return;
        long totalPixels = 0;
        long totalSum = 0;
        for (int i = low; i <= high; i++)
        {
            totalPixels += histogram[i];
            totalSum += (long)i * histogram[i];
        }
        if (totalPixels == 0) return; // empty sub-range: stop early
        long bgPixels = 0, bgSum = 0;
        double maxVariance = 0;
        int bestThreshold = (low + high) / 2;
        for (int t = low; t < high; t++)
        {
            bgPixels += histogram[t];
            bgSum += (long)t * histogram[t];
            long fgPixels = totalPixels - bgPixels;
            if (bgPixels == 0 || fgPixels == 0) continue;
            double bgMean = (double)bgSum / bgPixels;
            double fgMean = (double)(totalSum - bgSum) / fgPixels;
            // Between-class variance; the common 1/total^2 factor is omitted
            // because it does not change the argmax.
            double variance = (double)bgPixels * fgPixels * (bgMean - fgMean) * (bgMean - fgMean);
            if (variance > maxVariance)
            {
                maxVariance = variance;
                bestThreshold = t;
            }
        }
        thresholds.Add((byte)bestThreshold);
        // Split the remaining layer budget between the two halves.
        int leftLayers = layers / 2;
        int rightLayers = layers - leftLayers;
        RecursiveOtsu(histogram, low, bestThreshold, leftLayers, thresholds);
        RecursiveOtsu(histogram, bestThreshold + 1, high, rightLayers, thresholds);
    }

    /// <summary>
    /// Output gray value for each of <paramref name="layers"/> bands, where the
    /// caller guarantees layers == thresholds.Length + 1.
    /// "EqualSpaced": 0, 255/(n-1), ..., 255. "MidValue": midpoint of each band.
    /// </summary>
    private static byte[] ComputeLayerValues(byte[] thresholds, int layers, string outputMode)
    {
        var values = new byte[layers];
        // Degenerate single band (no thresholds): mid-gray. Also avoids the
        // division by zero in the EqualSpaced formula below.
        if (layers == 1)
        {
            values[0] = 128;
            return values;
        }
        if (outputMode == "EqualSpaced")
        {
            for (int i = 0; i < layers; i++)
                values[i] = (byte)Math.Clamp((int)(255.0 * i / (layers - 1)), 0, 255);
        }
        else // MidValue
        {
            values[0] = (byte)(thresholds[0] / 2);
            for (int i = 1; i < layers - 1; i++)
                values[i] = (byte)((thresholds[i - 1] + thresholds[i]) / 2);
            values[layers - 1] = (byte)((thresholds[^1] + 255) / 2);
        }
        return values;
    }

    /// <summary>
    /// Index of the band containing <paramref name="pixel"/>: the first i with
    /// pixel &lt; thresholds[i], or thresholds.Length for the top band.
    /// </summary>
    private static int GetLayerIndex(byte pixel, byte[] thresholds)
    {
        for (int i = 0; i < thresholds.Length; i++)
        {
            if (pixel < thresholds[i])
                return i;
        }
        return thresholds.Length;
    }
}
@@ -0,0 +1,172 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: ContrastProcessor.cs
// 描述: 对比度调整算子,用于增强图像对比度
// 功能:
// - 线性对比度和亮度调整
// - 自动对比度拉伸
// - CLAHE(对比度受限自适应直方图均衡化)
// - 支持多种对比度增强方法
// 算法: 线性变换、直方图均衡化、CLAHE
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
using System.Drawing;
namespace ImageProcessing.Processors;
/// <summary>
/// Contrast adjustment processor: linear contrast/brightness, automatic
/// min-max stretch, or CLAHE (contrast-limited adaptive histogram
/// equalization). CLAHE takes precedence over AutoContrast, which takes
/// precedence over the linear adjustment.
/// </summary>
public class ContrastProcessor : ImageProcessorBase
{
    private static readonly ILogger _logger = Log.ForContext<ContrastProcessor>();

    /// <summary>Resolves the localized display name and description.</summary>
    public ContrastProcessor()
    {
        Name = LocalizationHelper.GetString("ContrastProcessor_Name");
        Description = LocalizationHelper.GetString("ContrastProcessor_Description");
    }

    /// <summary>
    /// Registers parameters: Contrast (0.1..3), Brightness (-100..100),
    /// AutoContrast, UseCLAHE and ClipLimit (1..10, only used by CLAHE).
    /// </summary>
    protected override void InitializeParameters()
    {
        Parameters.Add("Contrast", new ProcessorParameter(
            "Contrast",
            LocalizationHelper.GetString("ContrastProcessor_Contrast"),
            typeof(double),
            1.0,
            0.1,
            3.0,
            LocalizationHelper.GetString("ContrastProcessor_Contrast_Desc")));
        Parameters.Add("Brightness", new ProcessorParameter(
            "Brightness",
            LocalizationHelper.GetString("ContrastProcessor_Brightness"),
            typeof(int),
            0,
            -100,
            100,
            LocalizationHelper.GetString("ContrastProcessor_Brightness_Desc")));
        Parameters.Add("AutoContrast", new ProcessorParameter(
            "AutoContrast",
            LocalizationHelper.GetString("ContrastProcessor_AutoContrast"),
            typeof(bool),
            false,
            null,
            null,
            LocalizationHelper.GetString("ContrastProcessor_AutoContrast_Desc")));
        Parameters.Add("UseCLAHE", new ProcessorParameter(
            "UseCLAHE",
            LocalizationHelper.GetString("ContrastProcessor_UseCLAHE"),
            typeof(bool),
            false,
            null,
            null,
            LocalizationHelper.GetString("ContrastProcessor_UseCLAHE_Desc")));
        Parameters.Add("ClipLimit", new ProcessorParameter(
            "ClipLimit",
            LocalizationHelper.GetString("ContrastProcessor_ClipLimit"),
            typeof(double),
            2.0,
            1.0,
            10.0,
            LocalizationHelper.GetString("ContrastProcessor_ClipLimit_Desc")));
        _logger.Debug("InitializeParameters");
    }

    /// <summary>
    /// Applies the selected contrast enhancement and returns a new image.
    /// </summary>
    public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
    {
        double contrast = GetParameter<double>("Contrast");
        int brightness = GetParameter<int>("Brightness");
        bool autoContrast = GetParameter<bool>("AutoContrast");
        bool useCLAHE = GetParameter<bool>("UseCLAHE");
        double clipLimit = GetParameter<double>("ClipLimit");
        // FIX: the previous version cloned the input up front and then overwrote
        // the reference in every branch, leaking the clone (Emgu images are
        // IDisposable). Declare the result and assign it exactly once.
        Image<Gray, byte> result;
        if (useCLAHE)
        {
            result = ApplyCLAHE(inputImage, clipLimit);
        }
        else if (autoContrast)
        {
            result = AutoContrastStretch(inputImage);
        }
        else
        {
            // Linear transform: output = input * contrast + brightness (saturating).
            result = inputImage * contrast + brightness;
        }
        _logger.Debug("Process: Contrast = {contrast},Brightness = {brightness}," +
        "AutoContrast = {autoContrast},UseCLAHE = {useCLAHE}, ClipLimit = {clipLimit}", contrast, brightness, autoContrast, useCLAHE, clipLimit);
        return result;
    }

    /// <summary>
    /// Linearly stretches the histogram so the darkest pixel maps to 0 and the
    /// brightest to 255; returns an unchanged clone when the image already
    /// spans the full range.
    /// </summary>
    private Image<Gray, byte> AutoContrastStretch(Image<Gray, byte> inputImage)
    {
        double minVal = 0, maxVal = 0;
        Point minLoc = new Point();
        Point maxLoc = new Point();
        CvInvoke.MinMaxLoc(inputImage, ref minVal, ref maxVal, ref minLoc, ref maxLoc);
        if (minVal == 0 && maxVal == 255)
        {
            return inputImage.Clone();
        }
        _logger.Debug("AutoContrastStretch");
        // FIX: dispose the intermediate float images instead of leaking them.
        using var floatImage = inputImage.Convert<Gray, float>();
        if (maxVal > minVal)
        {
            using var stretched = (floatImage - minVal) * (255.0 / (maxVal - minVal));
            return stretched.Convert<Gray, byte>();
        }
        return floatImage.Convert<Gray, byte>();
    }

    /// <summary>
    /// Contrast-limited adaptive histogram equalization.
    /// FIX: the previous implementation ran a plain EqualizeHist on independent
    /// 8x8-pixel tiles, which ignored <paramref name="clipLimit"/> completely
    /// and produced visible tile seams (no contrast clipping, no bilinear
    /// interpolation between tiles). Delegate to OpenCV's real CLAHE so the
    /// ClipLimit parameter actually takes effect.
    /// </summary>
    private Image<Gray, byte> ApplyCLAHE(Image<Gray, byte> inputImage, double clipLimit)
    {
        var result = new Image<Gray, byte>(inputImage.Size);
        // 8x8 tile grid is OpenCV's conventional default.
        CvInvoke.CLAHE(inputImage, clipLimit, new Size(8, 8), result);
        _logger.Debug("ApplyCLAHE");
        return result;
    }
}
@@ -0,0 +1,100 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: GammaProcessor.cs
// 描述: Gamma校正算子,用于调整图像亮度和对比度
// 功能:
// - Gamma非线性校正
// - 增益调整
// - 使用查找表(LUT)加速处理
// - 适用于图像显示和亮度调整
// 算法: Gamma校正公式 output = (input^(1/gamma)) * gain
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
namespace ImageProcessing.Processors;
/// <summary>
/// Gamma correction processor: output = ((input / 255) ^ (1 / gamma)) * gain * 255,
/// applied through a precomputed 256-entry lookup table (LUT).
/// </summary>
public class GammaProcessor : ImageProcessorBase
{
    // 256-entry LUT mapping an input gray level to its corrected value.
    private readonly byte[] _lookupTable;
    // Parameters the current LUT was built for (NaN = not built yet), so the
    // table is only rebuilt when Gamma/Gain actually change between calls.
    private double _cachedGamma = double.NaN;
    private double _cachedGain = double.NaN;
    private static readonly ILogger _logger = Log.ForContext<GammaProcessor>();

    /// <summary>Resolves the localized display name/description and allocates the LUT.</summary>
    public GammaProcessor()
    {
        Name = LocalizationHelper.GetString("GammaProcessor_Name");
        Description = LocalizationHelper.GetString("GammaProcessor_Description");
        _lookupTable = new byte[256];
    }

    /// <summary>
    /// Registers parameters: Gamma (0.1..5, default 1) and Gain (0.1..3, default 1).
    /// </summary>
    protected override void InitializeParameters()
    {
        Parameters.Add("Gamma", new ProcessorParameter(
            "Gamma",
            LocalizationHelper.GetString("GammaProcessor_Gamma"),
            typeof(double),
            1.0,
            0.1,
            5.0,
            LocalizationHelper.GetString("GammaProcessor_Gamma_Desc")));
        Parameters.Add("Gain", new ProcessorParameter(
            "Gain",
            LocalizationHelper.GetString("GammaProcessor_Gain"),
            typeof(double),
            1.0,
            0.1,
            3.0,
            LocalizationHelper.GetString("GammaProcessor_Gain_Desc")));
        _logger.Debug("InitializeParameters");
    }

    /// <summary>
    /// Applies gamma/gain correction to a clone of the input and returns it.
    /// </summary>
    public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
    {
        double gamma = GetParameter<double>("Gamma");
        double gain = GetParameter<double>("Gain");
        // Rebuild the LUT only when the parameters changed since the last call.
        if (gamma != _cachedGamma || gain != _cachedGain)
        {
            BuildLookupTable(gamma, gain);
            _cachedGamma = gamma;
            _cachedGain = gain;
        }
        var result = inputImage.Clone();
        ApplyLookupTable(result);
        // FIX: named Serilog properties instead of positional {0}/{1}, for
        // consistency with the structured logging in the sibling processors.
        _logger.Debug("Process: Gamma = {Gamma}, Gain = {Gain}", gamma, gain);
        return result;
    }

    /// <summary>
    /// Fills the LUT with clamp(((i / 255) ^ (1 / gamma)) * gain * 255) for
    /// every gray level i in [0, 255].
    /// </summary>
    private void BuildLookupTable(double gamma, double gain)
    {
        double invGamma = 1.0 / gamma;
        for (int i = 0; i < 256; i++)
        {
            double normalized = i / 255.0;
            double corrected = Math.Pow(normalized, invGamma) * gain;
            int value = (int)(corrected * 255.0);
            _lookupTable[i] = (byte)Math.Max(0, Math.Min(255, value));
        }
        _logger.Debug("Gamma and gain values recorded: gamma = {Gamma}, gain = {Gain}", gamma, gain);
    }

    /// <summary>
    /// Maps every pixel of <paramref name="image"/> in place through the LUT.
    /// </summary>
    private void ApplyLookupTable(Image<Gray, byte> image)
    {
        int width = image.Width;
        int height = image.Height;
        // Hoisted: Image<,>.Data is a property access; the previous code
        // re-read it twice per pixel.
        var data = image.Data;
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                data[y, x, 0] = _lookupTable[data[y, x, 0]];
            }
        }
    }
}
@@ -0,0 +1,549 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: HDREnhancementProcessor.cs
// 描述: 高动态范围(HDR)图像增强算子
// 功能:
// - 局部色调映射(Local Tone Mapping
// - 自适应对数映射(Adaptive Logarithmic Mapping
// - Drago色调映射
// - 双边滤波色调映射
// - 增强图像暗部和亮部细节
// 算法: 基于色调映射的HDR增强
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
namespace ImageProcessing.Processors;
/// <summary>
/// High dynamic range (HDR) image enhancement processor.
/// Compresses scene dynamic range with one of four tone-mapping methods
/// (LocalToneMap, AdaptiveLog, Drago, BilateralToneMap) to recover detail
/// in shadows and highlights.
/// </summary>
public class HDREnhancementProcessor : ImageProcessorBase
{
private static readonly ILogger _logger = Log.ForContext<HDREnhancementProcessor>();
/// <summary>Resolves the localized display name and description.</summary>
public HDREnhancementProcessor()
{
Name = LocalizationHelper.GetString("HDREnhancementProcessor_Name");
Description = LocalizationHelper.GetString("HDREnhancementProcessor_Description");
}
/// <summary>
/// Registers parameters: Method, Gamma (0.1..5), Saturation (0..3),
/// DetailBoost (0..5), SigmaSpace (1..100), SigmaColor (1..100) and
/// Bias (0..1, shadow/highlight balance for the log-based mappings).
/// </summary>
protected override void InitializeParameters()
{
Parameters.Add("Method", new ProcessorParameter(
"Method",
LocalizationHelper.GetString("HDREnhancementProcessor_Method"),
typeof(string),
"LocalToneMap",
null,
null,
LocalizationHelper.GetString("HDREnhancementProcessor_Method_Desc"),
new string[] { "LocalToneMap", "AdaptiveLog", "Drago", "BilateralToneMap" }));
Parameters.Add("Gamma", new ProcessorParameter(
"Gamma",
LocalizationHelper.GetString("HDREnhancementProcessor_Gamma"),
typeof(double),
1.0,
0.1,
5.0,
LocalizationHelper.GetString("HDREnhancementProcessor_Gamma_Desc")));
Parameters.Add("Saturation", new ProcessorParameter(
"Saturation",
LocalizationHelper.GetString("HDREnhancementProcessor_Saturation"),
typeof(double),
1.0,
0.0,
3.0,
LocalizationHelper.GetString("HDREnhancementProcessor_Saturation_Desc")));
Parameters.Add("DetailBoost", new ProcessorParameter(
"DetailBoost",
LocalizationHelper.GetString("HDREnhancementProcessor_DetailBoost"),
typeof(double),
1.5,
0.0,
5.0,
LocalizationHelper.GetString("HDREnhancementProcessor_DetailBoost_Desc")));
Parameters.Add("SigmaSpace", new ProcessorParameter(
"SigmaSpace",
LocalizationHelper.GetString("HDREnhancementProcessor_SigmaSpace"),
typeof(double),
20.0,
1.0,
100.0,
LocalizationHelper.GetString("HDREnhancementProcessor_SigmaSpace_Desc")));
Parameters.Add("SigmaColor", new ProcessorParameter(
"SigmaColor",
LocalizationHelper.GetString("HDREnhancementProcessor_SigmaColor"),
typeof(double),
30.0,
1.0,
100.0,
LocalizationHelper.GetString("HDREnhancementProcessor_SigmaColor_Desc")));
Parameters.Add("Bias", new ProcessorParameter(
"Bias",
LocalizationHelper.GetString("HDREnhancementProcessor_Bias"),
typeof(double),
0.85,
0.0,
1.0,
LocalizationHelper.GetString("HDREnhancementProcessor_Bias_Desc")));
_logger.Debug("InitializeParameters");
}
/// <summary>
/// Dispatches to the tone-mapping implementation selected by the Method
/// parameter (unknown values fall back to LocalToneMap) and returns a new image.
/// </summary>
public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
{
string method = GetParameter<string>("Method");
double gamma = GetParameter<double>("Gamma");
double saturation = GetParameter<double>("Saturation");
double detailBoost = GetParameter<double>("DetailBoost");
double sigmaSpace = GetParameter<double>("SigmaSpace");
double sigmaColor = GetParameter<double>("SigmaColor");
double bias = GetParameter<double>("Bias");
Image<Gray, byte> result;
switch (method)
{
case "AdaptiveLog":
result = AdaptiveLogarithmicMapping(inputImage, gamma, bias);
break;
case "Drago":
result = DragoToneMapping(inputImage, gamma, bias);
break;
case "BilateralToneMap":
result = BilateralToneMapping(inputImage, gamma, sigmaSpace, sigmaColor, detailBoost);
break;
default: // LocalToneMap
result = LocalToneMapping(inputImage, gamma, sigmaSpace, detailBoost, saturation);
break;
}
_logger.Debug("Process: Method={Method}, Gamma={Gamma}, Saturation={Saturation}, DetailBoost={DetailBoost}, SigmaSpace={SigmaSpace}, SigmaColor={SigmaColor}, Bias={Bias}",
method, gamma, saturation, detailBoost, sigmaSpace, sigmaColor, bias);
return result;
}
/// <summary>
/// Local tone mapping.
/// Decomposes the image into a base (illumination) layer and a detail layer,
/// processes each separately, then recombines:
/// Base = GaussianBlur(log(I))
/// Detail = log(I) - Base
/// Output = exp(Base_compressed + Detail * boost)
/// </summary>
private Image<Gray, byte> LocalToneMapping(Image<Gray, byte> inputImage,
double gamma, double sigmaSpace, double detailBoost, double saturation)
{
int width = inputImage.Width;
int height = inputImage.Height;
// Convert to float and normalize into (0, 1]; the +0.001 offset avoids log(0).
var floatImage = inputImage.Convert<Gray, float>();
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
floatImage.Data[y, x, 0] = floatImage.Data[y, x, 0] / 255.0f + 0.001f;
// Log domain.
var logImage = new Image<Gray, float>(width, height);
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
logImage.Data[y, x, 0] = (float)Math.Log(floatImage.Data[y, x, 0]);
// Base layer: large-scale Gaussian blur extracts the illumination component.
int kernelSize = (int)(sigmaSpace * 6) | 1; // | 1 forces an odd kernel size
if (kernelSize < 3) kernelSize = 3;
var baseLayer = new Image<Gray, float>(width, height);
CvInvoke.GaussianBlur(logImage, baseLayer, new System.Drawing.Size(kernelSize, kernelSize), sigmaSpace);
// Detail layer.
var detailLayer = logImage - baseLayer;
// Compress the dynamic range of the base layer.
double baseMin = double.MaxValue, baseMax = double.MinValue;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
float v = baseLayer.Data[y, x, 0];
if (v < baseMin) baseMin = v;
if (v > baseMax) baseMax = v;
}
}
double baseRange = baseMax - baseMin;
if (baseRange < 0.001) baseRange = 0.001;
// Target dynamic range (log domain).
double targetRange = Math.Log(256.0);
// NOTE(review): compressionFactor is computed but never used below — the
// compression is applied via the normalized remap instead. Candidate for removal.
double compressionFactor = targetRange / baseRange;
var compressedBase = new Image<Gray, float>(width, height);
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
float normalized = (float)((baseLayer.Data[y, x, 0] - baseMin) / baseRange);
compressedBase.Data[y, x, 0] = (float)(normalized * targetRange + Math.Log(0.01));
}
}
// Recombine: compressed base layer + boosted detail layer.
var combined = new Image<Gray, float>(width, height);
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
float val = compressedBase.Data[y, x, 0] + detailLayer.Data[y, x, 0] * (float)detailBoost;
combined.Data[y, x, 0] = val;
}
}
// Exponentiate back to the linear domain.
var linearResult = new Image<Gray, float>(width, height);
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
linearResult.Data[y, x, 0] = (float)Math.Exp(combined.Data[y, x, 0]);
// Gamma correction (skipped when |gamma - 1| <= 0.01).
if (Math.Abs(gamma - 1.0) > 0.01)
{
double invGamma = 1.0 / gamma;
double maxVal = 0;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
if (linearResult.Data[y, x, 0] > maxVal) maxVal = linearResult.Data[y, x, 0];
if (maxVal > 0)
{
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
{
double normalized = linearResult.Data[y, x, 0] / maxVal;
linearResult.Data[y, x, 0] = (float)(Math.Pow(normalized, invGamma) * maxVal);
}
}
}
// Saturation boost (for grayscale this is a contrast tweak around the mean).
if (Math.Abs(saturation - 1.0) > 0.01)
{
double mean = 0;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
mean += linearResult.Data[y, x, 0];
mean /= (width * height);
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
{
double diff = linearResult.Data[y, x, 0] - mean;
linearResult.Data[y, x, 0] = (float)(mean + diff * saturation);
}
}
// Normalize to [0, 255].
var result = NormalizeToByteImage(linearResult);
floatImage.Dispose();
logImage.Dispose();
baseLayer.Dispose();
detailLayer.Dispose();
compressedBase.Dispose();
combined.Dispose();
linearResult.Dispose();
return result;
}
/// <summary>
/// Adaptive logarithmic mapping.
/// Adapts the log mapping curve to the scene's overall brightness:
/// L_out = (log(1 + L_in) / log(1 + L_max)) ^ (1/gamma)
/// NOTE(review): despite the original comment mentioning local adaptation,
/// L_max is computed globally here, not per neighborhood — confirm intent.
/// </summary>
private Image<Gray, byte> AdaptiveLogarithmicMapping(Image<Gray, byte> inputImage,
double gamma, double bias)
{
int width = inputImage.Width;
int height = inputImage.Height;
var floatImage = inputImage.Convert<Gray, float>();
// Normalize to [0, 1].
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
floatImage.Data[y, x, 0] /= 255.0f;
// Global maximum luminance.
float globalMax = 0;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
if (floatImage.Data[y, x, 0] > globalMax)
globalMax = floatImage.Data[y, x, 0];
if (globalMax < 0.001f) globalMax = 0.001f;
// Log-average luminance (geometric mean of pixels above the near-black floor).
double logAvg = 0;
int count = 0;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
float v = floatImage.Data[y, x, 0];
if (v > 0.001f)
{
logAvg += Math.Log(v);
count++;
}
}
}
logAvg = Math.Exp(logAvg / Math.Max(count, 1));
// Adaptive logarithmic mapping;
// bias controls the balance between shadows and highlights.
double logBase = Math.Log(2.0 + 8.0 * Math.Pow(logAvg / globalMax, Math.Log(bias) / Math.Log(0.5)));
var result = new Image<Gray, float>(width, height);
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
float lum = floatImage.Data[y, x, 0];
double mapped = Math.Log(1.0 + lum) / logBase;
result.Data[y, x, 0] = (float)mapped;
}
}
// Gamma correction (skipped when |gamma - 1| <= 0.01).
if (Math.Abs(gamma - 1.0) > 0.01)
{
double invGamma = 1.0 / gamma;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
result.Data[y, x, 0] = (float)Math.Pow(Math.Max(0, result.Data[y, x, 0]), invGamma);
}
var byteResult = NormalizeToByteImage(result);
floatImage.Dispose();
result.Dispose();
return byteResult;
}
/// <summary>
/// Drago tone mapping.
/// Uses an adaptive logarithm base per pixel:
/// L_out = log_base(1 + L_in) / log10(1 + L_max)
/// base = 2 + 8 * (L_in / L_max) ^ (ln(bias) / ln(0.5))
/// </summary>
private Image<Gray, byte> DragoToneMapping(Image<Gray, byte> inputImage,
double gamma, double bias)
{
int width = inputImage.Width;
int height = inputImage.Height;
var floatImage = inputImage.Convert<Gray, float>();
// Normalize to [0, 1].
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
floatImage.Data[y, x, 0] /= 255.0f;
// Global maximum luminance.
float maxLum = 0;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
if (floatImage.Data[y, x, 0] > maxLum)
maxLum = floatImage.Data[y, x, 0];
if (maxLum < 0.001f) maxLum = 0.001f;
// NOTE(review): Bias's declared minimum is 0.0, which makes biasP infinite
// (Math.Log(0)) — confirm the UI prevents 0 or clamp before this point.
double biasP = Math.Log(bias) / Math.Log(0.5);
double divider = Math.Log10(1.0 + maxLum);
if (divider < 0.001) divider = 0.001;
var result = new Image<Gray, float>(width, height);
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
float lum = floatImage.Data[y, x, 0];
// Adaptive logarithm base.
double adaptBase = 2.0 + 8.0 * Math.Pow(lum / maxLum, biasP);
double logAdapt = Math.Log(1.0 + lum) / Math.Log(adaptBase);
double mapped = logAdapt / divider;
result.Data[y, x, 0] = (float)Math.Max(0, Math.Min(1.0, mapped));
}
}
// Gamma correction (skipped when |gamma - 1| <= 0.01).
if (Math.Abs(gamma - 1.0) > 0.01)
{
double invGamma = 1.0 / gamma;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
result.Data[y, x, 0] = (float)Math.Pow(result.Data[y, x, 0], invGamma);
}
var byteResult = NormalizeToByteImage(result);
floatImage.Dispose();
result.Dispose();
return byteResult;
}
/// <summary>
/// Bilateral-filter tone mapping.
/// Separates base and detail layers with a bilateral filter; its
/// edge-preserving smoothing makes the detail layer more accurate (fewer halos).
/// </summary>
private Image<Gray, byte> BilateralToneMapping(Image<Gray, byte> inputImage,
double gamma, double sigmaSpace, double sigmaColor, double detailBoost)
{
int width = inputImage.Width;
int height = inputImage.Height;
// Convert to float and take the logarithm (+0.001 avoids log(0)).
var floatImage = inputImage.Convert<Gray, float>();
var logImage = new Image<Gray, float>(width, height);
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
logImage.Data[y, x, 0] = (float)Math.Log(floatImage.Data[y, x, 0] / 255.0f + 0.001);
// Bilateral filter extracts the base layer (edge-preserving smoothing).
int diameter = (int)(sigmaSpace * 2) | 1; // | 1 forces an odd diameter
if (diameter < 3) diameter = 3;
if (diameter > 31) diameter = 31;
var baseLayer = new Image<Gray, float>(width, height);
// Quantize to byte for the bilateral filter, then convert back to float.
var logNorm = NormalizeToByteImage(logImage);
var baseNorm = new Image<Gray, byte>(width, height);
CvInvoke.BilateralFilter(logNorm, baseNorm, diameter, sigmaColor, sigmaSpace);
// Map the filtered base layer back into the float log domain.
double logMin = double.MaxValue, logMax = double.MinValue;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
{
float v = logImage.Data[y, x, 0];
if (v < logMin) logMin = v;
if (v > logMax) logMax = v;
}
double logRange = logMax - logMin;
if (logRange < 0.001) logRange = 0.001;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
baseLayer.Data[y, x, 0] = (float)(baseNorm.Data[y, x, 0] / 255.0 * logRange + logMin);
// Detail layer = log image - base layer.
var detailLayer = logImage - baseLayer;
// Compress the base layer.
double baseMin = double.MaxValue, baseMax = double.MinValue;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
{
float v = baseLayer.Data[y, x, 0];
if (v < baseMin) baseMin = v;
if (v > baseMax) baseMax = v;
}
double bRange = baseMax - baseMin;
if (bRange < 0.001) bRange = 0.001;
double targetRange = Math.Log(256.0);
double compression = targetRange / bRange;
// Recombine: compressed base + boosted detail.
var combined = new Image<Gray, float>(width, height);
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
{
float compBase = (float)((baseLayer.Data[y, x, 0] - baseMin) * compression + Math.Log(0.01));
combined.Data[y, x, 0] = compBase + detailLayer.Data[y, x, 0] * (float)detailBoost;
}
// Exponentiate back to the linear domain.
var linearResult = new Image<Gray, float>(width, height);
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
linearResult.Data[y, x, 0] = (float)Math.Exp(combined.Data[y, x, 0]);
// Gamma correction (skipped when |gamma - 1| <= 0.01).
if (Math.Abs(gamma - 1.0) > 0.01)
{
double invGamma = 1.0 / gamma;
double maxVal = 0;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
if (linearResult.Data[y, x, 0] > maxVal) maxVal = linearResult.Data[y, x, 0];
if (maxVal > 0)
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
linearResult.Data[y, x, 0] = (float)(Math.Pow(linearResult.Data[y, x, 0] / maxVal, invGamma) * maxVal);
}
var result = NormalizeToByteImage(linearResult);
floatImage.Dispose();
logImage.Dispose();
logNorm.Dispose();
baseNorm.Dispose();
baseLayer.Dispose();
detailLayer.Dispose();
combined.Dispose();
linearResult.Dispose();
return result;
}
/// <summary>
/// Min-max normalizes a float image into a byte image spanning [0, 255].
/// Returns an all-zero image when the input is constant (range == 0).
/// </summary>
private Image<Gray, byte> NormalizeToByteImage(Image<Gray, float> floatImage)
{
double minVal = double.MaxValue;
double maxVal = double.MinValue;
for (int y = 0; y < floatImage.Height; y++)
for (int x = 0; x < floatImage.Width; x++)
{
float val = floatImage.Data[y, x, 0];
if (val < minVal) minVal = val;
if (val > maxVal) maxVal = val;
}
var result = new Image<Gray, byte>(floatImage.Size);
double range = maxVal - minVal;
if (range > 0)
{
for (int y = 0; y < floatImage.Height; y++)
for (int x = 0; x < floatImage.Width; x++)
{
int normalized = (int)((floatImage.Data[y, x, 0] - minVal) / range * 255.0);
result.Data[y, x, 0] = (byte)Math.Max(0, Math.Min(255, normalized));
}
}
return result;
}
}
@@ -0,0 +1,213 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: HierarchicalEnhancementProcessor.cs
// 描述: 层次增强算子,基于多尺度高斯分解对不同尺度细节独立增强
// 功能:
// - 将图像分解为多层细节层 + 基础层
// - 对每层细节独立控制增益
// - 支持基础层亮度调整和对比度限制
// 算法: 多尺度高斯差分分解与重建
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
namespace ImageProcessing.Processors;
/// <summary>
/// Hierarchical enhancement processor: decomposes the image into multiple
/// detail layers plus a base layer via multi-scale Gaussian differences and
/// amplifies each scale independently.
/// </summary>
public class HierarchicalEnhancementProcessor : ImageProcessorBase
{
private static readonly ILogger _logger = Log.ForContext<HierarchicalEnhancementProcessor>();
/// <summary>Resolves the localized display name and description.</summary>
public HierarchicalEnhancementProcessor()
{
Name = LocalizationHelper.GetString("HierarchicalEnhancementProcessor_Name");
Description = LocalizationHelper.GetString("HierarchicalEnhancementProcessor_Description");
}
/// <summary>
/// Registers parameters: Levels (2..8), FineGain/MediumGain/CoarseGain
/// (0..10, detail gains from finest to coarsest scale), BaseGain (0..3)
/// and ClipLimit (0..50, 0 = no detail clipping).
/// </summary>
protected override void InitializeParameters()
{
Parameters.Add("Levels", new ProcessorParameter(
"Levels",
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_Levels"),
typeof(int),
4,
2,
8,
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_Levels_Desc")));
Parameters.Add("FineGain", new ProcessorParameter(
"FineGain",
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_FineGain"),
typeof(double),
2.0,
0.0,
10.0,
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_FineGain_Desc")));
Parameters.Add("MediumGain", new ProcessorParameter(
"MediumGain",
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_MediumGain"),
typeof(double),
1.5,
0.0,
10.0,
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_MediumGain_Desc")));
Parameters.Add("CoarseGain", new ProcessorParameter(
"CoarseGain",
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_CoarseGain"),
typeof(double),
1.0,
0.0,
10.0,
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_CoarseGain_Desc")));
Parameters.Add("BaseGain", new ProcessorParameter(
"BaseGain",
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_BaseGain"),
typeof(double),
1.0,
0.0,
3.0,
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_BaseGain_Desc")));
Parameters.Add("ClipLimit", new ProcessorParameter(
"ClipLimit",
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_ClipLimit"),
typeof(double),
0.0,
0.0,
50.0,
LocalizationHelper.GetString("HierarchicalEnhancementProcessor_ClipLimit_Desc")));
_logger.Debug("InitializeParameters");
}
/// <summary>
/// Decomposes the input with a cascade of Gaussian blurs, scales each
/// difference-of-Gaussians detail layer by an interpolated gain
/// (fine -> medium -> coarse), optionally clips detail amplitudes, and
/// rebuilds the image on top of the gained base layer.
/// </summary>
public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
{
int levels = GetParameter<int>("Levels");
double fineGain = GetParameter<double>("FineGain");
double mediumGain = GetParameter<double>("MediumGain");
double coarseGain = GetParameter<double>("CoarseGain");
double baseGain = GetParameter<double>("BaseGain");
double clipLimit = GetParameter<double>("ClipLimit");
_logger.Debug("Process: Levels={Levels}, Fine={Fine}, Medium={Medium}, Coarse={Coarse}, Base={Base}, Clip={Clip}",
levels, fineGain, mediumGain, coarseGain, baseGain, clipLimit);
int h = inputImage.Height;
int w = inputImage.Width;
// === Multi-scale Gaussian-difference decomposition (everything stays at the
// original resolution - no pyramid up/down-sampling needed) ===
// Gaussian blurs of increasing sigma build a smoothing sequence:
// G0 (original), G1, G2, ..., G_n (base layer).
// Detail layer D_i = G_i - G_{i+1}.
// Reconstruction: output = sum(D_i * gain_i) + G_n * baseGain.
// Per-level Gaussian sigma (exponentially increasing).
var sigmas = new double[levels];
for (int i = 0; i < levels; i++)
sigmas[i] = Math.Pow(2, i + 1); // 2, 4, 8, 16, ...
// Smoothing sequence stored as flat float arrays (avoids issues with Emgu's
// float Image type). [0] = original, [1..n] = successive Gaussian blurs.
var smoothLayers = new float[levels + 1][];
smoothLayers[0] = new float[h * w];
var srcData = inputImage.Data;
Parallel.For(0, h, y =>
{
int row = y * w;
for (int x = 0; x < w; x++)
smoothLayers[0][row + x] = srcData[y, x, 0];
});
for (int i = 0; i < levels; i++)
{
int ksize = ((int)(sigmas[i] * 3)) | 1; // force an odd kernel size
if (ksize < 3) ksize = 3;
using var src = new Image<Gray, byte>(w, h);
// Quantize the previous float layer to byte for the Gaussian blur.
// NOTE(review): this round-trips through 8 bits at every level - confirm
// the accumulated precision loss is acceptable for deep level counts.
var prevLayer = smoothLayers[i];
var sd = src.Data;
Parallel.For(0, h, y =>
{
int row = y * w;
for (int x = 0; x < w; x++)
sd[y, x, 0] = (byte)Math.Clamp((int)Math.Round(prevLayer[row + x]), 0, 255);
});
using var dst = new Image<Gray, byte>(w, h);
CvInvoke.GaussianBlur(src, dst, new System.Drawing.Size(ksize, ksize), sigmas[i]);
smoothLayers[i + 1] = new float[h * w];
var dd = dst.Data;
var nextLayer = smoothLayers[i + 1];
Parallel.For(0, h, y =>
{
int row = y * w;
for (int x = 0; x < w; x++)
nextLayer[row + x] = dd[y, x, 0];
});
}
// === Interpolate per-level gains and reconstruct directly ===
// Gains blend fine -> medium over the first half of the levels and
// medium -> coarse over the second half.
var gains = new double[levels];
for (int i = 0; i < levels; i++)
{
double t = levels <= 1 ? 0.0 : (double)i / (levels - 1);
if (t <= 0.5)
{
double t2 = t * 2.0;
gains[i] = fineGain * (1.0 - t2) + mediumGain * t2;
}
else
{
double t2 = (t - 0.5) * 2.0;
gains[i] = mediumGain * (1.0 - t2) + coarseGain * t2;
}
}
// Reconstruction: output = baseGain * G_n + sum(gain_i * (G_i - G_{i+1})).
float fBaseGain = (float)baseGain;
float fClip = (float)clipLimit;
var baseLayerData = smoothLayers[levels];
var result = new Image<Gray, byte>(w, h);
var resultData = result.Data;
// Pre-convert gains to float for the per-pixel inner loop.
var fGains = new float[levels];
for (int i = 0; i < levels; i++)
fGains[i] = (float)gains[i];
Parallel.For(0, h, y =>
{
int row = y * w;
for (int x = 0; x < w; x++)
{
int idx = row + x;
float val = baseLayerData[idx] * fBaseGain;
for (int i = 0; i < levels; i++)
{
float detail = smoothLayers[i][idx] - smoothLayers[i + 1][idx];
detail *= fGains[i];
if (fClip > 0)
detail = Math.Clamp(detail, -fClip, fClip); // limit over-shoot per scale
val += detail;
}
resultData[y, x, 0] = (byte)Math.Clamp((int)Math.Round(val), 0, 255);
}
});
_logger.Debug("Process completed: {Levels} levels, output={W}x{H}", levels, w, h);
return result;
}
}
@@ -0,0 +1,142 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: HistogramEqualizationProcessor.cs
// 描述: 直方图均衡化算子,用于增强图像对比度
// 功能:
// - 全局直方图均衡化
// - 自适应直方图均衡化(CLAHE)
// - 限制对比度增强
// - 改善图像的整体对比度
// 算法: 直方图均衡化、CLAHE
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
namespace ImageProcessing.Processors;
/// <summary>
/// Histogram equalization processor: global histogram equalization or a
/// simplified tile-based CLAHE variant for local contrast enhancement.
/// </summary>
public class HistogramEqualizationProcessor : ImageProcessorBase
{
    private static readonly ILogger _logger = Log.ForContext<HistogramEqualizationProcessor>();

    public HistogramEqualizationProcessor()
    {
        Name = LocalizationHelper.GetString("HistogramEqualizationProcessor_Name");
        Description = LocalizationHelper.GetString("HistogramEqualizationProcessor_Description");
    }

    protected override void InitializeParameters()
    {
        // "Global" = plain EqualizeHist over the whole image,
        // "CLAHE"  = per-tile equalization blended with the original (simplified CLAHE).
        Parameters.Add("Method", new ProcessorParameter(
            "Method",
            LocalizationHelper.GetString("HistogramEqualizationProcessor_Method"),
            typeof(string),
            "Global",
            null,
            null,
            LocalizationHelper.GetString("HistogramEqualizationProcessor_Method_Desc"),
            new string[] { "Global", "CLAHE" }));
        // Contrast-limiting factor; only the CLAHE branch uses it.
        Parameters.Add("ClipLimit", new ProcessorParameter(
            "ClipLimit",
            LocalizationHelper.GetString("HistogramEqualizationProcessor_ClipLimit"),
            typeof(double),
            2.0,
            1.0,
            10.0,
            LocalizationHelper.GetString("HistogramEqualizationProcessor_ClipLimit_Desc")));
        // Side length (pixels) of the square tiles used by the CLAHE branch.
        Parameters.Add("TileSize", new ProcessorParameter(
            "TileSize",
            LocalizationHelper.GetString("HistogramEqualizationProcessor_TileSize"),
            typeof(int),
            8,
            4,
            32,
            LocalizationHelper.GetString("HistogramEqualizationProcessor_TileSize_Desc")));
        _logger.Debug("InitializeParameters");
    }

    /// <summary>
    /// Applies the configured equalization method and returns a new image;
    /// the input image is not modified.
    /// </summary>
    public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
    {
        string method = GetParameter<string>("Method");
        double clipLimit = GetParameter<double>("ClipLimit");
        int tileSize = GetParameter<int>("TileSize");
        Image<Gray, byte> result;
        if (method == "CLAHE")
        {
            result = ApplyCLAHE(inputImage, clipLimit, tileSize);
        }
        else // Global
        {
            result = new Image<Gray, byte>(inputImage.Size);
            CvInvoke.EqualizeHist(inputImage, result);
        }
        _logger.Debug("Process: Method = {Method}, ClipLimit = {ClipLimit}, TileSize = {TileSize}",
            method, clipLimit, tileSize);
        return result;
    }

    /// <summary>
    /// Simplified CLAHE: each tile is equalized independently, then blended
    /// with the original tile by a factor derived from <paramref name="clipLimit"/>.
    /// Fixed: intermediate images are now released via using declarations, so an
    /// exception mid-tile no longer leaks native image memory, and the input/result
    /// ROIs are restored in finally blocks instead of being left dangling.
    /// </summary>
    private Image<Gray, byte> ApplyCLAHE(Image<Gray, byte> inputImage, double clipLimit, int tileSize)
    {
        int width = inputImage.Width;
        int height = inputImage.Height;
        int tilesX = (width + tileSize - 1) / tileSize;
        int tilesY = (height + tileSize - 1) / tileSize;
        var result = new Image<Gray, byte>(width, height);
        // Loop-invariant blend factor: clipLimit in [1,10] maps to [0.1,1.0].
        double blend = Math.Min(clipLimit / 10.0, 1.0);
        // Equalize tile by tile.
        for (int ty = 0; ty < tilesY; ty++)
        {
            for (int tx = 0; tx < tilesX; tx++)
            {
                int x = tx * tileSize;
                int y = ty * tileSize;
                int w = Math.Min(tileSize, width - x);  // edge tiles may be smaller
                int h = Math.Min(tileSize, height - y);
                var roi = new System.Drawing.Rectangle(x, y, w, h);
                Image<Gray, byte> tileCopy;
                inputImage.ROI = roi;
                try
                {
                    tileCopy = inputImage.Copy();
                }
                finally
                {
                    // Always restore the caller's image to a full ROI.
                    inputImage.ROI = System.Drawing.Rectangle.Empty;
                }
                using var tile = tileCopy;
                // Per-tile histogram equalization.
                using var equalizedTile = new Image<Gray, byte>(tile.Size);
                CvInvoke.EqualizeHist(tile, equalizedTile);
                // Contrast limit (simplified): original + blend * (equalized - original).
                using var floatTile = tile.Convert<Gray, float>();
                using var floatEqualized = equalizedTile.Convert<Gray, float>();
                using var diff = floatEqualized - floatTile;
                using var scaledDiff = diff * blend;
                using var limited = floatTile + scaledDiff;
                using var limitedByte = limited.Convert<Gray, byte>();
                // Copy the blended tile into the result image.
                result.ROI = roi;
                try
                {
                    limitedByte.CopyTo(result);
                }
                finally
                {
                    result.ROI = System.Drawing.Rectangle.Empty;
                }
            }
        }
        return result;
    }
}
@@ -0,0 +1,266 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: HistogramOverlayProcessor.cs
// 描述: 直方图叠加算子,计算灰度直方图并以蓝色柱状图绘制到结果图像左上角
// 功能:
// - 计算输入图像的灰度直方图
// - 将直方图绘制为蓝色半透明柱状图叠加到图像左上角
// - 输出直方图统计表格数据
// 算法: 灰度直方图统计 + 彩色图像叠加
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
using System.Drawing;
using System.Text;
namespace ImageProcessing.Processors;
/// <summary>
/// Histogram overlay processor: computes the grayscale histogram, draws it as a
/// semi-transparent blue bar chart (with axes and tick labels) in the top-left
/// corner of a BGR copy of the input, and exports the statistics as table data.
/// The method returns an unmodified clone of the input; the rendered overlay is
/// published via OutputData["PseudoColorImage"].
/// </summary>
public class HistogramOverlayProcessor : ImageProcessorBase
{
    private static readonly ILogger _logger = Log.ForContext<HistogramOverlayProcessor>();
    // Fixed layout parameters
    private const int ChartWidth = 256; // plot-area width of the bar chart
    private const int ChartHeight = 200; // plot-area height of the bar chart
    private const int AxisMarginLeft = 50; // width reserved for Y-axis labels
    private const int AxisMarginBottom = 25; // height reserved for X-axis labels
    private const int Padding = 8; // extra inner padding of the background
    private const int PaddingRight = 25; // right padding (room for the last X-axis tick label)
    private const int Margin = 10; // offset from the image's top-left corner
    private const float BgAlpha = 0.6f; // opacity of the dark chart background
    private const double FontScale = 0.35;
    private const int FontThickness = 1;
    public HistogramOverlayProcessor()
    {
        Name = LocalizationHelper.GetString("HistogramOverlayProcessor_Name");
        Description = LocalizationHelper.GetString("HistogramOverlayProcessor_Description");
    }
    protected override void InitializeParameters()
    {
        // No adjustable parameters.
    }
    /// <summary>
    /// Computes the histogram and statistics, renders the overlay into a BGR copy
    /// (OutputData["PseudoColorImage"]), stores the raw histogram and the textual
    /// table (OutputData["Histogram"], OutputData["HistogramTable"]), and returns
    /// a clone of the unmodified grayscale input.
    /// </summary>
    public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
    {
        int h = inputImage.Height;
        int w = inputImage.Width;
        var srcData = inputImage.Data;
        // === 1. Compute the grayscale histogram ===
        var hist = new int[256];
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
                hist[srcData[y, x, 0]]++;
        int maxCount = 0;
        long totalPixels = (long)h * w;
        for (int i = 0; i < 256; i++)
            if (hist[i] > maxCount) maxCount = hist[i];
        // === 2. Compute statistics (min/max, mean, median, mode, std-dev) ===
        double mean = 0, variance = 0;
        int minVal = 255, maxVal = 0;
        int modeVal = 0, modeCount = 0;
        long medianTarget = totalPixels / 2, cumulative = 0;
        int medianVal = 0;
        bool medianFound = false;
        for (int i = 0; i < 256; i++)
        {
            if (hist[i] > 0)
            {
                if (i < minVal) minVal = i;
                if (i > maxVal) maxVal = i;
            }
            if (hist[i] > modeCount) { modeCount = hist[i]; modeVal = i; }
            mean += (double)i * hist[i];
            cumulative += hist[i];
            if (!medianFound && cumulative >= medianTarget) { medianVal = i; medianFound = true; }
        }
        mean /= totalPixels;
        for (int i = 0; i < 256; i++)
            variance += hist[i] * (i - mean) * (i - mean);
        variance /= totalPixels;
        double stdDev = Math.Sqrt(variance);
        // === 3. Emit the statistics table (user-facing text; strings intentionally Chinese) ===
        var sb = new StringBuilder();
        sb.AppendLine("=== 灰度直方图统计 ===");
        sb.AppendLine($"图像尺寸: {w} x {h}");
        sb.AppendLine($"总像素数: {totalPixels}");
        sb.AppendLine($"最小灰度: {minVal}");
        sb.AppendLine($"最大灰度: {maxVal}");
        sb.AppendLine($"平均灰度: {mean:F2}");
        sb.AppendLine($"中位灰度: {medianVal}");
        sb.AppendLine($"众数灰度: {modeVal} (出现 {modeCount} 次)");
        sb.AppendLine($"标准差: {stdDev:F2}");
        sb.AppendLine();
        sb.AppendLine("灰度值\t像素数\t占比(%)");
        for (int i = 0; i < 256; i++)
        {
            if (hist[i] > 0)
                sb.AppendLine($"{i}\t{hist[i]}\t{(double)hist[i] / totalPixels * 100.0:F4}");
        }
        OutputData["HistogramTable"] = sb.ToString();
        OutputData["Histogram"] = hist;
        // === 4. Build the color overlay image (blue bars + XY axes) ===
        var colorImage = inputImage.Convert<Bgr, byte>();
        var colorData = colorImage.Data;
        // Layout: horizontally Padding + Y-axis labels + plot area + right padding;
        //         vertically Padding + plot area + X-axis labels + Padding.
        int totalW = Padding + AxisMarginLeft + ChartWidth + PaddingRight;
        int totalH = Padding + ChartHeight + AxisMarginBottom + Padding;
        int bgW = Math.Min(totalW, w - Margin);
        int bgH = Math.Min(totalH, h - Margin);
        if (bgW > Padding + AxisMarginLeft && bgH > Padding + AxisMarginBottom)
        {
            // Shrink the plot area when the image cannot fit the full chart.
            int plotW = Math.Min(ChartWidth, bgW - Padding - AxisMarginLeft - PaddingRight);
            int plotH = Math.Min(ChartHeight, bgH - Padding - AxisMarginBottom - Padding);
            if (plotW <= 0 || plotH <= 0) goto SkipOverlay; // image too small for any chart
            // Top-left corner of the plot area in image coordinates.
            int plotX0 = Margin + Padding + AxisMarginLeft;
            int plotY0 = Margin + Padding;
            // Per-column bar heights; each pixel column samples one histogram bin.
            double binWidth = (double)plotW / 256.0;
            var barHeights = new int[plotW];
            for (int px = 0; px < plotW; px++)
            {
                int bin = Math.Min((int)(px / binWidth), 255);
                barHeights[px] = maxCount > 0 ? (int)((long)hist[bin] * (plotH - 1) / maxCount) : 0;
            }
            float alpha = BgAlpha;
            float inv = 1.0f - alpha;
            // Darken the whole background area (including axes and padding).
            Parallel.For(0, bgH, dy =>
            {
                int imgY = Margin + dy;
                if (imgY >= h) return;
                for (int dx = 0; dx < bgW; dx++)
                {
                    int imgX = Margin + dx;
                    if (imgX >= w) break;
                    colorData[imgY, imgX, 0] = (byte)(int)(colorData[imgY, imgX, 0] * inv);
                    colorData[imgY, imgX, 1] = (byte)(int)(colorData[imgY, imgX, 1] * inv);
                    colorData[imgY, imgX, 2] = (byte)(int)(colorData[imgY, imgX, 2] * inv);
                }
            });
            // Draw the semi-transparent blue bars (additive blend, bottom-up).
            Parallel.For(0, plotH, dy =>
            {
                int imgY = plotY0 + dy;
                if (imgY >= h) return;
                int rowFromBottom = plotH - 1 - dy;
                for (int dx = 0; dx < plotW; dx++)
                {
                    int imgX = plotX0 + dx;
                    if (imgX >= w) break;
                    if (rowFromBottom < barHeights[dx])
                    {
                        byte curB = colorData[imgY, imgX, 0];
                        byte curG = colorData[imgY, imgX, 1];
                        byte curR = colorData[imgY, imgX, 2];
                        colorData[imgY, imgX, 0] = (byte)Math.Clamp(curB + (int)(255 * alpha), 0, 255);
                        colorData[imgY, imgX, 1] = (byte)Math.Clamp(curG + (int)(50 * alpha), 0, 255);
                        colorData[imgY, imgX, 2] = (byte)Math.Clamp(curR + (int)(50 * alpha), 0, 255);
                    }
                }
            });
            // === 5. Draw axis lines and tick labels ===
            var white = new MCvScalar(255, 255, 255);
            var gray = new MCvScalar(180, 180, 180);
            // Y-axis line
            CvInvoke.Line(colorImage,
                new Point(plotX0, plotY0),
                new Point(plotX0, plotY0 + plotH),
                white, 1);
            // X-axis line
            CvInvoke.Line(colorImage,
                new Point(plotX0, plotY0 + plotH),
                new Point(plotX0 + plotW, plotY0 + plotH),
                white, 1);
            // X-axis ticks at gray levels 0, 64, 128, 192, 255
            int[] xTicks = { 0, 64, 128, 192, 255 };
            foreach (int tick in xTicks)
            {
                int tx = plotX0 + (int)(tick * binWidth);
                if (tx >= w) break;
                CvInvoke.Line(colorImage,
                    new Point(tx, plotY0 + plotH),
                    new Point(tx, plotY0 + plotH + 4),
                    white, 1);
                string label = tick.ToString();
                CvInvoke.PutText(colorImage, label,
                    new Point(tx - 8, plotY0 + plotH + 18),
                    FontFace.HersheySimplex, FontScale, white, FontThickness);
            }
            // Y-axis ticks at 0, 1/4, 1/2, 3/4 and the full peak count
            // (labels are absolute pixel counts formatted by FormatCount, not percentages).
            for (int i = 0; i <= 4; i++)
            {
                int val = maxCount * i / 4;
                int ty = plotY0 + plotH - (int)((long)plotH * i / 4);
                CvInvoke.Line(colorImage,
                    new Point(plotX0 - 4, ty),
                    new Point(plotX0, ty),
                    white, 1);
                // Dashed horizontal grid lines at the interior ticks.
                if (i > 0 && i < 4)
                {
                    for (int gx = plotX0 + 2; gx < plotX0 + plotW; gx += 6)
                    {
                        int gxEnd = Math.Min(gx + 2, plotX0 + plotW);
                        CvInvoke.Line(colorImage,
                            new Point(gx, ty),
                            new Point(gxEnd, ty),
                            gray, 1);
                    }
                }
                string label = FormatCount(val);
                CvInvoke.PutText(colorImage, label,
                    new Point(Margin + Padding, ty + 4),
                    FontFace.HersheySimplex, FontScale, white, FontThickness);
            }
        }
    SkipOverlay:
        OutputData["PseudoColorImage"] = colorImage;
        _logger.Debug("Process completed: histogram overlay, mean={Mean:F2}, stdDev={Std:F2}", mean, stdDev);
        return inputImage.Clone();
    }
    /// <summary>
    /// Formats a pixel count compactly (e.g. 12345 → "12.3K").
    /// </summary>
    private static string FormatCount(int count)
    {
        if (count >= 1_000_000) return $"{count / 1_000_000.0:F1}M";
        if (count >= 1_000) return $"{count / 1_000.0:F1}K";
        return count.ToString();
    }
}
@@ -0,0 +1,320 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: RetinexProcessor.cs
// 描述: 基于Retinex的多尺度阴影校正算子
// 功能:
// - 单尺度Retinex (SSR)
// - 多尺度Retinex (MSR)
// - 带色彩恢复的多尺度Retinex (MSRCR)
// - 光照不均匀校正
// - 阴影去除
// 算法: Retinex理论 - 将图像分解为反射分量和光照分量
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
namespace ImageProcessing.Processors;
/// <summary>
/// Retinex shadow-correction processor. Supports single-scale Retinex (SSR),
/// multi-scale Retinex (MSR) and a simplified color-restoration variant (MSRCR)
/// adapted to grayscale images.
/// Fixed: the previous implementation leaked every intermediate Emgu image
/// produced by operator overloads (e.g. "floatImage = floatImage + 1.0f",
/// "retinex = retinex * gain + offset", "msrResult = msrResult + (...)");
/// all temporaries are now disposed deterministically via using declarations.
/// The math is unchanged.
/// </summary>
public class RetinexProcessor : ImageProcessorBase
{
    private static readonly ILogger _logger = Log.ForContext<RetinexProcessor>();

    public RetinexProcessor()
    {
        Name = LocalizationHelper.GetString("RetinexProcessor_Name");
        Description = LocalizationHelper.GetString("RetinexProcessor_Description");
    }

    protected override void InitializeParameters()
    {
        // Variant: SSR (single scale, uses Sigma2), MSR (all three sigmas),
        // MSRCR (MSR plus a simplified restoration step).
        Parameters.Add("Method", new ProcessorParameter(
            "Method",
            LocalizationHelper.GetString("RetinexProcessor_Method"),
            typeof(string),
            "MSR",
            null,
            null,
            LocalizationHelper.GetString("RetinexProcessor_Method_Desc"),
            new string[] { "SSR", "MSR", "MSRCR" }));
        // Small / medium / large Gaussian scales for illumination estimation.
        Parameters.Add("Sigma1", new ProcessorParameter(
            "Sigma1",
            LocalizationHelper.GetString("RetinexProcessor_Sigma1"),
            typeof(double),
            15.0,
            1.0,
            100.0,
            LocalizationHelper.GetString("RetinexProcessor_Sigma1_Desc")));
        Parameters.Add("Sigma2", new ProcessorParameter(
            "Sigma2",
            LocalizationHelper.GetString("RetinexProcessor_Sigma2"),
            typeof(double),
            80.0,
            1.0,
            200.0,
            LocalizationHelper.GetString("RetinexProcessor_Sigma2_Desc")));
        Parameters.Add("Sigma3", new ProcessorParameter(
            "Sigma3",
            LocalizationHelper.GetString("RetinexProcessor_Sigma3"),
            typeof(double),
            250.0,
            1.0,
            500.0,
            LocalizationHelper.GetString("RetinexProcessor_Sigma3_Desc")));
        // Linear post-scaling of the log-domain response before normalization.
        Parameters.Add("Gain", new ProcessorParameter(
            "Gain",
            LocalizationHelper.GetString("RetinexProcessor_Gain"),
            typeof(double),
            1.0,
            0.1,
            5.0,
            LocalizationHelper.GetString("RetinexProcessor_Gain_Desc")));
        Parameters.Add("Offset", new ProcessorParameter(
            "Offset",
            LocalizationHelper.GetString("RetinexProcessor_Offset"),
            typeof(int),
            0,
            -100,
            100,
            LocalizationHelper.GetString("RetinexProcessor_Offset_Desc")));
        _logger.Debug("InitializeParameters");
    }

    /// <summary>
    /// Dispatches to the configured Retinex variant and returns a new image;
    /// the input image is not modified.
    /// </summary>
    public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
    {
        string method = GetParameter<string>("Method");
        double sigma1 = GetParameter<double>("Sigma1");
        double sigma2 = GetParameter<double>("Sigma2");
        double sigma3 = GetParameter<double>("Sigma3");
        double gain = GetParameter<double>("Gain");
        int offset = GetParameter<int>("Offset");
        Image<Gray, byte> result;
        if (method == "SSR")
        {
            // Single-scale Retinex uses the medium sigma.
            result = SingleScaleRetinex(inputImage, sigma2, gain, offset);
        }
        else if (method == "MSR")
        {
            result = MultiScaleRetinex(inputImage, new[] { sigma1, sigma2, sigma3 }, gain, offset);
        }
        else // MSRCR
        {
            result = MultiScaleRetinexCR(inputImage, new[] { sigma1, sigma2, sigma3 }, gain, offset);
        }
        _logger.Debug("Process: Method = {Method}, Sigma1 = {Sigma1}, Sigma2 = {Sigma2}, Sigma3 = {Sigma3}, Gain = {Gain}, Offset = {Offset}",
            method, sigma1, sigma2, sigma3, gain, offset);
        return result;
    }

    /// <summary>
    /// Odd Gaussian kernel size covering roughly ±3 sigma (minimum 3).
    /// </summary>
    private static int GaussianKernelSize(double sigma)
    {
        int kernelSize = (int)(sigma * 6) | 1; // force odd
        return kernelSize < 3 ? 3 : kernelSize;
    }

    /// <summary>
    /// Per-pixel natural logarithm of a float image. Callers must guarantee
    /// all pixel values are &gt; 0 (achieved by the +1 bias below).
    /// </summary>
    private static Image<Gray, float> LogOf(Image<Gray, float> src)
    {
        var dst = new Image<Gray, float>(src.Size);
        var srcData = src.Data;
        var dstData = dst.Data;
        for (int y = 0; y < src.Height; y++)
        {
            for (int x = 0; x < src.Width; x++)
            {
                dstData[y, x, 0] = (float)Math.Log(srcData[y, x, 0]);
            }
        }
        return dst;
    }

    /// <summary>
    /// Single-scale Retinex (SSR):
    /// R(x,y) = log(I(x,y)) - log(I(x,y) * G(x,y)),
    /// then gain/offset and min-max normalization to 0-255.
    /// </summary>
    private Image<Gray, byte> SingleScaleRetinex(Image<Gray, byte> inputImage, double sigma, double gain, int offset)
    {
        // Convert to float and add 1 so log(0) cannot occur.
        using var converted = inputImage.Convert<Gray, float>();
        using var floatImage = converted + 1.0f;
        using var logImage = LogOf(floatImage);
        // Gaussian blur estimates the illumination component.
        using var blurred = new Image<Gray, float>(inputImage.Size);
        int kernelSize = GaussianKernelSize(sigma);
        CvInvoke.GaussianBlur(floatImage, blurred, new System.Drawing.Size(kernelSize, kernelSize), sigma);
        using var logBlurred = LogOf(blurred);
        // R = log(I) - log(I*G), then apply gain and offset.
        using var retinex = logImage - logBlurred;
        using var scaled = retinex * gain;
        using var shifted = scaled + offset;
        return NormalizeToByteImage(shifted);
    }

    /// <summary>
    /// Multi-scale Retinex (MSR): mean of the SSR responses over all sigmas,
    /// then gain/offset and min-max normalization.
    /// </summary>
    private Image<Gray, byte> MultiScaleRetinex(Image<Gray, byte> inputImage, double[] sigmas, double gain, int offset)
    {
        using var converted = inputImage.Convert<Gray, float>();
        using var floatImage = converted + 1.0f; // avoid log(0)
        using var logImage = LogOf(floatImage);
        // Accumulate the per-scale SSR responses.
        using var accum = new Image<Gray, float>(inputImage.Size);
        accum.SetZero();
        foreach (double sigma in sigmas)
        {
            using var blurred = new Image<Gray, float>(inputImage.Size);
            int kernelSize = GaussianKernelSize(sigma);
            CvInvoke.GaussianBlur(floatImage, blurred, new System.Drawing.Size(kernelSize, kernelSize), sigma);
            using var logBlurred = LogOf(blurred);
            using var ssr = logImage - logBlurred;
            // In-place accumulation; cv::add permits dst == src1.
            CvInvoke.Add(accum, ssr, accum);
        }
        // Average over the scales, then apply gain and offset.
        using var averaged = accum / sigmas.Length;
        using var scaled = averaged * gain;
        using var shifted = scaled + offset;
        return NormalizeToByteImage(shifted);
    }

    /// <summary>
    /// MSRCR: runs MSR, then — since the input is grayscale — applies a
    /// simplified "color restoration" factor c = log(I+1)/log(128) per pixel
    /// as a local contrast weighting.
    /// </summary>
    private Image<Gray, byte> MultiScaleRetinexCR(Image<Gray, byte> inputImage, double[] sigmas, double gain, int offset)
    {
        using var msrResult = MultiScaleRetinex(inputImage, sigmas, gain, offset);
        using var floatMsr = msrResult.Convert<Gray, float>();
        using var floatInput = inputImage.Convert<Gray, float>();
        using var enhanced = new Image<Gray, float>(inputImage.Size);
        float logDenominator = (float)Math.Log(128.0); // hoisted loop-invariant
        var msrData = floatMsr.Data;
        var inputData = floatInput.Data;
        var enhancedData = enhanced.Data;
        for (int y = 0; y < inputImage.Height; y++)
        {
            for (int x = 0; x < inputImage.Width; x++)
            {
                float msr = msrData[y, x, 0];
                float original = inputData[y, x, 0];
                float c = (float)Math.Log(original + 1.0) / logDenominator;
                enhancedData[y, x, 0] = msr * c;
            }
        }
        return NormalizeToByteImage(enhanced);
    }

    /// <summary>
    /// Min-max normalizes a float image into the full 0-255 byte range.
    /// A perfectly flat input (range == 0) yields an all-zero image, matching
    /// the original behavior.
    /// </summary>
    private Image<Gray, byte> NormalizeToByteImage(Image<Gray, float> floatImage)
    {
        double minVal = double.MaxValue;
        double maxVal = double.MinValue;
        int height = floatImage.Height;
        int width = floatImage.Width;
        var data = floatImage.Data;
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                float val = data[y, x, 0];
                if (val < minVal) minVal = val;
                if (val > maxVal) maxVal = val;
            }
        }
        var result = new Image<Gray, byte>(floatImage.Size);
        double range = maxVal - minVal;
        if (range > 0)
        {
            var resultData = result.Data;
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    float val = data[y, x, 0];
                    int normalized = (int)((val - minVal) / range * 255.0);
                    resultData[y, x, 0] = (byte)Math.Max(0, Math.Min(255, normalized));
                }
            }
        }
        return result;
    }
}
@@ -0,0 +1,141 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: SharpenProcessor.cs
// 描述: 锐化算子,用于增强图像边缘和细节
// 功能:
// - 拉普拉斯锐化
// - 非锐化掩蔽(Unsharp Masking
// - 可调节锐化强度
// - 支持多种锐化核
// 算法: 拉普拉斯算子、非锐化掩蔽
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
namespace ImageProcessing.Processors;
/// <summary>
/// Sharpening processor supporting Laplacian sharpening and unsharp masking.
/// </summary>
public class SharpenProcessor : ImageProcessorBase
{
    private static readonly ILogger _logger = Log.ForContext<SharpenProcessor>();

    public SharpenProcessor()
    {
        Name = LocalizationHelper.GetString("SharpenProcessor_Name");
        Description = LocalizationHelper.GetString("SharpenProcessor_Description");
    }

    protected override void InitializeParameters()
    {
        // "Laplacian" = edge re-injection via the Laplacian operator,
        // "UnsharpMask" = Gaussian-difference detail boost.
        Parameters.Add("Method", new ProcessorParameter(
            "Method",
            LocalizationHelper.GetString("SharpenProcessor_Method"),
            typeof(string),
            "Laplacian",
            null,
            null,
            LocalizationHelper.GetString("SharpenProcessor_Method_Desc"),
            new string[] { "Laplacian", "UnsharpMask" }));
        // Multiplier applied to the detail/edge signal.
        Parameters.Add("Strength", new ProcessorParameter(
            "Strength",
            LocalizationHelper.GetString("SharpenProcessor_Strength"),
            typeof(double),
            1.0,
            0.1,
            5.0,
            LocalizationHelper.GetString("SharpenProcessor_Strength_Desc")));
        // Gaussian kernel size for unsharp masking (forced odd in Process).
        Parameters.Add("KernelSize", new ProcessorParameter(
            "KernelSize",
            LocalizationHelper.GetString("SharpenProcessor_KernelSize"),
            typeof(int),
            3,
            1,
            15,
            LocalizationHelper.GetString("SharpenProcessor_KernelSize_Desc")));
        _logger.Debug("InitializeParameters");
    }

    /// <summary>
    /// Applies the configured sharpening method and returns a new image;
    /// the input image is not modified.
    /// </summary>
    public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
    {
        string method = GetParameter<string>("Method");
        double strength = GetParameter<double>("Strength");
        int kernelSize = GetParameter<int>("KernelSize");
        if (kernelSize % 2 == 0) kernelSize++; // GaussianBlur requires an odd kernel
        Image<Gray, byte> result;
        if (method == "UnsharpMask")
        {
            result = ApplyUnsharpMask(inputImage, kernelSize, strength);
        }
        else // Laplacian
        {
            result = ApplyLaplacianSharpening(inputImage, strength);
        }
        _logger.Debug("Process: Method = {Method}, Strength = {Strength}, KernelSize = {KernelSize}",
            method, strength, kernelSize);
        return result;
    }

    /// <summary>
    /// Laplacian sharpening: sharpened = I - strength * Laplacian(I).
    /// Fixed: OpenCV's default 3x3 Laplacian kernel has a negative center, so
    /// the edge response must be SUBTRACTED to enhance edges — the previous
    /// version ADDED it, which smoothed the image instead of sharpening it.
    /// Also removed an unused byte conversion of the Laplacian (dead allocation)
    /// and switched to using declarations so intermediates are released even
    /// when an exception is thrown.
    /// </summary>
    private Image<Gray, byte> ApplyLaplacianSharpening(Image<Gray, byte> inputImage, double strength)
    {
        using var laplacian = new Image<Gray, float>(inputImage.Size);
        CvInvoke.Laplacian(inputImage, laplacian, DepthType.Cv32F, 1);
        using var floatImage = inputImage.Convert<Gray, float>();
        using var scaledEdges = laplacian * strength;
        using var sharpened = floatImage - scaledEdges;
        // Convert<Gray, byte> saturates the result to the 0-255 range.
        return sharpened.Convert<Gray, byte>();
    }

    /// <summary>
    /// Unsharp masking: sharpened = I + strength * (I - GaussianBlur(I)).
    /// </summary>
    private Image<Gray, byte> ApplyUnsharpMask(Image<Gray, byte> inputImage, int kernelSize, double strength)
    {
        using var blurred = new Image<Gray, byte>(inputImage.Size);
        CvInvoke.GaussianBlur(inputImage, blurred,
            new System.Drawing.Size(kernelSize, kernelSize), 0);
        using var floatInput = inputImage.Convert<Gray, float>();
        using var floatBlurred = blurred.Convert<Gray, float>();
        using var detail = floatInput - floatBlurred;
        using var scaledDetail = detail * strength;
        using var sharpened = floatInput + scaledDetail;
        return sharpened.Convert<Gray, byte>();
    }
}
@@ -0,0 +1,127 @@
// ============================================================================
// Copyright © 2016-2025 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: SubPixelZoomProcessor.cs
// 描述: 亚像素放大算子,通过高质量插值实现图像的亚像素级放大
// 功能:
// - 支持任意倍率放大(含小数倍率如 1.5x、2.3x)
// - 多种插值方法(最近邻、双线性、双三次、Lanczos)
// - 可选锐化补偿(抵消插值模糊)
// - 可选指定输出尺寸
// 算法: 基于 OpenCV Resize 的高质量插值放大
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Serilog;
using System.Drawing;
namespace ImageProcessing.Processors;
/// <summary>
/// Sub-pixel zoom processor: scales the image by an arbitrary (possibly
/// fractional) factor using a selectable interpolation method, with optional
/// unsharp-mask compensation for interpolation blur.
/// </summary>
public class SubPixelZoomProcessor : ImageProcessorBase
{
    private static readonly ILogger _logger = Log.ForContext<SubPixelZoomProcessor>();

    public SubPixelZoomProcessor()
    {
        Name = LocalizationHelper.GetString("SubPixelZoomProcessor_Name");
        Description = LocalizationHelper.GetString("SubPixelZoomProcessor_Description");
    }

    protected override void InitializeParameters()
    {
        // Magnification factor (1x..16x, fractional values allowed).
        Parameters.Add("ScaleFactor", new ProcessorParameter(
            "ScaleFactor",
            LocalizationHelper.GetString("SubPixelZoomProcessor_ScaleFactor"),
            typeof(double),
            2.0,
            1.0,
            16.0,
            LocalizationHelper.GetString("SubPixelZoomProcessor_ScaleFactor_Desc")));
        // Resampling filter used by CvInvoke.Resize.
        Parameters.Add("Interpolation", new ProcessorParameter(
            "Interpolation",
            LocalizationHelper.GetString("SubPixelZoomProcessor_Interpolation"),
            typeof(string),
            "Lanczos",
            null,
            null,
            LocalizationHelper.GetString("SubPixelZoomProcessor_Interpolation_Desc"),
            new string[] { "Nearest", "Bilinear", "Bicubic", "Lanczos" }));
        // Optional post-sharpening to counter interpolation blur.
        Parameters.Add("SharpenAfter", new ProcessorParameter(
            "SharpenAfter",
            LocalizationHelper.GetString("SubPixelZoomProcessor_SharpenAfter"),
            typeof(bool),
            false,
            null,
            null,
            LocalizationHelper.GetString("SubPixelZoomProcessor_SharpenAfter_Desc")));
        Parameters.Add("SharpenStrength", new ProcessorParameter(
            "SharpenStrength",
            LocalizationHelper.GetString("SubPixelZoomProcessor_SharpenStrength"),
            typeof(double),
            0.5,
            0.1,
            3.0,
            LocalizationHelper.GetString("SubPixelZoomProcessor_SharpenStrength_Desc")));
        _logger.Debug("InitializeParameters");
    }

    /// <summary>
    /// Resizes the input by ScaleFactor with the chosen interpolation; when
    /// SharpenAfter is set, applies unsharp masking on the enlarged image:
    /// result = result + strength * (result - blur).
    /// </summary>
    public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
    {
        double scaleFactor = GetParameter<double>("ScaleFactor");
        string interpolation = GetParameter<string>("Interpolation");
        bool sharpenAfter = GetParameter<bool>("SharpenAfter");
        double sharpenStrength = GetParameter<double>("SharpenStrength");
        Inter interMethod = interpolation switch
        {
            "Nearest" => Inter.Nearest,
            "Bilinear" => Inter.Linear,
            "Bicubic" => Inter.Cubic,
            _ => Inter.Lanczos4
        };
        // Target size, clamped to at least 1x1.
        int newWidth = Math.Max(1, (int)Math.Round(inputImage.Width * scaleFactor));
        int newHeight = Math.Max(1, (int)Math.Round(inputImage.Height * scaleFactor));
        var result = new Image<Gray, byte>(newWidth, newHeight);
        CvInvoke.Resize(inputImage, result, new Size(newWidth, newHeight), 0, 0, interMethod);
        if (sharpenAfter)
        {
            // Blur kernel grows with the zoom factor; forced odd, minimum 3.
            int ksize = Math.Max(3, (int)(scaleFactor * 2) | 1);
            using var blurred = result.SmoothGaussian(ksize);
            // Hoist the Data accessors out of the per-pixel loop (property access
            // on every pixel otherwise).
            var resultData = result.Data;
            var blurredData = blurred.Data;
            for (int y = 0; y < newHeight; y++)
            {
                for (int x = 0; x < newWidth; x++)
                {
                    float val = resultData[y, x, 0];
                    float blur = blurredData[y, x, 0];
                    float sharpened = val + (float)(sharpenStrength * (val - blur));
                    // Fixed: round to nearest (was truncation toward zero) to avoid a
                    // systematic darkening bias, consistent with the other processors.
                    resultData[y, x, 0] = (byte)Math.Clamp((int)Math.Round(sharpened), 0, 255);
                }
            }
        }
        _logger.Debug("Process: Scale={Scale}, Interp={Interp}, Size={W}x{H}, Sharpen={Sharpen}",
            scaleFactor, interpolation, newWidth, newHeight, sharpenAfter);
        return result;
    }
}
@@ -0,0 +1,319 @@
// ============================================================================
// Copyright © 2026 Hexagon Technology Center GmbH. All Rights Reserved.
// 文件名: SuperResolutionProcessor.cs
// 描述: 基于深度学习的超分辨率算子
// 功能:
// - 支持 EDSR 和 FSRCNN 超分辨率模型(ONNX 格式)
// - 支持 2x、3x、4x 放大倍率
// - 灰度图像自动转换为三通道输入,推理后转回灰度
// - 模型文件自动搜索,支持自定义路径
// - 使用 Microsoft.ML.OnnxRuntime 进行推理
// 算法: EDSR (Enhanced Deep Residual SR) / FSRCNN (Fast SR CNN)
// 作者: 李伟 wei.lw.li@hexagon.com
// ============================================================================
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using ImageProcessing.Core;
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using Serilog;
using System.IO;
namespace ImageProcessing.Processors;
/// <summary>
/// 基于深度学习的超分辨率算子(EDSR / FSRCNN),使用 ONNX Runtime 推理
/// </summary>
public class SuperResolutionProcessor : ImageProcessorBase
{
private static readonly ILogger _logger = Log.ForContext<SuperResolutionProcessor>();
// 会话缓存,避免重复加载
private static InferenceSession? _cachedSession;
private static string _cachedModelKey = string.Empty;
public SuperResolutionProcessor()
{
    // Localized display name and description shown in the processor UI.
    Description = LocalizationHelper.GetString("SuperResolutionProcessor_Description");
    Name = LocalizationHelper.GetString("SuperResolutionProcessor_Name");
}
protected override void InitializeParameters()
{
    // Network family: EDSR (higher quality, slower) or FSRCNN (fast), ONNX format.
    var modelParameter = new ProcessorParameter(
        "Model",
        LocalizationHelper.GetString("SuperResolutionProcessor_Model"),
        typeof(string),
        "FSRCNN",
        null,
        null,
        LocalizationHelper.GetString("SuperResolutionProcessor_Model_Desc"),
        new string[] { "EDSR", "FSRCNN" });
    Parameters.Add("Model", modelParameter);
    // Upscaling factor supported by the pretrained models.
    var scaleParameter = new ProcessorParameter(
        "Scale",
        LocalizationHelper.GetString("SuperResolutionProcessor_Scale"),
        typeof(string),
        "2",
        null,
        null,
        LocalizationHelper.GetString("SuperResolutionProcessor_Scale_Desc"),
        new string[] { "2", "3", "4" });
    Parameters.Add("Scale", scaleParameter);
    _logger.Debug("InitializeParameters");
}
/// <summary>
/// Runs super-resolution inference on the input image. Locates the ONNX model,
/// loads (or reuses) an InferenceSession, and dispatches either to single-shot
/// inference or tiled inference for large EDSR inputs.
/// </summary>
/// <exception cref="FileNotFoundException">Thrown when no model file is found.</exception>
public override Image<Gray, byte> Process(Image<Gray, byte> inputImage)
{
    string model = GetParameter<string>("Model");
    int scale = int.Parse(GetParameter<string>("Scale"));
    // Locate the model file (program dir / Models subdir).
    string modelPath = FindModelFile(model, scale);
    if (string.IsNullOrEmpty(modelPath))
    {
        _logger.Error("Model file not found: {Model}_x{Scale}.onnx", model, scale);
        throw new FileNotFoundException(
            $"超分辨率模型文件未找到: {model}_x{scale}.onnx\n" +
            $"请将模型文件放置到以下任一目录:\n" +
            $"  1. 程序目录/Models/\n" +
            $"  2. 程序目录/\n" +
            $"模型需要 ONNX 格式。\n" +
            $"可使用 tf2onnx 从 .pb 转换:\n" +
            $"  pip install tf2onnx\n" +
            $"  python -m tf2onnx.convert --input {model}_x{scale}.pb --output {model}_x{scale}.onnx --inputs input:0 --outputs output:0");
    }
    // Load the session, or reuse the cached one for the same model/scale.
    // NOTE(review): the static session cache is not synchronized — confirm
    // Process is only ever invoked from a single thread.
    string modelKey = $"{model}_{scale}";
    InferenceSession session;
    if (_cachedModelKey == modelKey && _cachedSession != null)
    {
        session = _cachedSession;
        _logger.Debug("Reusing cached session: {ModelKey}", modelKey);
    }
    else
    {
        _cachedSession?.Dispose();
        var options = new SessionOptions();
        options.GraphOptimizationLevel = GraphOptimizationLevel.ORT_ENABLE_ALL;
        try
        {
            options.AppendExecutionProvider_CUDA(0);
            _logger.Information("Using CUDA GPU for inference");
        }
        catch
        {
            // CUDA provider registration failed; ONNX Runtime will use CPU.
            _logger.Warning("CUDA not available, falling back to CPU");
        }
        session = new InferenceSession(modelPath, options);
        _cachedSession = session;
        _cachedModelKey = modelKey;
        // Fixed: the previous version stored session.ModelMetadata in an unused
        // local and logged session.GetType().Name under "Providers", which was
        // meaningless; just log the loaded model path.
        _logger.Information("Loaded ONNX model: {ModelPath}", modelPath);
    }
    int h = inputImage.Height;
    int w = inputImage.Width;
    _logger.Information("Input image size: {W}x{H}, Model: {Model}, Scale: {Scale}", w, h, model, scale);
    // Large EDSR inputs are processed tile-by-tile to avoid slow inference / OOM.
    const int TileSize = 256;
    bool useTiling = model.StartsWith("EDSR", StringComparison.OrdinalIgnoreCase) && (h > TileSize || w > TileSize);
    if (useTiling)
    {
        return ProcessTiled(session, inputImage, scale, TileSize);
    }
    return ProcessSingle(session, inputImage, scale);
}
/// <summary>
/// 单次推理(小图或 FSRCNN)
/// </summary>
private Image<Gray, byte> ProcessSingle(InferenceSession session, Image<Gray, byte> inputImage, int scale)
{
int h = inputImage.Height;
int w = inputImage.Width;
// 获取模型输入信息
string inputName = session.InputMetadata.Keys.First();
var inputMeta = session.InputMetadata[inputName];
int[] dims = inputMeta.Dimensions;
// dims 格式: [1, H, W, C] (NHWC)C 可能是 1 或 3
int inputChannels = dims[^1]; // 最后一维是通道数
// 构建输入 tensor: [1, H, W, C] (NHWC)
// 使用底层数组 + Parallel.For 避免逐元素索引开销
DenseTensor<float> inputTensor;
if (inputChannels == 1)
{
// FSRCNN: 单通道灰度输入
inputTensor = new DenseTensor<float>(new[] { 1, h, w, 1 });
float[] buf = inputTensor.Buffer.ToArray();
var imgData = inputImage.Data;
Parallel.For(0, h, y =>
{
int rowOffset = y * w;
for (int x = 0; x < w; x++)
buf[rowOffset + x] = imgData[y, x, 0];
});
inputTensor = new DenseTensor<float>(buf, new[] { 1, h, w, 1 });
}
else
{
// EDSR: 三通道 BGR 输入
using var colorInput = new Image<Bgr, byte>(w, h);
CvInvoke.CvtColor(inputImage, colorInput, ColorConversion.Gray2Bgr);
var buf = new float[h * w * 3];
var imgData = colorInput.Data;
Parallel.For(0, h, y =>
{
int rowOffset = y * w * 3;
for (int x = 0; x < w; x++)
{
int px = rowOffset + x * 3;
buf[px] = imgData[y, x, 0];
buf[px + 1] = imgData[y, x, 1];
buf[px + 2] = imgData[y, x, 2];
}
});
inputTensor = new DenseTensor<float>(buf, new[] { 1, h, w, 3 });
}
// 推理
var inputs = new List<NamedOnnxValue>
{
NamedOnnxValue.CreateFromTensor(inputName, inputTensor)
};
using var results = session.Run(inputs);
var outputTensor = results.First().AsTensor<float>();
// 输出 shape: [1, C, H*scale, W*scale] (NCHW,模型输出经过 Transpose)
var shape = outputTensor.Dimensions;
int outC = shape[1];
int outH = shape[2];
int outW = shape[3];
// 转换为灰度图像
// 使用 Parallel.For + 直接内存操作
Image<Gray, byte> result;
if (outC == 1)
{
// FSRCNN: 单通道输出 [1, 1, outH, outW]
result = new Image<Gray, byte>(outW, outH);
var outData = result.Data;
Parallel.For(0, outH, y =>
{
for (int x = 0; x < outW; x++)
outData[y, x, 0] = (byte)Math.Clamp((int)outputTensor[0, 0, y, x], 0, 255);
});
}
else
{
// EDSR: 三通道输出 [1, 3, outH, outW] → 灰度
// 直接计算灰度值,跳过中间 BGR 图像分配
result = new Image<Gray, byte>(outW, outH);
var outData = result.Data;
Parallel.For(0, outH, y =>
{
for (int x = 0; x < outW; x++)
{
float b = outputTensor[0, 0, y, x];
float g = outputTensor[0, 1, y, x];
float r = outputTensor[0, 2, y, x];
// BT.601 灰度公式: 0.299*R + 0.587*G + 0.114*B
int gray = (int)(0.299f * r + 0.587f * g + 0.114f * b);
outData[y, x, 0] = (byte)Math.Clamp(gray, 0, 255);
}
});
}
_logger.Debug("ProcessSingle: Scale={Scale}, Output={W}x{H}", scale, outW, outH);
return result;
}
/// <summary>
/// 分块推理(大图 EDSR),将图像切成小块分别推理后拼接
/// </summary>
private Image<Gray, byte> ProcessTiled(InferenceSession session, Image<Gray, byte> inputImage, int scale, int tileSize)
{
int h = inputImage.Height;
int w = inputImage.Width;
int overlap = 8; // 重叠像素,减少拼接边缘伪影
var result = new Image<Gray, byte>(w * scale, h * scale);
int tilesX = (int)Math.Ceiling((double)w / (tileSize - overlap));
int tilesY = (int)Math.Ceiling((double)h / (tileSize - overlap));
_logger.Information("Tiled processing: {TilesX}x{TilesY} tiles, tileSize={TileSize}", tilesX, tilesY, tileSize);
for (int ty = 0; ty < tilesY; ty++)
{
for (int tx = 0; tx < tilesX; tx++)
{
int srcX = Math.Min(tx * (tileSize - overlap), w - tileSize);
int srcY = Math.Min(ty * (tileSize - overlap), h - tileSize);
srcX = Math.Max(srcX, 0);
srcY = Math.Max(srcY, 0);
int tw = Math.Min(tileSize, w - srcX);
int th = Math.Min(tileSize, h - srcY);
// 裁剪 tile
inputImage.ROI = new System.Drawing.Rectangle(srcX, srcY, tw, th);
var tile = inputImage.Copy();
inputImage.ROI = System.Drawing.Rectangle.Empty;
// 推理单个 tile
var srTile = ProcessSingle(session, tile, scale);
tile.Dispose();
// 写入结果
int dstX = srcX * scale;
int dstY = srcY * scale;
result.ROI = new System.Drawing.Rectangle(dstX, dstY, srTile.Width, srTile.Height);
srTile.CopyTo(result);
result.ROI = System.Drawing.Rectangle.Empty;
srTile.Dispose();
}
}
_logger.Debug("ProcessTiled: Scale={Scale}, Output={W}x{H}", scale, result.Width, result.Height);
return result;
}
/// <summary>
/// 查找模型文件,按优先级搜索多个目录(.onnx 格式)
/// </summary>
private static string FindModelFile(string model, int scale)
{
string baseDir = AppDomain.CurrentDomain.BaseDirectory;
string fileName = $"{model}_x{scale}.onnx";
string[] searchPaths = new[]
{
Path.Combine(baseDir, "Models", fileName),
Path.Combine(baseDir, fileName),
Path.Combine(Directory.GetCurrentDirectory(), "Models", fileName),
Path.Combine(Directory.GetCurrentDirectory(), fileName),
};
foreach (var path in searchPaths)
{
if (File.Exists(path))
{
_logger.Debug("Found model file: {Path}", path);
return path;
}
}
_logger.Warning("Model file not found: {Model}_x{Scale}.onnx", model, scale);
return string.Empty;
}
}