Implement performance optimizations for chart updates: adaptive throttling, data sampling, dynamic bin adjustment, and request batching
This commit is contained in:
@@ -374,6 +374,16 @@ def get_wealth_distribution(simulation_id: str):
|
||||
if num_bins < 1 or num_bins > 50:
|
||||
num_bins = 10 # Default to 10 bins
|
||||
|
||||
# Optimize bin count based on agent count for better performance
|
||||
agent_count = len(simulation.agents) if simulation.agents else 0
|
||||
if agent_count > 0:
|
||||
# Reduce bin count for small agent populations to improve performance
|
||||
if agent_count < 50 and num_bins > agent_count // 2:
|
||||
num_bins = max(3, agent_count // 2)
|
||||
# Cap bin count for very large simulations to prevent performance issues
|
||||
elif agent_count > 1000 and num_bins > 25:
|
||||
num_bins = 25
|
||||
|
||||
# Get histogram data
|
||||
bin_labels, bin_counts = simulation.get_wealth_histogram(num_bins)
|
||||
|
||||
|
@@ -714,7 +714,8 @@ function startFallbackPolling() {
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await window.MarkovEconomics.utils.apiRequest(
|
||||
// Use enhanced API request with batching support
|
||||
const response = await enhancedApiRequest(
|
||||
`/api/simulation/${currentSimulation.id}/data?include_evolution=true&include_distribution=true`
|
||||
);
|
||||
|
||||
@@ -772,8 +773,8 @@ async function startSimulation() {
|
||||
// Update UI state
|
||||
updateUIState('starting');
|
||||
|
||||
// Create simulation
|
||||
const createResponse = await window.MarkovEconomics.utils.apiRequest('/api/simulation', {
|
||||
// Create simulation using enhanced API request
|
||||
const createResponse = await enhancedApiRequest('/api/simulation', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(parameters)
|
||||
});
|
||||
@@ -796,8 +797,8 @@ async function startSimulation() {
|
||||
debugLog('WebSocket not available, will use fallback polling');
|
||||
}
|
||||
|
||||
// Start simulation
|
||||
await window.MarkovEconomics.utils.apiRequest(`/api/simulation/${currentSimulation.id}/start`, {
|
||||
// Start simulation using enhanced API request
|
||||
await enhancedApiRequest(`/api/simulation/${currentSimulation.id}/start`, {
|
||||
method: 'POST'
|
||||
});
|
||||
|
||||
@@ -825,7 +826,8 @@ async function stopSimulation() {
|
||||
if (!currentSimulation.id) return;
|
||||
|
||||
try {
|
||||
await window.MarkovEconomics.utils.apiRequest(`/api/simulation/${currentSimulation.id}/stop`, {
|
||||
// Use enhanced API request with batching support
|
||||
await enhancedApiRequest(`/api/simulation/${currentSimulation.id}/stop`, {
|
||||
method: 'POST'
|
||||
});
|
||||
|
||||
@@ -989,15 +991,47 @@ function updateUIState(state) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Update simulation progress
|
||||
* Calculate adaptive update interval based on simulation parameters
|
||||
* Reduces update frequency for larger simulations to improve performance
|
||||
*/
|
||||
function updateSimulationProgress(data) {
|
||||
// Update progress bar
|
||||
const progressBar = document.getElementById('progressBar');
|
||||
const progressText = document.getElementById('progressText');
|
||||
function getAdaptiveUpdateInterval() {
|
||||
if (!currentSimulation.parameters.num_agents || !currentSimulation.parameters.iterations) {
|
||||
return 50; // Default to 50ms
|
||||
}
|
||||
|
||||
if (progressBar && progressText) {
|
||||
const percentage = data.progress_percentage || 0;
|
||||
const agentCount = currentSimulation.parameters.num_agents;
|
||||
const iterationCount = currentSimulation.parameters.iterations;
|
||||
|
||||
// Base interval (ms)
|
||||
let interval = 50;
|
||||
|
||||
// Increase interval for larger simulations
|
||||
if (agentCount > 1000) {
|
||||
interval = Math.min(200, 50 + (agentCount / 100));
|
||||
} else if (agentCount > 500) {
|
||||
interval = 100;
|
||||
}
|
||||
|
||||
// Further adjust based on iteration count
|
||||
if (iterationCount > 50000) {
|
||||
interval *= 2;
|
||||
} else if (iterationCount > 10000) {
|
||||
interval *= 1.5;
|
||||
}
|
||||
|
||||
// Cap at reasonable values
|
||||
return Math.min(500, Math.max(20, interval));
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle simulation progress updates
|
||||
*/
|
||||
function onSimulationProgress(data) {
|
||||
// Update progress bar
|
||||
const progressBar = document.getElementById('simulationProgressBar');
|
||||
const progressText = document.getElementById('progressText');
|
||||
if (progressBar && progressText && data.progress_percentage !== undefined) {
|
||||
const percentage = Math.min(100, Math.max(0, data.progress_percentage));
|
||||
progressBar.style.width = percentage + '%';
|
||||
progressText.textContent = percentage.toFixed(1) + '%';
|
||||
}
|
||||
@@ -1027,10 +1061,20 @@ function updateSimulationProgress(data) {
|
||||
// Store the distribution data properly
|
||||
currentSimulation.data.distribution.labels = [...data.distribution.labels];
|
||||
currentSimulation.data.distribution.counts = [...data.distribution.counts];
|
||||
} else if (data.iteration % Math.max(1, Math.floor(currentSimulation.parameters.iterations / 50)) === 0) {
|
||||
// Periodically fetch distribution data if not provided in the update
|
||||
// But less frequently for larger simulations
|
||||
fetchDistributionData().then(histogram => {
|
||||
if (histogram && histogram.labels && histogram.counts) {
|
||||
currentSimulation.data.distribution.labels = [...histogram.labels];
|
||||
currentSimulation.data.distribution.counts = [...histogram.counts];
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Throttle chart updates to improve performance
|
||||
if (!window.lastChartUpdate || (Date.now() - window.lastChartUpdate) > 50) {
|
||||
// Adaptive throttling based on simulation size
|
||||
const adaptiveInterval = getAdaptiveUpdateInterval();
|
||||
if (!window.lastChartUpdate || (Date.now() - window.lastChartUpdate) > adaptiveInterval) {
|
||||
updateCharts();
|
||||
window.lastChartUpdate = Date.now();
|
||||
}
|
||||
@@ -1040,21 +1084,80 @@ function updateSimulationProgress(data) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Update charts with new data
|
||||
* Sample data for chart rendering to improve performance with large datasets
|
||||
* @param {Array} data - Array of data points
|
||||
* @param {number} maxPoints - Maximum number of points to display
|
||||
* @returns {Array} - Sampled data
|
||||
*/
|
||||
function sampleDataForChart(data, maxPoints = 200) {
|
||||
if (!Array.isArray(data) || data.length <= maxPoints) {
|
||||
return data;
|
||||
}
|
||||
|
||||
const sampled = [];
|
||||
const step = Math.ceil(data.length / maxPoints);
|
||||
|
||||
for (let i = 0; i < data.length; i += step) {
|
||||
sampled.push(data[i]);
|
||||
}
|
||||
|
||||
return sampled;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sample time series data for chart rendering
|
||||
* @param {Object} chartData - Object containing time series data arrays
|
||||
* @param {number} maxPoints - Maximum number of points to display
|
||||
* @returns {Object} - Object with sampled data arrays
|
||||
*/
|
||||
function sampleTimeSeriesData(chartData, maxPoints = 200) {
|
||||
if (!chartData.iterations || chartData.iterations.length <= maxPoints) {
|
||||
return chartData;
|
||||
}
|
||||
|
||||
const step = Math.ceil(chartData.iterations.length / maxPoints);
|
||||
|
||||
return {
|
||||
iterations: sampleDataForChart(chartData.iterations, maxPoints),
|
||||
totalWealth: sampleDataForChart(chartData.totalWealth, maxPoints),
|
||||
giniCoefficients: sampleDataForChart(chartData.giniCoefficients, maxPoints),
|
||||
capitalShare: sampleDataForChart(chartData.capitalShare, maxPoints),
|
||||
top10Share: sampleDataForChart(chartData.top10Share, maxPoints)
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Update charts with new data using sampling optimization
|
||||
*/
|
||||
function updateCharts() {
|
||||
// Determine maximum points based on agent count for better performance
|
||||
const maxChartPoints = currentSimulation.parameters.num_agents > 1000 ? 100 : 200;
|
||||
|
||||
// Wealth Evolution Chart
|
||||
if (charts.wealthEvolution) {
|
||||
charts.wealthEvolution.data.labels = currentSimulation.data.iterations;
|
||||
charts.wealthEvolution.data.datasets[0].data = currentSimulation.data.totalWealth;
|
||||
// Sample data for large datasets
|
||||
const sampledData = sampleTimeSeriesData({
|
||||
iterations: currentSimulation.data.iterations,
|
||||
totalWealth: currentSimulation.data.totalWealth
|
||||
}, maxChartPoints);
|
||||
|
||||
charts.wealthEvolution.data.labels = sampledData.iterations;
|
||||
charts.wealthEvolution.data.datasets[0].data = sampledData.totalWealth;
|
||||
charts.wealthEvolution.update('none');
|
||||
}
|
||||
|
||||
// Inequality Chart
|
||||
if (charts.inequality) {
|
||||
charts.inequality.data.labels = currentSimulation.data.iterations;
|
||||
charts.inequality.data.datasets[0].data = currentSimulation.data.giniCoefficients;
|
||||
charts.inequality.data.datasets[1].data = currentSimulation.data.top10Share;
|
||||
// Sample data for large datasets
|
||||
const sampledData = sampleTimeSeriesData({
|
||||
iterations: currentSimulation.data.iterations,
|
||||
giniCoefficients: currentSimulation.data.giniCoefficients,
|
||||
top10Share: currentSimulation.data.top10Share
|
||||
}, maxChartPoints);
|
||||
|
||||
charts.inequality.data.labels = sampledData.iterations;
|
||||
charts.inequality.data.datasets[0].data = sampledData.giniCoefficients;
|
||||
charts.inequality.data.datasets[1].data = sampledData.top10Share;
|
||||
charts.inequality.update('none');
|
||||
}
|
||||
|
||||
@@ -1135,7 +1238,8 @@ async function onSimulationComplete(data) {
|
||||
// Fetch complete simulation data and populate charts
|
||||
try {
|
||||
debugLog('Fetching complete simulation data...');
|
||||
const response = await window.MarkovEconomics.utils.apiRequest(
|
||||
// Use enhanced API request with batching support
|
||||
const response = await enhancedApiRequest(
|
||||
`/api/simulation/${currentSimulation.id}/data?include_evolution=true&include_distribution=true`
|
||||
);
|
||||
|
||||
@@ -1169,7 +1273,8 @@ async function onSimulationComplete(data) {
|
||||
// Fallback: try to get distribution data from dedicated endpoint
|
||||
try {
|
||||
debugLog('Attempting fallback distribution fetch...');
|
||||
const distResponse = await window.MarkovEconomics.utils.apiRequest(
|
||||
// Use enhanced API request with batching support
|
||||
const distResponse = await enhancedApiRequest(
|
||||
`/api/simulation/${currentSimulation.id}/distribution?bins=10`
|
||||
);
|
||||
if (distResponse.histogram && distResponse.histogram.labels && distResponse.histogram.counts) {
|
||||
@@ -1276,6 +1381,125 @@ function downloadFile(content, filename, contentType) {
|
||||
window.URL.revokeObjectURL(url);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate optimal bin count for distribution chart based on agent count
|
||||
* @param {number} agentCount - Number of agents in the simulation
|
||||
* @returns {number} - Optimal number of bins
|
||||
*/
|
||||
function getOptimalBinCount(agentCount) {
|
||||
if (agentCount < 50) {
|
||||
return Math.max(5, Math.floor(agentCount / 5));
|
||||
} else if (agentCount < 200) {
|
||||
return 10;
|
||||
} else if (agentCount < 1000) {
|
||||
return 15;
|
||||
} else if (agentCount < 5000) {
|
||||
return 20;
|
||||
} else {
|
||||
return 25; // Cap at 25 bins for very large simulations
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch distribution data with dynamic bin count using enhanced API requests
|
||||
*/
|
||||
async function fetchDistributionData() {
|
||||
if (!currentSimulation.id) return null;
|
||||
|
||||
try {
|
||||
// Calculate optimal bin count based on agent count
|
||||
const binCount = getOptimalBinCount(currentSimulation.parameters.num_agents || 100);
|
||||
|
||||
// Use enhanced API request with batching support
|
||||
const response = await enhancedApiRequest(
|
||||
`/api/simulation/${currentSimulation.id}/distribution?bins=${binCount}`
|
||||
);
|
||||
|
||||
return response.histogram;
|
||||
} catch (error) {
|
||||
debugLog('Error fetching distribution data', error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Batch API requests to reduce network overhead
|
||||
*/
|
||||
class ApiRequestBatcher {
|
||||
constructor() {
|
||||
this.pendingRequests = [];
|
||||
this.batchTimeout = null;
|
||||
this.maxBatchSize = 5;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a request to the batch
|
||||
*/
|
||||
addRequest(url, options = {}) {
|
||||
return new Promise((resolve, reject) => {
|
||||
this.pendingRequests.push({ url, options, resolve, reject });
|
||||
|
||||
// If we've reached max batch size, flush immediately
|
||||
if (this.pendingRequests.length >= this.maxBatchSize) {
|
||||
this.flush();
|
||||
} else if (!this.batchTimeout) {
|
||||
// Otherwise, schedule a flush
|
||||
this.batchTimeout = setTimeout(() => this.flush(), 50);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush all pending requests
|
||||
*/
|
||||
async flush() {
|
||||
if (this.batchTimeout) {
|
||||
clearTimeout(this.batchTimeout);
|
||||
this.batchTimeout = null;
|
||||
}
|
||||
|
||||
if (this.pendingRequests.length === 0) return;
|
||||
|
||||
// For now, we'll process requests individually since our API doesn't support batching
|
||||
// In a real implementation, this would send a single batched request
|
||||
const requests = [...this.pendingRequests];
|
||||
this.pendingRequests = [];
|
||||
|
||||
// Process all requests
|
||||
for (const request of requests) {
|
||||
try {
|
||||
const response = await fetch(request.url, request.options);
|
||||
if (!response.ok) {
|
||||
const errorData = await response.json().catch(() => ({}));
|
||||
throw new Error(errorData.error || `HTTP ${response.status}: ${response.statusText}`);
|
||||
}
|
||||
const data = await response.json();
|
||||
request.resolve(data);
|
||||
} catch (error) {
|
||||
request.reject(error);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create a global instance
|
||||
window.apiBatcher = new ApiRequestBatcher();
|
||||
|
||||
/**
|
||||
* Enhanced API request function with batching support
|
||||
*/
|
||||
async function enhancedApiRequest(url, options = {}) {
|
||||
// Use batching for certain types of requests
|
||||
const shouldBatch = url.includes('/data') || url.includes('/distribution');
|
||||
|
||||
if (shouldBatch) {
|
||||
return window.apiBatcher.addRequest(url, options);
|
||||
} else {
|
||||
// Fall back to regular API request for non-batchable requests
|
||||
return window.MarkovEconomics.utils.apiRequest(url, options);
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize when DOM is ready
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
// Initial inequality warning update
|
||||
|
Reference in New Issue
Block a user