// advanced-games-library
// Advanced Gaming Library for React Native - Four Complete Games with iOS Compatibility Fixes
// 921 lines (769 loc) • 26.6 kB • text/typescript
/**
 * Enhanced AI Engine — advanced AI engine for games.
 * Provides intelligent behavior via simplified machine learning,
 * behavior trees and A* pathfinding.
 */
/**
 * Profile of one AI-controlled player: identity, tunable traits, and the
 * accumulated decision/performance history used for learning.
 */
export interface AIPlayerProfile {
  /** Unique id generated by EnhancedAIEngine.createAIPlayer(). */
  id: string;
  /** Display name supplied at creation time. */
  name: string;
  skillLevel: number; // 0-1
  /** Trait weights that bias decision making. */
  personality: AIPersonality;
  /** Step size applied when adjusting the network after an outcome. */
  learningRate: number;
  /** When false, learnFromOutcome() is a no-op for this player. */
  adaptationEnabled: boolean;
  /** Every decision this player has made, in chronological order. */
  behaviorHistory: AIDecision[];
  /** Rolling statistics, updated as decision outcomes are reported. */
  performance: AIPerformanceMetrics;
}
/**
 * Personality trait weights, each in [0, 1]; higher values express the
 * trait more strongly when decisions are made (see applyPersonality()).
 */
export interface AIPersonality {
  aggression: number; // 0-1
  risktaking: number; // 0-1
  patience: number; // 0-1
  creativity: number; // 0-1
  consistency: number; // 0-1
  adaptability: number; // 0-1
}
/** A single recorded decision, kept for learning and later auditing. */
export interface AIDecision {
  /** Epoch milliseconds when the decision was made. */
  timestamp: number;
  /** Deep-copied snapshot of the game state at decision time. */
  gameState: any;
  /** The chosen action; shape is game-specific. */
  decision: any;
  outcome: number; // -1 to 1 (bad to good)
  confidence: number; // 0-1
  /** Human-readable reasoning steps collected during makeDecision(). */
  reasoningPath: string[];
}
/**
 * Rolling performance statistics for one AI player. All rate/progress
 * fields start at 0 and are updated incrementally.
 */
export interface AIPerformanceMetrics {
  /** Number of outcomes reported via updateDecisionOutcome(). */
  totalGames: number;
  /** Fraction of outcomes counted as wins (outcome > 0.5). */
  winRate: number;
  averageScore: number;
  /** Running average of |outcome| per reported decision. */
  decisionAccuracy: number;
  adaptationRate: number;
  /** Accumulates |error| * learningRate on each learning step. */
  learningProgress: number;
  /** Smoothed thinking time in milliseconds. */
  responseTime: number;
  consistency: number;
}
/**
 * One node of a behavior tree. Selectors return the first child that
 * succeeds; sequences require every child to succeed; leaves run an
 * action. The `decorator` variant is declared but not yet executed by
 * EnhancedAIEngine.executeBehaviorTree() — TODO confirm intended use.
 */
export interface BehaviorNode {
  id: string;
  type: 'selector' | 'sequence' | 'decorator' | 'leaf';
  /** Human-readable label, used for debugging only. */
  name: string;
  children?: BehaviorNode[];
  /** Optional guard; when it returns false the node is skipped. */
  condition?: (context: any) => boolean;
  /** Async action executed for leaf nodes. */
  action?: (context: any) => Promise<any>;
  decorator?: {
    type: 'repeat' | 'until_fail' | 'until_success' | 'invert' | 'cooldown';
    params?: any;
  };
}
/**
 * Game-agnostic snapshot of a running game. Only `players` and `turn`
 * are required; all other fields depend on the game type (a `board`
 * marks the state as a puzzle game — see getBehaviorTreeForGame()).
 */
export interface GameState {
  /** 2D board; puzzle games use number cells where 0 is the empty slot. */
  board?: any[][];
  players: Array<{
    id: string;
    position?: { x: number; y: number };
    score: number;
    health?: number;
    inventory?: any[];
  }>;
  /** Id of the player whose turn it is. */
  turn: string;
  /** Remaining time in milliseconds — presumably; TODO confirm units. */
  timeRemaining?: number;
  objectives?: Array<{
    id: string;
    type: string;
    completed: boolean;
    priority: number;
  }>;
  environment?: {
    obstacles?: Array<{ x: number; y: number; type: string }>;
    powerups?: Array<{ x: number; y: number; type: string; value: number }>;
  };
}
/** A* search node used internally by EnhancedAIEngine.findPath(). */
export interface PathfindingNode {
  x: number;
  y: number;
  g: number; // Cost from start
  h: number; // Heuristic cost to goal
  f: number; // Total cost
  /** Predecessor on the best known path; used to reconstruct the route. */
  parent?: PathfindingNode;
  walkable: boolean;
}
/**
 * Enhanced AI Engine — advanced AI engine for games.
 *
 * Provides computer opponents driven by behavior trees, a simplified
 * neural-network store, outcome-based learning, and A* pathfinding.
 * Several game-specific evaluators are intentionally shallow
 * placeholders (see individual method comments).
 */
class EnhancedAIEngine {
  /** Registered AI players keyed by generated id. */
  private aiPlayers: Map<string, AIPlayerProfile> = new Map();
  /** Behavior trees keyed by game type ('puzzle' / 'strategy'). */
  private behaviorTrees: Map<string, BehaviorNode> = new Map();
  /** Simplified neural-network configs keyed by player id. */
  private neuralNetworks: Map<string, any> = new Map(); // Simplified NN storage
  /** Flat log of every decision made by any player. */
  private decisionHistory: AIDecision[] = [];
  /** Working grid, rebuilt on every findPath() call. */
  private pathfindingGrid: PathfindingNode[][] = [];
  /** Reserved for future training data; cleared in dispose(). */
  private learningData: Map<string, any[]> = new Map();

  constructor() {
    this.initializeDefaultBehaviors();
    console.log('🤖 Enhanced AI Engine initialized');
  }

  /**
   * Registers the default behavior trees (puzzle and strategy).
   */
  private initializeDefaultBehaviors(): void {
    // Behavior tree for puzzle games: analyze board → enumerate moves →
    // pick one. Each leaf also publishes its result onto the shared
    // context so later leaves in the sequence can read it.
    const puzzleBehavior: BehaviorNode = {
      id: 'puzzle_root',
      type: 'selector',
      name: 'Puzzle AI Root',
      children: [
        {
          id: 'find_best_move',
          type: 'sequence',
          name: 'Find Best Move',
          children: [
            {
              id: 'analyze_board',
              type: 'leaf',
              name: 'Analyze Board',
              action: async (context) => this.analyzePuzzleBoard(context)
            },
            {
              id: 'calculate_moves',
              type: 'leaf',
              name: 'Calculate Possible Moves',
              action: async (context) => this.calculatePossibleMoves(context)
            },
            {
              id: 'select_move',
              type: 'leaf',
              name: 'Select Best Move',
              action: async (context) => this.selectBestMove(context)
            }
          ]
        }
      ]
    };
    // Behavior tree for strategy games: flee when in danger, attack when
    // aggression allows, otherwise fall back to defense.
    const strategyBehavior: BehaviorNode = {
      id: 'strategy_root',
      type: 'selector',
      name: 'Strategy AI Root',
      children: [
        {
          id: 'emergency_response',
          type: 'sequence',
          name: 'Emergency Response',
          condition: (context) => this.isInDanger(context),
          children: [
            {
              id: 'find_escape',
              type: 'leaf',
              name: 'Find Escape Route',
              action: async (context) => this.findEscapeRoute(context)
            }
          ]
        },
        {
          id: 'offensive_strategy',
          type: 'sequence',
          name: 'Offensive Strategy',
          condition: (context) => this.shouldAttack(context),
          children: [
            {
              id: 'find_target',
              type: 'leaf',
              name: 'Find Target',
              action: async (context) => this.findBestTarget(context)
            },
            {
              id: 'plan_attack',
              type: 'leaf',
              name: 'Plan Attack',
              action: async (context) => this.planAttack(context)
            }
          ]
        },
        {
          id: 'defensive_strategy',
          type: 'leaf',
          name: 'Defensive Strategy',
          action: async (context) => this.planDefense(context)
        }
      ]
    };
    this.behaviorTrees.set('puzzle', puzzleBehavior);
    this.behaviorTrees.set('strategy', strategyBehavior);
  }

  /**
   * Creates and registers a new AI player.
   *
   * @param config - name, optional skill level (0-1, default 0.5),
   *   optional personality overrides, and the game type used to size
   *   the player's neural network.
   * @returns The generated player id.
   */
  createAIPlayer(config: {
    name: string;
    skillLevel?: number;
    personality?: Partial<AIPersonality>;
    gameType: string;
  }): string {
    // slice() replaces the deprecated substr(); same 9-char random suffix.
    const id = `ai_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
    const defaultPersonality: AIPersonality = {
      aggression: 0.5,
      risktaking: 0.4,
      patience: 0.6,
      creativity: 0.5,
      consistency: 0.7,
      adaptability: 0.6,
    };
    const aiPlayer: AIPlayerProfile = {
      id,
      name: config.name,
      // ?? (not ||) so an explicit skillLevel of 0 is preserved; clamp to
      // [0, 1] for consistency with setSkillLevel().
      skillLevel: Math.max(0, Math.min(1, config.skillLevel ?? 0.5)),
      personality: { ...defaultPersonality, ...config.personality },
      learningRate: 0.1,
      adaptationEnabled: true,
      behaviorHistory: [],
      performance: {
        totalGames: 0,
        winRate: 0,
        averageScore: 0,
        decisionAccuracy: 0,
        adaptationRate: 0,
        learningProgress: 0,
        responseTime: 0,
        consistency: 0,
      }
    };
    this.aiPlayers.set(id, aiPlayer);
    this.initializeNeuralNetwork(id, config.gameType);
    console.log(`🤖 AI Player created: ${config.name} (${id})`);
    return id;
  }

  /**
   * Initializes the (simplified) neural network for an AI player.
   */
  private initializeNeuralNetwork(playerId: string, gameType: string): void {
    // Simplified neural network structure; weights/biases are placeholders.
    const networkConfig = {
      inputSize: this.getInputSize(gameType),
      hiddenLayers: [64, 32, 16],
      outputSize: this.getOutputSize(gameType),
      weights: this.generateRandomWeights(gameType),
      biases: this.generateRandomBiases(gameType),
    };
    this.neuralNetworks.set(playerId, networkConfig);
  }

  /**
   * Makes a decision for the given AI player.
   *
   * Runs state analysis, the matching behavior tree, the neural network,
   * and personality adjustment; records the decision for later learning.
   *
   * @param playerId - id returned by createAIPlayer().
   * @param gameState - current game snapshot.
   * @param options.thinkingTime - override for the simulated delay (ms).
   * @param options.explainReasoning - include the reasoning trace.
   * @throws Error when the player id is unknown.
   */
  async makeDecision(playerId: string, gameState: GameState, options?: {
    thinkingTime?: number;
    explainReasoning?: boolean;
  }): Promise<{
    decision: any;
    confidence: number;
    reasoning?: string[];
    thinkingTime: number;
  }> {
    const startTime = performance.now();
    const aiPlayer = this.aiPlayers.get(playerId);
    if (!aiPlayer) {
      throw new Error(`AI Player not found: ${playerId}`);
    }
    // Simulate thinking time based on skill level and personality.
    const baseThinkingTime = options?.thinkingTime || this.calculateThinkingTime(aiPlayer);
    await this.simulateThinking(baseThinkingTime);
    const reasoning: string[] = [];
    try {
      // Analyze game state.
      reasoning.push('Analyzing current game state...');
      const stateAnalysis = this.analyzeGameState(gameState, aiPlayer);
      // Use behavior tree for decision making. Leaf actions may overwrite
      // context.analysis / context.possibleMoves as the tree executes.
      reasoning.push('Executing behavior tree...');
      const behaviorResult = await this.executeBehaviorTree(
        this.getBehaviorTreeForGame(gameState),
        { gameState, aiPlayer, analysis: stateAnalysis }
      );
      // Apply neural network processing.
      reasoning.push('Processing with neural network...');
      const networkOutput = this.processWithNeuralNetwork(playerId, stateAnalysis);
      // Combine results and apply personality.
      reasoning.push('Applying personality traits...');
      const decision = this.applyPersonality(behaviorResult, networkOutput, aiPlayer.personality);
      // Calculate confidence.
      const confidence = this.calculateConfidence(decision, stateAnalysis, aiPlayer);
      // Record the decision; outcome stays 0 until reported via
      // updateDecisionOutcome().
      const aiDecision: AIDecision = {
        timestamp: Date.now(),
        gameState: JSON.parse(JSON.stringify(gameState)),
        decision,
        outcome: 0, // Will be updated when outcome is known
        confidence,
        reasoningPath: reasoning
      };
      aiPlayer.behaviorHistory.push(aiDecision);
      this.decisionHistory.push(aiDecision);
      const thinkingTime = performance.now() - startTime;
      // Smooth response time as the mean of the previous value and the
      // latest measurement.
      aiPlayer.performance.responseTime =
        (aiPlayer.performance.responseTime + thinkingTime) / 2;
      console.log(`🧠 AI Decision made by ${aiPlayer.name}: confidence ${confidence.toFixed(2)}`);
      return {
        decision,
        confidence,
        reasoning: options?.explainReasoning ? reasoning : undefined,
        thinkingTime
      };
    } catch (error) {
      console.error('AI Decision Error:', error);
      // Fallback to a safe decision rather than propagating the error.
      const fallbackDecision = this.generateFallbackDecision(gameState);
      return {
        decision: fallbackDecision,
        confidence: 0.1,
        reasoning: options?.explainReasoning ? ['Error occurred, using fallback decision'] : undefined,
        thinkingTime: performance.now() - startTime
      };
    }
  }

  /**
   * Reports the outcome of a previously recorded decision (learning hook).
   *
   * @param decisionIndex - index into the player's behaviorHistory.
   * @param outcome - -1 (bad) .. 1 (good).
   */
  updateDecisionOutcome(playerId: string, decisionIndex: number, outcome: number): void {
    const aiPlayer = this.aiPlayers.get(playerId);
    if (!aiPlayer || !aiPlayer.behaviorHistory[decisionIndex]) return;
    const decision = aiPlayer.behaviorHistory[decisionIndex];
    decision.outcome = outcome;
    // Learn from the outcome.
    this.learnFromOutcome(playerId, decision);
    // Update performance metrics.
    this.updatePerformanceMetrics(aiPlayer, outcome);
    console.log(`📈 AI Learning: ${aiPlayer.name} updated with outcome ${outcome}`);
  }

  /**
   * Learns from a decision outcome (simplified gradient step).
   */
  private learnFromOutcome(playerId: string, decision: AIDecision): void {
    const aiPlayer = this.aiPlayers.get(playerId);
    if (!aiPlayer || !aiPlayer.adaptationEnabled) return;
    const network = this.neuralNetworks.get(playerId);
    if (!network) return;
    // Simplified learning algorithm: error is the gap between how well
    // the decision went and how confident the AI was.
    const learningRate = aiPlayer.learningRate;
    const error = decision.outcome - decision.confidence;
    // Only adjust on meaningful errors (simplified backpropagation).
    if (Math.abs(error) > 0.1) {
      this.adjustNeuralNetwork(network, error, learningRate);
      // Track cumulative learning progress.
      aiPlayer.performance.learningProgress += Math.abs(error) * learningRate;
    }
  }

  /**
   * Pathfinding with the A* algorithm (Manhattan heuristic, 4-neighbor).
   *
   * @param grid - grid[y][x] is true when the cell is walkable.
   * @returns The path from (exclusive) start to (inclusive) goal, or []
   *   when no path exists, the grid is empty, or an endpoint is out of
   *   bounds.
   */
  findPath(start: { x: number; y: number }, goal: { x: number; y: number }, grid: boolean[][]): { x: number; y: number }[] {
    // Guard: empty grid or out-of-range endpoints previously crashed.
    if (grid.length === 0 || grid[0].length === 0) return [];
    const height = grid.length;
    const width = grid[0].length;
    const inBounds = (p: { x: number; y: number }) =>
      p.x >= 0 && p.x < width && p.y >= 0 && p.y < height;
    if (!inBounds(start) || !inBounds(goal)) return [];
    const openSet: PathfindingNode[] = [];
    // Sets give O(1) membership checks (the arrays used Array.includes).
    const openLookup = new Set<PathfindingNode>();
    const closedSet = new Set<PathfindingNode>();
    // Build a fresh node grid for this search.
    this.pathfindingGrid = grid.map((row, y) =>
      row.map((walkable, x) => ({
        x, y, g: 0, h: 0, f: 0, walkable, parent: undefined
      }))
    );
    const startNode = this.pathfindingGrid[start.y][start.x];
    const goalNode = this.pathfindingGrid[goal.y][goal.x];
    openSet.push(startNode);
    openLookup.add(startNode);
    while (openSet.length > 0) {
      // Find node with lowest f score (strict <, so the earliest-pushed
      // node wins ties — keeps results deterministic).
      let currentNode = openSet[0];
      let currentIndex = 0;
      openSet.forEach((node, index) => {
        if (node.f < currentNode.f) {
          currentNode = node;
          currentIndex = index;
        }
      });
      // Move current node from open to closed set.
      openSet.splice(currentIndex, 1);
      openLookup.delete(currentNode);
      closedSet.add(currentNode);
      // Found the goal: walk parents back to (but not including) start.
      if (currentNode.x === goal.x && currentNode.y === goal.y) {
        const path: { x: number; y: number }[] = [];
        let node = currentNode;
        while (node.parent) {
          path.unshift({ x: node.x, y: node.y });
          node = node.parent;
        }
        return path;
      }
      // Relax each walkable, unvisited neighbor.
      const neighbors = this.getNeighbors(currentNode);
      for (const neighbor of neighbors) {
        if (!neighbor.walkable || closedSet.has(neighbor)) continue;
        const tentativeG = currentNode.g + this.getDistance(currentNode, neighbor);
        if (!openLookup.has(neighbor)) {
          openSet.push(neighbor);
          openLookup.add(neighbor);
        } else if (tentativeG >= neighbor.g) {
          continue; // Existing route is at least as good.
        }
        neighbor.parent = currentNode;
        neighbor.g = tentativeG;
        neighbor.h = this.getDistance(neighbor, goalNode);
        neighbor.f = neighbor.g + neighbor.h;
      }
    }
    return []; // No path found
  }

  /**
   * Analyzes the game state into a compact metrics object.
   */
  private analyzeGameState(gameState: GameState, aiPlayer: AIPlayerProfile): any {
    const analysis = {
      boardControl: 0,
      threatLevel: 0,
      opportunities: [] as any[],
      playerAdvantage: 0,
      timePreassure: 0,
      resourceAvailability: 0,
    };
    // Analyze board control.
    if (gameState.board) {
      analysis.boardControl = this.calculateBoardControl(gameState, aiPlayer.id);
    }
    // Analyze threats.
    analysis.threatLevel = this.calculateThreatLevel(gameState, aiPlayer.id);
    // Find opportunities.
    analysis.opportunities = this.findOpportunities(gameState, aiPlayer);
    // Calculate player advantage vs. the average opponent score. Guard
    // against a single-player state (previously divided by zero → NaN).
    const aiPlayerData = gameState.players.find(p => p.id === aiPlayer.id);
    const opponents = gameState.players.filter(p => p.id !== aiPlayer.id);
    if (aiPlayerData && opponents.length > 0) {
      const avgOpponentScore =
        opponents.reduce((sum, p) => sum + p.score, 0) / opponents.length;
      analysis.playerAdvantage = (aiPlayerData.score - avgOpponentScore) / Math.max(avgOpponentScore, 1);
    }
    // Time pressure: != null so a remaining time of exactly 0 still
    // registers as maximum pressure (0 is falsy and was skipped before).
    if (gameState.timeRemaining != null) {
      analysis.timePreassure = Math.max(0, 1 - (gameState.timeRemaining / 60000)); // Assuming 1 minute max
    }
    return analysis;
  }

  /**
   * Computes the simulated thinking time in milliseconds.
   * Higher skill thinks faster; higher patience thinks longer.
   */
  private calculateThinkingTime(aiPlayer: AIPlayerProfile): number {
    const baseTime = 500; // ms
    const skillMultiplier = 1 - aiPlayer.skillLevel; // Higher skill = faster thinking
    const patienceMultiplier = aiPlayer.personality.patience;
    return baseTime * skillMultiplier * patienceMultiplier;
  }

  /**
   * Awaits for the given duration to simulate "thinking".
   */
  private async simulateThinking(duration: number): Promise<void> {
    return new Promise(resolve => setTimeout(resolve, duration));
  }

  /**
   * Recursively executes a behavior tree node.
   * Selector: first successful child wins. Sequence: all children must
   * succeed; returns their results. Leaf: runs its action.
   */
  private async executeBehaviorTree(node: BehaviorNode, context: any): Promise<any> {
    if (!node) return null;
    // Honor guard conditions on ALL node types. The original only checked
    // conditions on leaves, so guards on sequence/selector nodes (e.g.
    // 'emergency_response', 'offensive_strategy') were silently ignored.
    if (node.condition && !node.condition(context)) return null;
    switch (node.type) {
      case 'selector':
        for (const child of node.children || []) {
          try {
            const result = await this.executeBehaviorTree(child, context);
            if (result !== null && result !== false) return result;
          } catch (error) {
            continue; // Try next child
          }
        }
        return null;
      case 'sequence': {
        const results = [];
        for (const child of node.children || []) {
          const result = await this.executeBehaviorTree(child, context);
          if (result === null || result === false) return null;
          results.push(result);
        }
        return results;
      }
      case 'leaf':
        if (node.action) return await node.action(context);
        return true;
      default:
        // 'decorator' nodes are declared in BehaviorNode but not yet
        // implemented here.
        return null;
    }
  }

  /**
   * Puzzle helper: locates the empty cell and measures board disorder.
   * Publishes its result as context.analysis for the next leaves.
   */
  private async analyzePuzzleBoard(context: any): Promise<any> {
    const { gameState } = context;
    // Find the empty cell. A labeled break exits both loops — the
    // original `break` only exited the inner row loop.
    let emptyPos = { x: -1, y: -1 };
    const size = gameState.board.length;
    outer: for (let y = 0; y < size; y++) {
      for (let x = 0; x < size; x++) {
        if (gameState.board[y][x] === 0) {
          emptyPos = { x, y };
          break outer;
        }
      }
    }
    const result = {
      emptyPosition: emptyPos,
      boardSize: size,
      disorder: this.calculatePuzzleDisorder(gameState.board)
    };
    // Share with downstream leaves: the context seeded by makeDecision()
    // carries a generic analysis that lacks emptyPosition.
    context.analysis = result;
    return result;
  }

  /**
   * Puzzle helper: enumerates tiles adjacent to the empty cell.
   * Publishes the list as context.possibleMoves for selectBestMove().
   */
  private async calculatePossibleMoves(context: any): Promise<any> {
    const { analysis } = context;
    const moves: any[] = [];
    // Guard: without a puzzle analysis there is no empty position —
    // destructuring undefined previously threw here.
    if (!analysis?.emptyPosition) {
      context.possibleMoves = moves;
      return moves;
    }
    const { x, y } = analysis.emptyPosition;
    const directions = [
      { dx: 0, dy: -1, name: 'up' },
      { dx: 0, dy: 1, name: 'down' },
      { dx: -1, dy: 0, name: 'left' },
      { dx: 1, dy: 0, name: 'right' }
    ];
    for (const dir of directions) {
      const newX = x + dir.dx;
      const newY = y + dir.dy;
      if (newX >= 0 && newX < analysis.boardSize &&
          newY >= 0 && newY < analysis.boardSize) {
        moves.push({
          from: { x: newX, y: newY },
          to: { x, y },
          direction: dir.name
        });
      }
    }
    context.possibleMoves = moves;
    return moves;
  }

  /**
   * Puzzle helper: scores each candidate move and picks one, with a
   * creativity-driven chance of choosing a near-optimal move instead.
   */
  private async selectBestMove(context: any): Promise<any> {
    const { gameState, aiPlayer } = context;
    // possibleMoves is written onto the context by calculatePossibleMoves().
    const moves = context.possibleMoves || [];
    if (moves.length === 0) return null;
    // Evaluate each move by simulating it.
    const evaluatedMoves = moves.map((move: any) => {
      const newBoard = this.simulateMove(gameState.board, move);
      const score = this.evaluatePuzzleBoard(newBoard, aiPlayer);
      return { ...move, score };
    });
    // Sort best-first.
    evaluatedMoves.sort((a: any, b: any) => b.score - a.score);
    // Apply creativity — sometimes choose a (near-)suboptimal move.
    const creativity = aiPlayer.personality.creativity;
    if (Math.random() < creativity && evaluatedMoves.length > 1) {
      const randomIndex = Math.floor(Math.random() * Math.min(3, evaluatedMoves.length));
      return evaluatedMoves[randomIndex];
    }
    return evaluatedMoves[0];
  }

  /**
   * Computes puzzle disorder: the sum of Manhattan distances of every
   * tile from its solved position (0 = solved).
   */
  private calculatePuzzleDisorder(board: number[][]): number {
    let disorder = 0;
    const size = board.length;
    for (let y = 0; y < size; y++) {
      for (let x = 0; x < size; x++) {
        const value = board[y][x];
        if (value === 0) continue; // The blank tile has no target.
        const targetY = Math.floor((value - 1) / size);
        const targetX = (value - 1) % size;
        disorder += Math.abs(x - targetX) + Math.abs(y - targetY);
      }
    }
    return disorder;
  }

  /**
   * Evaluates a puzzle board: 1 is (near-)solved, 0 is maximally messy.
   */
  private evaluatePuzzleBoard(board: number[][], aiPlayer: AIPlayerProfile): number {
    const disorder = this.calculatePuzzleDisorder(board);
    const maxDisorder = board.length * board.length * 2; // Rough estimate
    return (maxDisorder - disorder) / maxDisorder;
  }

  /**
   * Returns a copy of the board with the given move applied
   * (tile slides into the empty cell).
   */
  private simulateMove(board: number[][], move: any): number[][] {
    const newBoard = board.map(row => [...row]);
    const { from, to } = move;
    newBoard[to.y][to.x] = newBoard[from.y][from.x];
    newBoard[from.y][from.x] = 0;
    return newBoard;
  }

  /**
   * Strategy helpers — mostly placeholders pending game-specific logic.
   */
  private isInDanger(context: any): boolean {
    const { gameState, aiPlayer } = context;
    // Placeholder: implementation depends on game type.
    return false;
  }

  private shouldAttack(context: any): boolean {
    const { aiPlayer } = context;
    // More aggressive personalities attack more often.
    return Math.random() < aiPlayer.personality.aggression;
  }

  private async findEscapeRoute(context: any): Promise<any> {
    // Placeholder: find safest position.
    return { action: 'escape', target: { x: 0, y: 0 } };
  }

  private async findBestTarget(context: any): Promise<any> {
    const { gameState } = context;
    // Placeholder: picks the second player. NOTE(review): undefined when
    // fewer than two players exist — downstream nodes tolerate it today.
    return gameState.players[1];
  }

  private async planAttack(context: any): Promise<any> {
    return { action: 'attack', strategy: 'aggressive' };
  }

  private async planDefense(context: any): Promise<any> {
    return { action: 'defend', focus: 'protection' };
  }

  /**
   * Picks the behavior tree for a game state: a board implies puzzle.
   */
  private getBehaviorTreeForGame(gameState: GameState): BehaviorNode {
    if (gameState.board) {
      return this.behaviorTrees.get('puzzle')!;
    }
    return this.behaviorTrees.get('strategy')!;
  }

  private calculateBoardControl(gameState: GameState, playerId: string): number {
    // Placeholder: how much of the board/game the AI controls.
    return 0.5;
  }

  private calculateThreatLevel(gameState: GameState, playerId: string): number {
    // Placeholder: immediate threats to the AI player.
    return 0.2;
  }

  private findOpportunities(gameState: GameState, aiPlayer: AIPlayerProfile): any[] {
    // Placeholder: strategic opportunities.
    return [];
  }

  /**
   * Simplified neural-network forward pass (placeholder).
   */
  private processWithNeuralNetwork(playerId: string, analysis: any): any {
    const network = this.neuralNetworks.get(playerId);
    if (!network) return analysis;
    // In practice this would be a proper forward pass.
    return {
      ...analysis,
      networkConfidence: Math.random(),
      preferredAction: 'calculated'
    };
  }

  /**
   * Adjusts a decision according to personality traits.
   * (networkOutput is currently unused — reserved for blending.)
   */
  private applyPersonality(behaviorResult: any, networkOutput: any, personality: AIPersonality): any {
    let decision = behaviorResult;
    // Apply risk-taking.
    if (Math.random() < personality.risktaking) {
      decision = this.addRiskToDecision(decision);
    }
    // Apply patience.
    if (personality.patience > 0.7) {
      decision = this.makeDecisionMorePatient(decision);
    }
    return decision;
  }

  private addRiskToDecision(decision: any): any {
    return { ...decision, risk: 'high' };
  }

  private makeDecisionMorePatient(decision: any): any {
    return { ...decision, wait: true };
  }

  /**
   * Computes decision confidence in [0.1, 0.95] from skill, analysis
   * quality, and the consistency trait.
   */
  private calculateConfidence(decision: any, analysis: any, aiPlayer: AIPlayerProfile): number {
    let confidence = 0.5;
    // Base confidence on skill level.
    confidence += aiPlayer.skillLevel * 0.3;
    // Reward having identified opportunities.
    if (analysis.opportunities?.length > 0) {
      confidence += 0.2;
    }
    // Consistent personalities keep more of their confidence.
    confidence = confidence * (0.7 + aiPlayer.personality.consistency * 0.3);
    return Math.max(0.1, Math.min(0.95, confidence));
  }

  /**
   * Produces a safe fallback decision when decision making fails.
   */
  private generateFallbackDecision(gameState: GameState): any {
    return {
      action: 'wait',
      reason: 'fallback'
    };
  }

  /**
   * Updates rolling performance metrics with a reported outcome.
   * NOTE(review): totalGames increments per reported outcome, not per
   * actual game — confirm that is the intended meaning.
   */
  private updatePerformanceMetrics(aiPlayer: AIPlayerProfile, outcome: number): void {
    const perf = aiPlayer.performance;
    perf.totalGames++;
    // Update win rate (outcome > 0.5 counts as a win).
    const isWin = outcome > 0.5;
    perf.winRate = ((perf.winRate * (perf.totalGames - 1)) + (isWin ? 1 : 0)) / perf.totalGames;
    // Update decision accuracy as a running mean of |outcome|.
    perf.decisionAccuracy = ((perf.decisionAccuracy * (perf.totalGames - 1)) + Math.abs(outcome)) / perf.totalGames;
  }

  // Utility methods for the neural network.
  private getInputSize(gameType: string): number {
    switch (gameType) {
      case 'puzzle': return 16; // 4x4 puzzle board
      case 'strategy': return 32;
      default: return 16;
    }
  }

  private getOutputSize(gameType: string): number {
    switch (gameType) {
      case 'puzzle': return 4; // 4 directions
      case 'strategy': return 8;
      default: return 4;
    }
  }

  private generateRandomWeights(gameType: string): number[][][] {
    // Placeholder: random initial weights.
    return [];
  }

  private generateRandomBiases(gameType: string): number[][] {
    // Placeholder: random initial biases.
    return [];
  }

  private adjustNeuralNetwork(network: any, error: number, learningRate: number): void {
    // Placeholder: would implement proper backpropagation.
  }

  /**
   * Returns the in-bounds 4-neighbors of a pathfinding node
   * (order: up, right, down, left).
   */
  private getNeighbors(node: PathfindingNode): PathfindingNode[] {
    const neighbors: PathfindingNode[] = [];
    if (this.pathfindingGrid.length === 0) return neighbors;
    const directions = [
      { dx: 0, dy: -1 }, { dx: 1, dy: 0 },
      { dx: 0, dy: 1 }, { dx: -1, dy: 0 }
    ];
    for (const dir of directions) {
      const x = node.x + dir.dx;
      const y = node.y + dir.dy;
      if (x >= 0 && x < this.pathfindingGrid[0].length &&
          y >= 0 && y < this.pathfindingGrid.length) {
        neighbors.push(this.pathfindingGrid[y][x]);
      }
    }
    return neighbors;
  }

  /** Manhattan distance between two nodes. */
  private getDistance(nodeA: PathfindingNode, nodeB: PathfindingNode): number {
    return Math.abs(nodeA.x - nodeB.x) + Math.abs(nodeA.y - nodeB.y);
  }

  /**
   * Returns the full profile of an AI player, or null if unknown.
   */
  getAIPlayerStats(playerId: string): AIPlayerProfile | null {
    return this.aiPlayers.get(playerId) || null;
  }

  /**
   * Merges personality overrides into an AI player's profile.
   * @returns false when the player id is unknown.
   */
  updateAIPersonality(playerId: string, personality: Partial<AIPersonality>): boolean {
    const aiPlayer = this.aiPlayers.get(playerId);
    if (!aiPlayer) return false;
    aiPlayer.personality = { ...aiPlayer.personality, ...personality };
    return true;
  }

  /**
   * Sets an AI player's skill level, clamped to [0, 1].
   * @returns false when the player id is unknown.
   */
  setSkillLevel(playerId: string, skillLevel: number): boolean {
    const aiPlayer = this.aiPlayers.get(playerId);
    if (!aiPlayer) return false;
    aiPlayer.skillLevel = Math.max(0, Math.min(1, skillLevel));
    return true;
  }

  /**
   * Releases all engine resources.
   */
  dispose(): void {
    this.aiPlayers.clear();
    this.behaviorTrees.clear();
    this.neuralNetworks.clear();
    this.decisionHistory.length = 0;
    this.pathfindingGrid = [];
    this.learningData.clear();
    console.log('🤖 Enhanced AI Engine disposed');
  }
}
export default EnhancedAIEngine;