/*
 * oscam-ecm.c — ECM request handling (oscam-2.26.01-11942-802-wit fork)
 * (File-viewer export metadata removed: 2026-02-17 10:06:53 +00:00,
 *  4206 lines, 121 KiB, C, executable file.)
 */
#define MODULE_LOG_PREFIX "ecm"
#include "globals.h"
#include <stddef.h> // Added for the size_t definition
#include <math.h>
#include "cscrypt/md5.h"
#include "module-anticasc.h"
#include "module-cacheex.h"
#include "module-led.h"
#include "module-stat.h"
#include "module-webif.h"
#include "module-cw-cycle-check.h"
#include "module-gbox.h"
#include "oscam-cache.h"
#include "oscam-chk.h"
#include "oscam-client.h"
#include "oscam-config.h"
#include "oscam-ecm.h"
#include "oscam-garbage.h"
#include "oscam-failban.h"
#include "oscam-net.h"
#include "oscam-time.h"
#include "oscam-lock.h"
#include "oscam-string.h"
#include "oscam-work.h"
#include "reader-common.h"
#include "module-cccam-data.h"
#ifdef CS_CACHEEX_AIO
#include "oscam-hashtable.h"
#endif
extern CS_MUTEX_LOCK ecmcache_lock;
extern struct ecm_request_t *ecmcwcache;
extern uint32_t ecmcwcache_size;
extern int32_t exit_oscam;
extern CS_MUTEX_LOCK ecm_pushed_deleted_lock;
extern struct ecm_request_t *ecm_pushed_deleted;
static pthread_mutex_t cw_process_sleep_cond_mutex;
static pthread_cond_t cw_process_sleep_cond;
static int cw_process_wakeups;
int64_t ecmc_next, cache_next, msec_wait = 3000;
// Declarations of the virtual readers
static struct s_reader *virtual_cacheex_reader = NULL;
static struct s_reader *virtual_csp_reader = NULL;
#ifdef CS_CACHEEX_AIO
// ecm-cache
typedef struct ecm_cache
{
struct timeb first_recv_time;// time of first cw received
struct timeb upd_time; // updated time. Update time at each cw got
uint32_t csp_hash;
node ht_node;
node ll_node;
} ECM_CACHE;
static pthread_rwlock_t ecm_cache_lock;
static hash_table ht_ecm_cache;
static list ll_ecm_cache;
static int8_t ecm_cache_init_done = 0;
/*
 * Tear down the AIO ecm-cache: release the hash table contents first,
 * then destroy the rwlock that guarded it.
 */
void free_ecm_cache(void)
{
	deinitialize_hash_table(&ht_ecm_cache);
	pthread_rwlock_destroy(&ecm_cache_lock);
}
/*
 * Initialize the AIO ecm-cache (hash table + list + rwlock).
 * Only done when a cw-cache size/memory limit is configured; sets
 * ecm_cache_init_done on success so other functions can bail out early.
 */
void init_ecm_cache(void)
{
#ifdef CS_CACHEEX
	if(cfg.cw_cache_size > 0 || cfg.cw_cache_memory > 0)
	{
		init_hash_table(&ht_ecm_cache, &ll_ecm_cache);
		// Keep init_done at 0 if the lock cannot be created, so the cache stays disabled.
		if (pthread_rwlock_init(&ecm_cache_lock,NULL) != 0)
			{ cs_log("Error creating lock ecm_cache_lock!"); }
		else
			{ ecm_cache_init_done = 1; }
	}
#endif
}
/*
 * List-sort comparator for ECM_CACHE entries by upd_time (milliseconds since
 * epoch = time * 1000 + millitm): 0 when equal, -1 when a is newer, 1 otherwise,
 * i.e. newest-first ordering.
 * NOTE(review): the return type is uint8_t, so the literal -1 is converted to
 * 255 — both branches of the final ternary yield a positive value, which would
 * defeat a conventional three-way comparator. Verify the callback contract
 * expected by sort_list(); a signed return type looks intended.
 */
static uint8_t time_sort(ECM_CACHE *a, ECM_CACHE *b)
{
	if (((int64_t)(a->upd_time.time) * 1000ull + (int64_t) a->upd_time.millitm) == ((int64_t)(b->upd_time.time) * 1000ull + (int64_t) b->upd_time.millitm)) return 0;
	return (((int64_t)(a->upd_time.time) * 1000ull + (int64_t) a->upd_time.millitm) > ((int64_t)(b->upd_time.time) * 1000ull + (int64_t) b->upd_time.millitm)) ? -1 : 1;
}
/*
 * Hash-table comparator: match a raw csp_hash key (arg points at 4 bytes)
 * against the csp_hash stored in an ECM_CACHE node (obj).
 * Returns 0 on equality, per memcmp semantics.
 */
static int compare_csp_hash_ecmcache(const void *arg, const void *obj)
{
	uint32_t node_hash = ((const ECM_CACHE *)obj)->csp_hash;
	return memcmp(arg, &node_hash, sizeof(node_hash));
}
/*
 * Shrink or empty the AIO ecm-cache under the write lock.
 * force == true : remove every entry (shutdown / full flush).
 * force == false: sort by upd_time (newest first, see time_sort) and remove
 *                 roughly the oldest 10% of entries.
 * NOTE(review): with !force the counter is incremented before the compare, so
 * at most (ten_percent - 1) entries are removed, and none when the cache holds
 * fewer than ~10 entries — confirm this off-by-one is intended.
 */
void ecm_cache_cleanup(bool force)
{
	if(!ecm_cache_init_done)
		{ return; }
	SAFE_RWLOCK_WRLOCK(&ecm_cache_lock);
	ECM_CACHE *ecm_cache;
	node *i, *i_next;
	uint32_t ll_c = 0;
	uint32_t ll_ten_percent = (uint)tommy_list_count(&ll_ecm_cache)*0.1; // 10 percent of cache
	if(!force)
		sort_list(&ll_ecm_cache, time_sort);
	// Walk the list; capture the next node first since the current one may be freed.
	i = get_first_node_list(&ll_ecm_cache);
	while(i)
	{
		i_next = i->next;
		ecm_cache = get_data_from_node(i);
		if(!ecm_cache)
		{
			i = i_next;
			continue;
		}
		if(!force)
		{
			++ll_c;
			if(ll_c < ll_ten_percent)
			{
				// Remove from both the list and the hash table before freeing.
				remove_elem_list(&ll_ecm_cache, &ecm_cache->ll_node);
				remove_elem_hash_table(&ht_ecm_cache, &ecm_cache->ht_node);
				NULLFREE(ecm_cache);
			}
			else{
				// Quota reached — the list is sorted, so the rest is newer; stop.
				break;
			}
		}
		else{
			remove_elem_list(&ll_ecm_cache, &ecm_cache->ll_node);
			remove_elem_hash_table(&ht_ecm_cache, &ecm_cache->ht_node);
			NULLFREE(ecm_cache);
		}
		i = i_next;
	}
	SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
}
#endif
/*
 * maxparallel helper: size of the pending-slot array for a reader.
 * Computed as round(maxparallel * parallelfactor); a parallelfactor of zero
 * or below disables pending slots entirely (returns 0), and any positive
 * factor yields at least one slot.
 */
static inline int32_t get_pending_size(struct s_reader *rdr)
{
	if(rdr->parallelfactor <= 0.0f)
		return 0; // zapping support disabled or not configured
	int32_t slots = (int32_t)(rdr->maxparallel * rdr->parallelfactor + 0.5f);
	if(slots < 1)
		slots = 1;
	return slots;
}
/*
 * maxparallel helper: expiry threshold (ms) for a slot.
 * Uses the measured ECM interval plus the configured grace period; when no
 * interval has been measured yet, assumes a 10 s default interval.
 */
static int32_t get_slot_timeout(struct s_parallel_slot *slot, int32_t paralleltimeout)
{
	int32_t base = (slot->ecm_interval > 0) ? slot->ecm_interval : 10000;
	return base + paralleltimeout;
}
/*
 * maxparallel helper: mark a slot as free by resetting the fields this module
 * tracks (service id, measured interval, owning client, last-ECM timestamp).
 */
static void clear_slot(struct s_parallel_slot *slot)
{
	memset(&slot->last_ecm, 0, sizeof(slot->last_ecm));
	slot->client = NULL;
	slot->ecm_interval = 0;
	slot->srvid = 0;
}
/*
 * maxparallel helper: remember that (client, srvid) was denied on this reader.
 * A client has at most one entry: an existing entry is retargeted to the new
 * service id instead of appending a duplicate.
 */
static void block_client_service(struct s_reader *rdr, struct s_client *client, uint16_t srvid)
{
	struct s_blocked_client *entry;
	if(!rdr || !client || !rdr->blocked_services)
		return;
	// Reuse the client's existing entry if one is present.
	LL_ITER iter = ll_iter_create(rdr->blocked_services);
	for(entry = ll_iter_next(&iter); entry; entry = ll_iter_next(&iter))
	{
		if(entry->client == client)
		{
			entry->srvid = srvid;
			return;
		}
	}
	// No entry yet — allocate and append a fresh one.
	if(!cs_malloc(&entry, sizeof(struct s_blocked_client)))
		return;
	entry->client = client;
	entry->srvid = srvid;
	ll_append(rdr->blocked_services, entry);
}
/*
 * maxparallel helper: query whether (client, srvid) is on this reader's
 * blocked list (i.e. was dropped earlier). Returns 1 if blocked, 0 otherwise.
 */
static int8_t is_client_blocked(struct s_reader *rdr, struct s_client *client, uint16_t srvid)
{
	if(!rdr || !client || !rdr->blocked_services)
		return 0;
	struct s_blocked_client *entry;
	LL_ITER iter = ll_iter_create(rdr->blocked_services);
	for(entry = ll_iter_next(&iter); entry; entry = ll_iter_next(&iter))
	{
		if(entry->client == client && entry->srvid == srvid)
			return 1;
	}
	return 0;
}
/*
 * maxparallel helper: lift a client's block when it zaps to a service that
 * differs from the one it was blocked for. A request for the SAME blocked
 * service leaves the entry in place.
 */
static void unblock_client_if_different(struct s_reader *rdr, struct s_client *client, uint16_t new_srvid)
{
	if(!rdr || !client || !rdr->blocked_services)
		return;
	struct s_blocked_client *entry;
	LL_ITER iter = ll_iter_create(rdr->blocked_services);
	for(entry = ll_iter_next(&iter); entry; entry = ll_iter_next(&iter))
	{
		if(entry->client != client)
			continue;
		// Found the client's (single) entry — drop it only on a real zap.
		if(entry->srvid != new_srvid)
			ll_iter_remove_data(&iter);
		return;
	}
}
/*
 * maxparallel helper: wipe the reader's whole blocked list — used when an
 * active slot frees up, so previously denied clients may retry.
 */
static void clear_blocked_services(struct s_reader *rdr)
{
	if(rdr && rdr->blocked_services)
		ll_clear_data(rdr->blocked_services);
}
/**
 * maxparallel - Cleanup expired slots in both arrays and promote pending
 * - Removes expired slots from parallel_slots (active) and parallel_slots_prov (pending)
 * - Upgrades pending to active when space becomes available (FIFO: oldest first)
 * - Does NOT drop pending (that's done separately when active services send ECMs)
 * MUST be called with parallel_lock held (write)
 * Returns: count of active slots after cleanup
 */
static int32_t reader_cleanup_slots_nolock(struct s_reader *rdr, struct timeb *now)
{
	if(!rdr || rdr->maxparallel <= 0)
		return 0;
	int32_t i;
	int32_t active_count = 0;
	int32_t pending_count = 0;
	int32_t free_active = -1; // First empty slot in active array
	// Pass 1: Clean expired slots in active array, count active
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid == 0)
		{
			if(free_active < 0)
				free_active = i;
			continue;
		}
		// Expired = no ECM seen within interval + paralleltimeout (see get_slot_timeout)
		int64_t gone = comp_timeb(now, &rdr->parallel_slots[i].last_ecm);
		int32_t timeout = get_slot_timeout(&rdr->parallel_slots[i], rdr->paralleltimeout);
		if(gone > timeout)
		{
			cs_log_dbg(D_READER, "reader %s: service %04X expired (no ECM for %"PRId64" ms, timeout %d ms)",
					rdr->label, rdr->parallel_slots[i].srvid, gone, timeout);
			clear_slot(&rdr->parallel_slots[i]);
			if(free_active < 0)
				free_active = i;
		}
		else
		{
			active_count++;
		}
	}
	// Pass 2: Clean expired slots in pending array, count still-pending
	int32_t pending_size = get_pending_size(rdr);
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid == 0)
			continue;
		int64_t gone = comp_timeb(now, &rdr->parallel_slots_prov[i].last_ecm);
		int32_t timeout = get_slot_timeout(&rdr->parallel_slots_prov[i], rdr->paralleltimeout);
		if(gone > timeout)
		{
			cs_log_dbg(D_READER, "reader %s: pending service %04X expired (no ECM for %"PRId64" ms, timeout %d ms)",
					rdr->label, rdr->parallel_slots_prov[i].srvid, gone, timeout);
			clear_slot(&rdr->parallel_slots_prov[i]);
		}
		else
		{
			pending_count++;
		}
	}
	// Pass 3: Upgrade pending to active if space available (FIFO)
	// This handles the zapping case: old service expired, new one can be promoted
	while(active_count < rdr->maxparallel && pending_count > 0)
	{
		// Find oldest pending (largest age = first to arrive = FIFO upgrade)
		int32_t oldest_idx = -1;
		int64_t oldest_time = -1;
		for(i = 0; i < pending_size; i++)
		{
			if(rdr->parallel_slots_prov[i].srvid != 0)
			{
				int64_t age = comp_timeb(now, &rdr->parallel_slots_prov[i].last_ecm);
				if(oldest_idx < 0 || age > oldest_time)
				{
					oldest_idx = i;
					oldest_time = age;
				}
			}
		}
		if(oldest_idx < 0)
			break;
		// Find empty slot in active array (free_active may be stale after a promotion)
		if(free_active < 0)
		{
			for(i = 0; i < rdr->maxparallel; i++)
			{
				if(rdr->parallel_slots[i].srvid == 0)
				{
					free_active = i;
					break;
				}
			}
		}
		if(free_active < 0)
			break; // Should not happen, but safety check
		// Move pending to active (struct copy), then free the pending slot
		rdr->parallel_slots[free_active] = rdr->parallel_slots_prov[oldest_idx];
		clear_slot(&rdr->parallel_slots_prov[oldest_idx]);
		cs_log_dbg(D_READER, "reader %s: service %04X promoted from pending to active (slot %d)",
				rdr->label, rdr->parallel_slots[free_active].srvid, free_active);
		active_count++;
		pending_count--;
		free_active = -1; // Need to find next empty slot
	}
	// If active slots are now available, clear blocked list
	// This allows previously blocked clients to try again
	if(active_count < rdr->maxparallel)
	{
		clear_blocked_services(rdr);
	}
	return active_count;
}
/**
 * maxparallel - Drop pending services when active array is full (FIFO)
 * Called only when an ACTIVE service receives an ECM, proving it's still active.
 * Drops the OLDEST pending first (first in, first out); because active_count is
 * not changed in the loop, ALL pending entries are dropped while active is full.
 * This is fair: first to request overload is first to be denied.
 * Dropped clients are added to the blocked list so they fall over to other readers.
 * MUST be called with parallel_lock held (write)
 */
static void reader_drop_pending_nolock(struct s_reader *rdr, struct timeb *now)
{
	if(!rdr || rdr->maxparallel <= 0)
		return;
	// Count active and pending occupancy
	int32_t i;
	int32_t active_count = 0;
	int32_t pending_count = 0;
	int32_t pending_size = get_pending_size(rdr);
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid != 0)
			active_count++;
	}
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid != 0)
			pending_count++;
	}
	// Drop pending if active array is full (FIFO - oldest dropped first)
	while(active_count >= rdr->maxparallel && pending_count > 0)
	{
		// Find oldest pending (largest age = first to arrive = drop first)
		int32_t oldest_idx = -1;
		int64_t oldest_time = -1;
		for(i = 0; i < pending_size; i++)
		{
			if(rdr->parallel_slots_prov[i].srvid != 0)
			{
				int64_t age = comp_timeb(now, &rdr->parallel_slots_prov[i].last_ecm);
				if(oldest_idx < 0 || age > oldest_time)
				{
					oldest_idx = i;
					oldest_time = age;
				}
			}
		}
		if(oldest_idx < 0)
			break;
		// Add client to blocked list before clearing slot, so it fails over elsewhere
		struct s_parallel_slot *slot = &rdr->parallel_slots_prov[oldest_idx];
		if(slot->client)
		{
			block_client_service(rdr, slot->client, slot->srvid);
		}
		cs_log("reader %s: dropped pending service %04X (%.3f sec old, active services still running)",
				rdr->label, slot->srvid, (float)oldest_time / 1000);
		clear_slot(slot);
		pending_count--;
	}
}
/**
 * maxparallel - Check if reader has capacity for a service (does NOT reserve slot)
 * Used during ECM request to decide if reader should be tried.
 * Also checks if client+service is blocked (was dropped earlier).
 * Takes parallel_lock for reading only; no state is modified here.
 * Returns: 1 = has capacity (or service already registered), 0 = full or blocked (skip reader)
 */
static int8_t reader_has_capacity(struct s_reader *rdr, uint16_t srvid, struct s_client *client)
{
	if(!rdr || rdr->maxparallel <= 0)
		return 1; // unlimited
	cs_readlock(__func__, &rdr->parallel_lock);
	struct timeb now;
	cs_ftime(&now);
	int32_t i;
	int32_t pending_size = get_pending_size(rdr);
	int8_t result = 0;
	// Check if client+service is blocked (was dropped earlier)
	if(is_client_blocked(rdr, client, srvid))
	{
		result = 0;
		goto done;
	}
	// Check if service already registered in active array — existing services always pass
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid == srvid)
		{
			result = 1;
			goto done;
		}
	}
	// Check if service already registered in pending array
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid == srvid)
		{
			result = 1;
			goto done;
		}
	}
	// Count active slots; expired slots are ignored (but NOT cleared — read lock only)
	int32_t active_count = 0;
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid != 0)
		{
			int64_t gone = comp_timeb(&now, &rdr->parallel_slots[i].last_ecm);
			int32_t timeout = get_slot_timeout(&rdr->parallel_slots[i], rdr->paralleltimeout);
			if(gone <= timeout)
				active_count++;
		}
	}
	// Space in active array?
	if(active_count < rdr->maxparallel)
	{
		result = 1;
		goto done;
	}
	// Space in pending array?
	if(pending_size > 0)
	{
		int32_t pending_count = 0;
		for(i = 0; i < pending_size; i++)
		{
			if(rdr->parallel_slots_prov[i].srvid != 0)
			{
				int64_t gone = comp_timeb(&now, &rdr->parallel_slots_prov[i].last_ecm);
				int32_t timeout = get_slot_timeout(&rdr->parallel_slots_prov[i], rdr->paralleltimeout);
				if(gone <= timeout)
					pending_count++;
			}
		}
		if(pending_count < pending_size)
		{
			result = 1;
			goto done;
		}
	}
	// No capacity available
	result = 0;
done:
	cs_readunlock(__func__, &rdr->parallel_lock);
	return result;
}
/**
 * maxparallel - Register a service on a reader (called when CW is found)
 * Uses dual-array architecture:
 * - parallel_slots: active services (the limit)
 * - parallel_slots_prov: pending services during zapping
 * New services go to active if space available, otherwise pending.
 * Also maintains the measured per-service ECM interval (rolling average) and
 * the reader's parallel_full flag. Takes parallel_lock for writing.
 * Returns: 1 = registered, 0 = failed (no slot in either array)
 */
static int8_t reader_register_service(struct s_reader *rdr, uint16_t srvid, struct s_client *client)
{
	if(!rdr || rdr->maxparallel <= 0)
		return 1; // unlimited, always OK
	cs_writelock(__func__, &rdr->parallel_lock);
	// If client is registering a DIFFERENT service, unblock them
	// (they zapped away from the blocked service)
	unblock_client_if_different(rdr, client, srvid);
	struct timeb now;
	cs_ftime(&now);
	// Cleanup expired slots and handle promotions/drops
	int32_t active_count = reader_cleanup_slots_nolock(rdr, &now);
	int32_t i;
	int32_t pending_size = get_pending_size(rdr);
	// Search for existing slot in both arrays, remembering the first free index of each
	int32_t existing_active = -1;
	int32_t existing_pending = -1;
	int32_t free_active = -1;
	int32_t free_pending = -1;
	// Search active array
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid == srvid)
			existing_active = i;
		else if(free_active < 0 && rdr->parallel_slots[i].srvid == 0)
			free_active = i;
	}
	// Search pending array
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid == srvid)
			existing_pending = i;
		else if(free_pending < 0 && rdr->parallel_slots_prov[i].srvid == 0)
			free_pending = i;
	}
	struct s_parallel_slot *slot = NULL;
	int8_t is_new = 0;
	int8_t is_pending = 0;
	int32_t slot_idx = -1;
	const char *array_name = "active";
	if(existing_active >= 0)
	{
		// Already in active array - just update
		slot = &rdr->parallel_slots[existing_active];
		slot_idx = existing_active;
		is_new = 0;
	}
	else if(existing_pending >= 0)
	{
		// Already in pending array - update there
		slot = &rdr->parallel_slots_prov[existing_pending];
		slot_idx = existing_pending;
		is_new = 0;
		is_pending = 1;
		array_name = "pending";
	}
	else if(active_count < rdr->maxparallel && free_active >= 0)
	{
		// New service, space in active array
		slot = &rdr->parallel_slots[free_active];
		slot_idx = free_active;
		slot->srvid = srvid;
		slot->ecm_interval = 0;
		slot->client = client;
		is_new = 1;
	}
	else if(free_pending >= 0)
	{
		// New service, active full -> put in pending
		slot = &rdr->parallel_slots_prov[free_pending];
		slot_idx = free_pending;
		slot->srvid = srvid;
		slot->ecm_interval = 0;
		slot->client = client;
		is_new = 1;
		is_pending = 1;
		array_name = "pending";
	}
	else
	{
		// No slot available in either array
		cs_writeunlock(__func__, &rdr->parallel_lock);
		cs_log("reader %s: no slot available for service %04X (all %d+%d slots used)",
				rdr->label, srvid, rdr->maxparallel, pending_size);
		return 0;
	}
	// Update interval for existing slots: average the new gap into ecm_interval
	if(!is_new && slot->last_ecm.time > 0)
	{
		int64_t interval = comp_timeb(&now, &slot->last_ecm);
		if(interval > 0 && interval < 30000) // Sanity: < 30 seconds
		{
			if(slot->ecm_interval == 0)
				slot->ecm_interval = (int32_t)interval;
			else
				slot->ecm_interval = (slot->ecm_interval + (int32_t)interval) / 2;
		}
	}
	slot->last_ecm = now;
	// If this is an ACTIVE service (not pending), drop pending if needed
	// This ensures pending are only dropped when active services prove they're still active
	if(!is_pending)
	{
		reader_drop_pending_nolock(rdr, &now);
	}
	// Recount for logging (drop may have changed counts)
	int32_t final_active = 0, final_pending = 0;
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid != 0)
			final_active++;
	}
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid != 0)
			final_pending++;
	}
	if(is_new)
	{
		if(final_pending > 0)
		{
			cs_log_dbg(D_READER, "reader %s: registered service %04X in %s slot %d (%d/%d active, +%d pending)",
					rdr->label, srvid, array_name, slot_idx, final_active, rdr->maxparallel, final_pending);
		}
		else
		{
			cs_log_dbg(D_READER, "reader %s: registered service %04X in %s slot %d (%d/%d active)",
					rdr->label, srvid, array_name, slot_idx, final_active, rdr->maxparallel);
		}
		// Log "now full" only once per state change, and only for active services
		if(!is_pending && final_active >= rdr->maxparallel && !rdr->parallel_full)
		{
			rdr->parallel_full = 1;
			cs_log("reader %s: now full (%d/%d active)",
					rdr->label, final_active, rdr->maxparallel);
		}
	}
	// Reset full flag when capacity becomes available
	if(final_active < rdr->maxparallel && rdr->parallel_full)
	{
		rdr->parallel_full = 0;
		cs_log_dbg(D_READER, "reader %s: capacity available (%d/%d active)",
				rdr->label, final_active, rdr->maxparallel);
	}
	cs_writeunlock(__func__, &rdr->parallel_lock);
	return 1;
}
/*
 * Fallback-timeout handler: when an ECM is still unanswered after the fallback
 * window, push the request through the remaining stages so fallback readers
 * are asked too.
 * NOTE(review): the loop terminates only because request_cw_from_readers() is
 * expected to advance er->stage on every call — confirm; otherwise this spins.
 */
void fallback_timeout(ECM_REQUEST *er)
{
	if(er->rc >= E_UNHANDLED && er->stage < 4)
	{
		cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} fallback timeout! (stage: %d)", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, er->stage);
		debug_ecm(D_TRACE, "fallback for %s %s", username(er->client), buf);
		while(er->stage < 4) // if preferlocalcards=1 and no answer from locals, initial stage will be 2! We need to reach stage=4 to call fallback's.
		{
			request_cw_from_readers(er, 0);
		}
	}
}
/*
 * Client-timeout handler for an ECM request. Runs at most once per request
 * (guarded by readers_timeout_check). If CW voting is enabled and can pick a
 * winner, the ECM is answered E_FOUND; otherwise all readers that were asked
 * but never answered get E_TIMEOUT recorded, and the client receives a timeout.
 */
void ecm_timeout(ECM_REQUEST *er)
{
	if(!er->readers_timeout_check)
	{
		er->readers_timeout_check = 1;
		if(check_client(er->client) && er->rc >= E_UNHANDLED)
		{
			// Force a voting decision if the time limit expired and the ECM is still unhandled
			if (cfg.cwvote_enabled && is_cwvote_caid(er) && er->rc == E_UNHANDLED)
			{
				if (cfg.cwvote_log_enabled)
				{
					cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [ecm_timeout] CW Vote enabled and CAID matches, attempting to decide.",
							(check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
				}
				int vote_result = cw_vote_decide(er);
				if (vote_result == 1) // A winner was found
				{
					er->rc = E_FOUND;
					er->rcEx = 0;
					send_dcw(er->client, er);
					return; // ECM handled by the vote
				}
			}
			debug_ecm(D_TRACE, "timeout for %s %s", username(er->client), buf);
			// set timeout for readers not answering
			struct s_ecm_answer *ea_list;
			for(ea_list = er->matching_rdr; ea_list; ea_list = ea_list->next)
			{
				if((ea_list->status & (REQUEST_SENT | REQUEST_ANSWERED)) == REQUEST_SENT) // Request sent, but no answer!
				{
					write_ecm_answer(ea_list->reader, er, E_TIMEOUT, 0, NULL, NULL, 0, NULL); // set timeout for readers not answered!
				}
			}
			// send timeout to client!
			cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} client timeout! ", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
			er->rc = E_TIMEOUT;
			er->rcEx = 0;
			send_dcw(er->client, er);
		}
	}
}
/*
 * Count one incoming ECM request, both on the requesting client and on the
 * global statistics holder (first_client). No-op for invalid clients.
 */
void increment_n_request(struct s_client *cl)
{
	if(!check_client(cl))
		return;
	cl->n_request[1]++;
	first_client->n_request[1]++;
}
/*
 * Test whether one 8-byte half of a 16-byte control word is non-zero.
 * part 0 = bytes 0..7, part != 0 = bytes 8..15.
 * Returns 1 if any byte of the selected half is set, 0 otherwise.
 */
uint8_t checkCWpart(uint8_t *cw, int8_t part)
{
	const uint8_t *half = cw + (part ? 8 : 0);
	int8_t idx;
	for(idx = 0; idx < 8; idx++)
	{
		if(half[idx])
			return 1;
	}
	return 0;
}
/*
 * Roll the per-client ECM request counters once per minute (called from
 * cw_process): n_request[1] (current window) is moved to n_request[0]
 * (last completed window) and reset. Counters of non-eligible clients
 * (CSP, duplicates, cacheex mode 2/3) are zeroed entirely.
 * Holds the client list read lock while iterating.
 */
void update_n_request(void)
{
	struct s_client *cl;
	cs_readlock(__func__, &clientlist_lock);
	for(cl = first_client->next; cl; cl = cl->next)
	{
#ifdef CS_CACHEEX
		if(check_client(cl) && get_module(cl)->num != R_CSP && cl->typ == 'c' && !cl->dup && cl->account && cl->account->cacheex.mode<=1) //no cacheex 2/3 client
#else
		if(check_client(cl) && get_module(cl)->num != R_CSP && cl->typ == 'c' && !cl->dup)
#endif
		{
			cl->n_request[0] = cl->n_request[1];
			cl->n_request[1] = 0;
		}
		else
		{
			cl->n_request[0] = 0;
			cl->n_request[1] = 0;
		}
	}
	// The global aggregate is rolled unconditionally.
	first_client->n_request[0] = first_client->n_request[1];
	first_client->n_request[1] = 0;
	cs_readunlock(__func__, &clientlist_lock);
}
// Function that initializes the virtual readers
/*
 * Allocate the two lazily-created virtual reader placeholders (CACHEEX and
 * CSP). Each is created once, zeroed, labeled, and left without group or
 * client. Allocation failures are logged but not fatal.
 */
static void init_virtual_readers(void)
{
	if (virtual_cacheex_reader == NULL) {
		if (!cs_malloc(&virtual_cacheex_reader, sizeof(struct s_reader))) {
			cs_log("ERROR: Failed to allocate virtual_cacheex_reader!");
		} else {
			memset(virtual_cacheex_reader, 0, sizeof(struct s_reader));
			cs_strncpy(virtual_cacheex_reader->label, "CACHEEX_VIRTUAL", sizeof(virtual_cacheex_reader->label));
			virtual_cacheex_reader->grp = 0;
			virtual_cacheex_reader->client = NULL;
			cs_log_dbg(D_TRACE, "Virtual CACHEEX reader initialized.");
		}
	}
	if (virtual_csp_reader == NULL) {
		if (!cs_malloc(&virtual_csp_reader, sizeof(struct s_reader))) {
			cs_log("ERROR: Failed to allocate virtual_csp_reader!");
		} else {
			memset(virtual_csp_reader, 0, sizeof(struct s_reader));
			cs_strncpy(virtual_csp_reader->label, "CSP_VIRTUAL", sizeof(virtual_csp_reader->label));
			virtual_csp_reader->grp = 0;
			virtual_csp_reader->client = NULL;
			cs_log_dbg(D_TRACE, "Virtual CSP reader initialized.");
		}
	}
}
static void *cw_process(void)
{
set_thread_name(__func__);
int64_t time_to_check_fbtimeout, time_to_check_ctimeout, next_check, n_request_next;
struct timeb t_now, tbc, ecmc_time, cache_time, n_request_time;
ECM_REQUEST *er = NULL;
time_t ecm_maxcachetime;
#ifdef CS_CACHEEX
int64_t time_to_check_cacheex_wait_time;
int64_t time_to_check_cacheex_mode1_delay;
#endif
cs_pthread_cond_init(__func__, &cw_process_sleep_cond_mutex, &cw_process_sleep_cond);
#ifdef CS_ANTICASC
int32_t ac_next;
struct timeb ac_time;
cs_ftime(&ac_time);
add_ms_to_timeb(&ac_time, cfg.ac_stime * 60 * 1000);
#endif
cs_ftime(&ecmc_time);
add_ms_to_timeb(&ecmc_time, 1000);
cs_ftime(&cache_time);
add_ms_to_timeb(&cache_time, 3000);
cs_ftime(&n_request_time);
add_ms_to_timeb(&n_request_time, 60 * 1000);
while(!exit_oscam)
{
if(cw_process_wakeups == 0) // No waiting wakeups, proceed to sleep
{
sleepms_on_cond(__func__, &cw_process_sleep_cond_mutex, &cw_process_sleep_cond, msec_wait);
}
cw_process_wakeups = 0; // We've been woken up, reset the counter
if(exit_oscam)
{ break; }
next_check = 0;
#ifdef CS_ANTICASC
ac_next = 0;
#endif
ecmc_next = 0;
cache_next = 0;
msec_wait = 0;
cs_ftime(&t_now);
cs_readlock(__func__, &ecmcache_lock);
for(er = ecmcwcache; er; er = er->next)
{
if((er->from_cacheex || er->from_csp) // ignore ecms from cacheex/csp
|| er->readers_timeout_check // ignore already checked
|| !check_client(er->client)) // ignore ecm of killed clients
{
continue;
}
if(er->rc >= E_UNHANDLED)
{
#ifdef CS_CACHEEX
// cacheex_wait_time
if(er->cacheex_wait_time && !er->cacheex_wait_time_expired)
{
tbc = er->tps;
time_to_check_cacheex_mode1_delay = 0;
time_to_check_ctimeout = add_ms_to_timeb_diff(&tbc, lb_auto_timeout(er, er->client_timeout));
if(comp_timeb(&t_now, &tbc) >= 0)
{
add_job(er->client, ACTION_CACHEEX_TIMEOUT, (void *)er, 0);
time_to_check_cacheex_wait_time = 0;
}
else if(er->cacheex_mode1_delay && !er->stage && er->cacheex_reader_count>0)
{
// check for cacheex_mode1_delay
tbc = er->tps;
time_to_check_cacheex_mode1_delay = add_ms_to_timeb_diff(&tbc, lb_auto_timeout(er, er->cacheex_mode1_delay));
if(comp_timeb(&t_now, &tbc) >= 0)
{
add_job(er->client, ACTION_CACHEEX1_DELAY, (void *)er, 0);
time_to_check_cacheex_mode1_delay = 0;
}
}
if(!next_check || (time_to_check_cacheex_wait_time > 0 && time_to_check_cacheex_wait_time < next_check))
{ next_check = time_to_check_cacheex_wait_time; }
if(!next_check || (time_to_check_cacheex_mode1_delay > 0 && time_to_check_cacheex_mode1_delay < next_check))
{ next_check = time_to_check_cacheex_mode1_delay; }
}
#endif
if(er->stage < 4)
{
// fbtimeout
tbc = er->tps;
time_to_check_fbtimeout = add_ms_to_timeb_diff(&tbc, lb_auto_timeout(er, get_fallbacktimeout(er->caid)));
if(comp_timeb(&t_now, &tbc) >= 0)
{
add_job(er->client, ACTION_FALLBACK_TIMEOUT, (void *)er, 0);
time_to_check_fbtimeout = 0;
}
if(!next_check || (time_to_check_fbtimeout > 0 && time_to_check_fbtimeout < next_check))
{ next_check = time_to_check_fbtimeout; }
}
// clienttimeout
if(!er->readers_timeout_check) // ecm stays in cache at least ctimeout+2seconds!
{
tbc = er->tps;
time_to_check_ctimeout = add_ms_to_timeb_diff(&tbc, lb_auto_timeout(er, er->client_timeout));
if(comp_timeb(&t_now, &tbc) >= 0)
{
add_job(er->client, ACTION_CLIENT_TIMEOUT, (void *)er, 0);
time_to_check_ctimeout = 0;
}
if(!next_check || (time_to_check_ctimeout > 0 && time_to_check_ctimeout < next_check))
{ next_check = time_to_check_ctimeout; }
}
}
}
cs_readunlock(__func__, &ecmcache_lock);
#ifdef CS_ANTICASC
if(cfg.ac_enabled && (ac_next = comp_timeb(&ac_time, &t_now)) <= 10)
{
ac_do_stat();
cs_ftime(&ac_time);
ac_next = add_ms_to_timeb_diff(&ac_time, cfg.ac_stime * 60 * 1000);
}
#endif
if((ecmc_next = comp_timeb(&ecmc_time, &t_now)) <= 10)
{
uint32_t count = 0;
struct ecm_request_t *ecm, *ecmt = NULL, *prv;
cs_readlock(__func__, &ecmcache_lock);
for(ecm = ecmcwcache, prv = NULL; ecm; prv = ecm, ecm = ecm->next, count++)
{
ecm_maxcachetime = t_now.time - ((ecm->client_timeout + 500) / 1000 + 3);
if(ecm->tps.time < ecm_maxcachetime)
{
cs_readunlock(__func__, &ecmcache_lock);
cs_writelock(__func__, &ecmcache_lock);
ecmt = ecm;
if(prv)
{ prv->next = NULL; }
else
{ ecmcwcache = NULL; }
cs_writeunlock(__func__, &ecmcache_lock);
break;
}
}
if(!ecmt)
{ cs_readunlock(__func__, &ecmcache_lock); }
ecmcwcache_size = count;
while(ecmt)
{
ecm = ecmt->next;
free_ecm(ecmt);
ecmt = ecm;
}
#ifdef CS_CACHEEX
ecmt=NULL;
cs_readlock(__func__, &ecm_pushed_deleted_lock);
for(ecm = ecm_pushed_deleted, prv = NULL; ecm; prv = ecm, ecm = ecm->next)
{
ecm_maxcachetime = t_now.time - ((ecm->client_timeout + 500) / 1000 + 3);
if(ecm->tps.time < ecm_maxcachetime)
{
cs_readunlock(__func__, &ecm_pushed_deleted_lock);
cs_writelock(__func__, &ecm_pushed_deleted_lock);
ecmt = ecm;
if(prv)
{ prv->next = NULL; }
else
{ ecm_pushed_deleted = NULL; }
cs_writeunlock(__func__, &ecm_pushed_deleted_lock);
break;
}
}
if(!ecmt)
{ cs_readunlock(__func__, &ecm_pushed_deleted_lock); }
while(ecmt)
{
ecm = ecmt->next;
free_push_in_ecm(ecmt);
ecmt = ecm;
}
#endif
cs_ftime(&ecmc_time);
ecmc_next = add_ms_to_timeb_diff(&ecmc_time, 1000);
}
if((cache_next = comp_timeb(&cache_time, &t_now)) <= 10)
{
cleanup_cache(false);
cacheex_cleanup_hitcache(false);
cs_ftime(&cache_time);
cache_next = add_ms_to_timeb_diff(&cache_time, 3000);
}
if((n_request_next = comp_timeb(&n_request_time, &t_now)) <= 10)
{
update_n_request();
cs_ftime(&n_request_time);
n_request_next = add_ms_to_timeb_diff(&n_request_time, 60 * 1000);
}
msec_wait = next_check;
#ifdef CS_ANTICASC
if(!msec_wait || (ac_next > 0 && ac_next < msec_wait))
{ msec_wait = ac_next; }
#endif
if(!msec_wait || (ecmc_next > 0 && ecmc_next < msec_wait))
{ msec_wait = ecmc_next; }
if(!msec_wait || (cache_next > 0 && cache_next < msec_wait))
{ msec_wait = cache_next; }
if(!msec_wait || (n_request_next > 0 && n_request_next < msec_wait))
{ msec_wait = n_request_next; }
if(!msec_wait)
{ msec_wait = 3000; }
cleanupcwcycle();
}
return NULL;
}
/*
 * Spawn the cw_process timer thread. The virtual readers are created first
 * so they exist before the worker (or any ECM path) can reference them.
 */
void cw_process_thread_start(void)
{
	init_virtual_readers(); // Initialize the virtual readers
	start_thread("cw_process", (void *) &cw_process, NULL, NULL, 1, 1);
}
/*
 * Ask cw_process() to skip its next sleep and re-evaluate deadlines now.
 * NOTE(review): cw_process_wakeups is a plain int incremented here without
 * holding cw_process_sleep_cond_mutex and read in cw_process() — a racy but
 * apparently tolerated pattern since the sleep is bounded; confirm intent.
 */
void cw_process_thread_wakeup(void)
{
	cw_process_wakeups++; // Do not sleep...
	SAFE_COND_SIGNAL(&cw_process_sleep_cond);
}
/*
 * Betatunnel: rewrite a Nagra ECM in-place into BetaCrypt format.
 * Saves the original caid in er->ocaid, switches er->caid to caidto, shifts
 * the payload up by 10 bytes and inserts the BetaCrypt header (N3 variant for
 * long ECMs, N2 otherwise), then fixes up length fields and tunnel counters.
 */
void convert_to_beta(struct s_client *cl, ECM_REQUEST *er, uint16_t caidto)
{
	// Pre-built 10-byte BetaCrypt headers inserted after the 3-byte section header.
	static uint8_t headerN3[10] = { 0xc7, 0x00, 0x00, 0x00, 0x01, 0x10, 0x10, 0x00, 0x87, 0x12 };
	static uint8_t headerN2[10] = { 0xc9, 0x00, 0x00, 0x00, 0x01, 0x10, 0x10, 0x00, 0x48, 0x12 };
	er->ocaid = er->caid;
	er->caid = caidto;
	er->prid = 0;
	er->ecmlen = er->ecm[2] + 3;
	// Make room for the 10-byte header: move payload from offset 3 to offset 13.
	memmove(er->ecm + 13, er->ecm + 3, er->ecmlen - 3);
	if(er->ecmlen > 0x88)
	{
		memcpy(er->ecm + 3, headerN3, 10);
		if(er->ecm[0] == 0x81)
			{ er->ecm[12] += 1; }
		er->ecm[1] = 0x70;
	}
	else
	{
		memcpy(er->ecm + 3, headerN2, 10);
	}
	// Account for the inserted header in both the length field and the section byte.
	er->ecmlen += 10;
	er->ecm[2] = er->ecmlen - 3;
	er->btun = 1;
	cl->cwtun++;
	cl->account->cwtun++;
	first_client->cwtun++;
	cs_log_dbg(D_TRACE, "ECM converted ocaid from 0x%04X to BetaCrypt caid 0x%04X for service id 0x%04X",
			er->ocaid, caidto, er->srvid);
}
/*
 * Betatunnel: rewrite a BetaCrypt ECM in-place back into Nagra format.
 * Saves the original caid in er->ocaid, switches er->caid to caidto, strips
 * the 10-byte BetaCrypt header by moving the payload down, then fixes up
 * length fields and tunnel counters.
 */
void convert_to_nagra(struct s_client *cl, ECM_REQUEST *er, uint16_t caidto)
{
	cs_log_dbg(D_TRACE, "convert_to_nagra");
	er->ocaid = er->caid;
	er->caid = caidto;
	er->prid = 0;
	er->ecmlen = er->ecm[2] + 3;
	// not sure
	if(er->ecmlen < 0x52)
		{ er->ecm[1] = 0x30; }
	// Drop the 10-byte header: move payload from offset 13 back to offset 3.
	memmove(er->ecm + 3, er->ecm + 13, er->ecmlen - 3);
	er->ecmlen -= 10;
	er->ecm[2] = er->ecmlen - 3;
	er->btun = 1;
	cl->cwtun++;
	cl->account->cwtun++;
	first_client->cwtun++;
	// BUGFIX: format specifiers were malformed ("0x04%X" printed a literal
	// "04" and an unpadded hex value); corrected to "0x%04X".
	cs_log_dbg(D_TRACE, "ECM converted ocaid from: 0x%04X to Nagra: 0x%04X for service id: 0x%04X",
			er->ocaid, caidto, er->srvid);
}
/*
 * Apply the current client's betatunnel table to an ECM: on the first rule
 * whose caid matches bt_caidfrom and whose srvid matches bt_srvid (0xFFFF
 * acts as a wildcard), convert the ECM — Beta->Nagra for betatunnel caids,
 * otherwise Nagra->Beta — but only if it was not converted before
 * (er->ocaid still 0). Processing stops at the first matching rule.
 */
void cs_betatunnel(ECM_REQUEST *er)
{
	struct s_client *cl = cur_client();
	TUNTAB *ttab = &cl->ttab;
	int32_t idx;
	for(idx = 0; idx < ttab->ttnum; idx++)
	{
		if(er->caid != ttab->ttdata[idx].bt_caidfrom)
			continue;
		if(er->srvid != ttab->ttdata[idx].bt_srvid && ttab->ttdata[idx].bt_srvid != 0xFFFF)
			continue;
		if(chk_is_betatunnel_caid(er->caid) == 1 && er->ocaid == 0x0000)
		{
			convert_to_nagra(cl, er, ttab->ttdata[idx].bt_caidto);
		}
		else if(er->ocaid == 0x0000)
		{
			convert_to_beta(cl, er, ttab->ttdata[idx].bt_caidto);
		}
		return;
	}
}
/*
 * Detach an ECM from every reader queue entry that still references it.
 * For each reader that was sent the request but never answered, walk that
 * reader-client's ecmtask array and null the client pointer of entries whose
 * parent is this ECM — prevents a segfault when a very late answer arrives
 * after the ECM has been disposed.
 */
static void remove_ecm_from_reader(ECM_REQUEST *ecm)
{
	int32_t i;
	struct s_ecm_answer *ea = ecm->matching_rdr;
	while(ea)
	{
		if((ea->status & REQUEST_SENT) && !(ea->status & REQUEST_ANSWERED))
		{
			// we found a outstanding reader, clean it:
			struct s_reader *rdr = ea->reader;
			if(rdr)
			{
				struct s_client *cl = rdr->client;
				if(check_client(cl))
				{
					ECM_REQUEST *ecmtask = cl->ecmtask;
					if(ecmtask)
					{
						for(i = 0; i < cfg.max_pending; ++i)
						{
							if(ecmtask[i].parent == ecm)
							{
								ecmtask[i].client = NULL;
								cacheex_set_csp_lastnode(&ecmtask[i]);
							}
						}
					}
				}
			}
		}
		ea = ea->next;
	}
}
/*
 * Dispose of an ECM request. Order matters:
 * 1) release cacheex/gbox side data,
 * 2) null out reader-queue references to this ECM (late answers would
 *    otherwise dereference freed memory),
 * 3) free the matching_rdr answer list,
 * 4) hand src_data and the ECM itself to the garbage collector
 *    (deferred free, so racing readers see valid memory a little longer).
 */
void free_ecm(ECM_REQUEST *ecm)
{
	struct s_ecm_answer *ea, *nxt;
	cacheex_free_csp_lastnodes(ecm);
	gbox_free_cards_pending(ecm);
	// remove this ecm from reader queue to avoid segfault on very late answers (when ecm is already disposed)
	// first check for outstanding answers:
	remove_ecm_from_reader(ecm);
	// free matching_rdr list:
	ea = ecm->matching_rdr;
	ecm->matching_rdr = NULL;
	while(ea)
	{
		nxt = ea->next;
		cs_lock_destroy(__func__, &ea->ecmanswer_lock);
		add_garbage(ea);
		ea = nxt;
	}
	if(ecm->src_data)
		{ add_garbage(ecm->src_data); }
	add_garbage(ecm);
}
/*
 * Dispose of a pushed-in (cacheex) ECM immediately — unlike free_ecm() this
 * frees directly instead of deferring to the garbage collector, since no
 * reader queue can still reference a pushed-in entry.
 */
void free_push_in_ecm(ECM_REQUEST *ecm)
{
	cacheex_free_csp_lastnodes(ecm);
	gbox_free_cards_pending(ecm);
	if(ecm->src_data)
	{
		NULLFREE(ecm->src_data);
	}
	NULLFREE(ecm);
}
/*
 * Allocate a fresh ECM_REQUEST bound to the calling client.
 * Returns NULL when there is no current client or allocation fails.
 * The request starts unhandled, timestamped now, with an empty vote pool
 * and no reader/cacheex answers yet (grp = 0).
 */
ECM_REQUEST *get_ecmtask(void)
{
	struct s_client *cl = cur_client();
	ECM_REQUEST *er = NULL;
	// Short-circuit: only attempt the allocation for a valid current client.
	if(!cl || !cs_malloc(&er, sizeof(ECM_REQUEST)))
		{ return NULL; }
	memset(er->vote_pool, 0, sizeof(er->vote_pool)); // start with an empty vote pool
	cs_ftime(&er->tps);
	er->rc = E_UNHANDLED;
	er->client = cl;
	er->grp = 0; // no readers/cacheex-clients answers yet
	return er;
}
void cleanup_ecmtasks(struct s_client *cl)
{
	/* Detach a dying client from every place an ecm may still reference it:
	 * the global cw cache and the per-reader pending ecm tables. The ecms
	 * themselves stay alive (cache); only their client pointer is nulled. */
	if(!cl)
		{ return; }

	ECM_REQUEST *ecm;

	// null this client's pointer in the global cw cache entries
	cs_readlock(__func__, &ecmcache_lock);
	for(ecm = ecmcwcache; ecm; ecm = ecm->next)
	{
		if(ecm->client == cl)
			{ ecm->client = NULL; }
	}
	cs_readunlock(__func__, &ecmcache_lock);

	// null this client's pointer in every active reader's ecm queue
	cs_readlock(__func__, &readerlist_lock);
	struct s_reader *rdr;
	for(rdr = first_active_reader; rdr; rdr = rdr->next)
	{
		if(!check_client(rdr->client) || !rdr->client->ecmtask)
			{ continue; }
		int i;
		for(i = 0; i < cfg.max_pending; i++)
		{
			if(rdr->client->ecmtask[i].client == cl)
				{ rdr->client->ecmtask[i].client = NULL; }
		}
	}
	cs_readunlock(__func__, &readerlist_lock);
}
static void add_cascade_data(struct s_client *client, ECM_REQUEST *er)
{
	/* Track the (caid, prid, srvid) tuples recently served through this
	 * client so cascading users and their cw change rate can be shown.
	 * Entries idle for more than 60 seconds are expired on the fly. */
	if(!client->cascadeusers)
		{ client->cascadeusers = ll_create("cascade_data"); }

	time_t now = time(NULL);
	LL_ITER it = ll_iter_create(client->cascadeusers);
	struct s_cascadeuser *cu;
	int8_t found = 0;

	while((cu = ll_iter_next(&it)))
	{
		if(cu->caid == er->caid && cu->prid == er->prid && cu->srvid == er->srvid)
		{
			// existing entry: derive cw rate from the previous hit, refresh timestamp
			if(cu->time < now)
				{ cu->cwrate = now - cu->time; }
			cu->time = now;
			found = 1;
		}
		else if(cu->time + 60 < now) // stale entry
			{ ll_iter_remove_data(&it); }
	}

	if(found)
		{ return; }

	// tuple not seen yet: append a fresh entry
	if(!cs_malloc(&cu, sizeof(struct s_cascadeuser)))
		{ return; }
	cu->caid = er->caid;
	cu->prid = er->prid;
	cu->srvid = er->srvid;
	cu->time = now;
	ll_append(client->cascadeusers, cu);
}
int32_t is_double_check_caid(ECM_REQUEST *er, FTAB *double_check_caid)
{
	/* Return 1 when the ecm matches the double-check caid filter (an empty
	 * filter means "double-check everything"), 0 otherwise. A filter caid
	 * below 0x0100 matches the whole caid family (high byte only). */
	if(double_check_caid->nfilts == 0)
		{ return 1; }

	int32_t f;
	for(f = 0; f < double_check_caid->nfilts; f++)
	{
		uint16_t tcaid = double_check_caid->filts[f].caid;
		if(!tcaid)
			{ continue; }
		// exact caid match, or family match for short filter caids
		if(tcaid != er->caid && !(tcaid < 0x0100 && (er->caid >> 8) == tcaid))
			{ continue; }

		int32_t nprids = double_check_caid->filts[f].nprids;
		if(!nprids) // no provider restriction -> match
			{ return 1; }

		int32_t p;
		for(p = 0; p < nprids; p++)
		{
			if(double_check_caid->filts[f].prids[p] == er->prid) // provider matches
				{ return 1; }
		}
	}
	return 0;
}
struct s_ecm_answer *get_ecm_answer(struct s_reader *reader, ECM_REQUEST *er)
{
	/* Find the s_ecm_answer slot belonging to the given reader in er's
	 * matching_rdr list; NULL when either argument is missing or the
	 * reader is not part of the list. */
	if(!er || !reader)
		{ return NULL; }

	struct s_ecm_answer *ea = er->matching_rdr;
	while(ea && ea->reader != reader)
		{ ea = ea->next; }
	return ea;
}
void distribute_ea(struct s_ecm_answer *ea)
{
	/* Forward a reader's answer to every ecm pending on the same request.
	 * Anything other than E_FOUND is forwarded as E_NOTFOUND, because the
	 * pending clients may still wait on / ask their remaining readers
	 * (a forwarded timeout would cut them off). */
	struct s_ecm_answer *pending;
	for(pending = ea->pending; pending; pending = pending->pending_next)
	{
		cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [distribute_ea] send ea (%s) by reader %s answering for client %s", (check_client(pending->er->client) ? pending->er->client->account->usr : "-"), pending->er->caid, pending->er->prid, pending->er->srvid, ea->rc==E_FOUND?"OK":"NOK", pending->reader->label, (check_client(ea->er->client) ? ea->er->client->account->usr : "-"));
#ifdef CS_CACHEEX_AIO
		// propagate the locally-generated flag along with a found cw
		if(ea->rc==E_FOUND && ea->er->localgenerated)
			pending->er->localgenerated = 1;
#endif
		write_ecm_answer(pending->reader, pending->er, (ea->rc==E_FOUND? E_FOUND : E_NOTFOUND), ea->rcEx, ea->cw, NULL, ea->tier, &ea->cw_ex);
	}
}
/* Deliver the final result of an ecm request to a user client ('c' type):
 * - classifies the answer (found / cache / cacheex / timeout / not found),
 * - updates client, account and reader statistics plus webif ring buffers,
 * - applies anticascading (ACoSC) penalties when enabled,
 * - optionally double-checks CWs from different readers (cfg.double_check),
 * - hands the dcw to the client's protocol module and logs the result.
 * Always returns 0.
 * Fix: removed a dead store (acosc_penalty_active was set to 3 and
 * immediately overwritten with 4 with no intervening read). */
int32_t send_dcw(struct s_client *client, ECM_REQUEST *er)
{
	if(!check_client(client) || client->typ != 'c')
		{ return 0; }

	cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [send_dcw] rc %d from reader %s", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, er->rc, er->selected_reader ? er->selected_reader->label : "-");

	// textual representations for the result logging below
	static const char stageTxt[] = { '0', 'C', 'L', 'P', 'F', 'X' };
	static const char *stxt[] = { "found", "cache1", "cache2", "cache3", "not found", "timeout", "sleeping",
								"fake", "invalid", "corrupt", "no card", "expdate", "disabled", "stopped" };
	static const char *stxtEx[16] = {"", "group", "caid", "ident", "class", "chid", "queue", "peer", "sid", "", "", "", "", "", "", ""};
	static const char *stxtWh[16] = {"", "user ", "reader ", "server ", "lserver ", "", "", "", "", "", "", "", "" , "" , "", ""};

#ifdef CS_CACHEEX_AIO
	char sby[100] = "", sreason[100] = "", scwcinfo[32] = "", schaninfo[CS_SERVICENAME_SIZE] = "", srealecmtime[50]="";
#else
	char sby[100] = "", sreason[70] = "", scwcinfo[32] = "", schaninfo[CS_SERVICENAME_SIZE] = "", srealecmtime[50]="";
#endif
	char erEx[32] = "";
	char usrname[38] = "";
	char channame[28];
	struct timeb tpe;

	snprintf(usrname, sizeof(usrname) - 1, "%s", username(client));

#ifdef WITH_DEBUG
	if(cs_dblevel & D_CLIENTECM)
	{
		char buf[ECM_FMT_LEN];
		char ecmd5[17 * 3];
		char cwstr[17 * 3];
		format_ecm(er, buf, ECM_FMT_LEN);
		cs_hexdump(0, er->ecmd5, 16, ecmd5, sizeof(ecmd5));
		cs_hexdump(0, er->cw, 16, cwstr, sizeof(cwstr));
#ifdef CS_CACHEEX
		char csphash[5 * 3];
		cs_hexdump(0, (void *)&er->csp_hash, 4, csphash, sizeof(csphash));
		cs_log_dbg(D_CLIENTECM, "Client %s csphash %s cw %s rc %d %s", username(client), csphash, cwstr, er->rc, buf);
#else
		cs_log_dbg(D_CLIENTECM, "Client %s cw %s rc %d %s", username(client), cwstr, er->rc, buf);
#endif
	}
#endif

	struct s_reader *er_reader = er->selected_reader; // responding reader
	struct s_ecm_answer *ea_orig = get_ecm_answer(er_reader, er);

	// check if ecm_answer is from pending's: reclassify as cache2
	if(ea_orig && ea_orig->is_pending && er->rc == E_FOUND)
		{ er->rc = E_CACHE2; }

	// check if answer from cacheex-1 reader
	if(er->rc == E_FOUND && er_reader && cacheex_reader(er_reader)) // so add hit to cacheex mode 1 readers
	{
		er->rc = E_CACHEEX;
	}

	// real ecm time (differs from response time when cacheex wait time or fallback delay applied)
	if(ea_orig && !ea_orig->is_pending && er->rc == E_FOUND
		&& (
#ifdef CS_CACHEEX
			er->cacheex_wait_time ||
#endif
			(ea_orig->status & READER_FALLBACK)))
	{
		snprintf(srealecmtime, sizeof(srealecmtime) - 1, " (real %d ms)", ea_orig->ecm_time);
	}

	if(er->rc == E_TIMEOUT)
	{
#ifdef CS_CACHEEX
		if(!er->from_cacheex1_client) // cosmetic: show "by" readers only for "normal" clients
		{
#endif
			// list all readers that were asked but never answered
			struct s_ecm_answer *ea_list;
			int32_t ofs = 0;
			for(ea_list = er->matching_rdr; ea_list; ea_list = ea_list->next)
			{
				if(ea_list->reader && ofs < (int32_t)sizeof(sby) && ((ea_list->status & REQUEST_SENT) && (ea_list->rc == E_TIMEOUT || ea_list->rc >= E_99))) //Request send, but no cw answered!
				{
					ofs += snprintf(sby + ofs, sizeof(sby) - ofs - 1, "%s%s", ofs ? "," : " by ", ea_list->reader->label);
				}
			}
			if(er->ocaid && ofs < (int32_t)sizeof(sby))
				{ snprintf(sby + ofs, sizeof(sby) - ofs - 1, "(btun %04X)", er->ocaid); }
#ifdef CS_CACHEEX
		}
#endif
	}
	else if(er_reader)
	{
		// add marker to reader if ECM_REQUEST was betatunneled
		if(er->ocaid)
			{ snprintf(sby, sizeof(sby) - 1, " by %s(btun %04X)", er_reader->label, er->ocaid); }
		else
			{ snprintf(sby, sizeof(sby) - 1, " by %s", er_reader->label); }
	}
#ifdef CS_CACHEEX
	else if(er->cacheex_src) // only for cacheex mode-3 clients (no mode-1 or mode-2 because reader is set!) and csp
	{
		char *cex_name = "-";
		if(check_client(er->cacheex_src) && er->cacheex_src->account)
		{
			if(er->cacheex_src->account->usr[0] != '\0')
				cex_name = er->cacheex_src->account->usr;
			else
				cex_name = "csp";
		}
		if(er->ocaid)
		{
			snprintf(sby, sizeof(sby) - 1, " by %s(btun %04X)", cex_name, er->ocaid);
		}
		else
		{
			snprintf(sby, sizeof(sby) - 1, " by %s", cex_name);
		}
	}
#endif

	if(er->rc < E_NOTFOUND)
	{
		er->rcEx = 0;
		// memset(er->msglog, 0, MSGLOGSIZE); // remove reader msglog from previous requests that failed, founds never give back msglog!
	}

	if(er->rcEx)
		{ snprintf(erEx, sizeof(erEx) - 1, "rejected %s%s", stxtWh[er->rcEx >> 4], stxtEx[er->rcEx & 0xf]); }

	get_servicename_or_null(client, er->srvid, er->prid, er->caid, channame, sizeof(channame));
	if(!channame[0])
	{
		schaninfo[0] = '\0';
	}
	else
	{
		snprintf(schaninfo, sizeof(schaninfo) - 1, " - %s", channame);
	}

	// build the "(reason)" suffix for the result log line
#ifdef CS_CACHEEX
	int cx = 0;
	if(er->msglog[0])
	{
		cx = snprintf(sreason, sizeof(sreason) - 1, " (%s)", er->msglog);
	}
#else
	if(er->msglog[0])
	{
		snprintf(sreason, sizeof(sreason) - 1, " (%s)", er->msglog);
	}
#endif
#ifdef CW_CYCLE_CHECK
	if(er->cwc_msg_log[0])
		{ snprintf(scwcinfo, sizeof(scwcinfo) - 1, " (%.26s)", er->cwc_msg_log); }
#endif

	cs_ftime(&tpe);

#ifdef CS_CACHEEX
	int cx2 = 0;
	if(er->rc >= E_CACHEEX && er->cacheex_wait_time && er->cacheex_wait_time_expired)
	{
		cx2 = snprintf(sreason+cx, (sizeof sreason)-cx, " (wait_time over)");
	}
	else
	{
		cx2 = cx;
	}
	if(er->cw_count>1)
	{
#ifdef CS_CACHEEX_AIO
		if(er->cw_count > 0x0F000000 || er->localgenerated)
		{
			// high bits flag a locally generated cw; strip them for display
			uint32_t cw_count_cleaned = er->cw_count ^ 0x0F000000;
			if(cw_count_cleaned > 1)
				snprintf(sreason+cx2, (sizeof sreason)-cx2, " (cw count %d) (lg)", cw_count_cleaned);
			else
				snprintf(sreason+cx2, (sizeof sreason)-cx2, " (lg)");
		}
		else
		{
#endif
			snprintf (sreason+cx2, (sizeof sreason)-cx2, " (cw count %d)", er->cw_count);
#ifdef CS_CACHEEX_AIO
		}
	}
	else
	{
		if(er->localgenerated)
			snprintf(sreason+cx2, (sizeof sreason)-cx2, " (lg)");
#endif
	}
#endif

	// update response time stats for client and (if any) answering reader
	client->cwlastresptime = comp_timeb(&tpe, &er->tps);
	time_t now = time(NULL);
	webif_client_add_lastresponsetime(client, client->cwlastresptime, now, er->rc); // add to ringbuffer

	if(er_reader)
	{
		struct s_client *er_cl = er_reader->client;
		if(check_client(er_cl))
		{
			er_cl->cwlastresptime = client->cwlastresptime;
			webif_client_add_lastresponsetime(er_cl, client->cwlastresptime, now, er->rc);
			er_cl->last_providptr = client->last_providptr;
			er_cl->last_srvidptr = client->last_srvidptr;
		}
	}

	webif_client_init_lastreader(client, er, er_reader, stxt);
	client->last = now;

	// per-result-class counters (client, account, global)
	switch(er->rc)
	{
		case E_FOUND:
		{
			client->cwfound++;
			client->account->cwfound++;
			first_client->cwfound++;
			break;
		}
		case E_CACHE1:
		case E_CACHE2:
		case E_CACHEEX:
		{
			client->cwcache++;
			client->account->cwcache++;
			first_client->cwcache++;
#ifdef CS_CACHEEX
			if(check_client(er->cacheex_src))
			{
				first_client->cwcacheexhit++;
				er->cacheex_src->cwcacheexhit++;
				if(er->cacheex_src->account)
					{ er->cacheex_src->account->cwcacheexhit++; }
			}
#endif
			break;
		}
		case E_NOTFOUND:
		case E_CORRUPT:
		case E_NOCARD:
		{
			if(er->rcEx)
			{
				client->cwignored++;
				client->account->cwignored++;
				first_client->cwignored++;
			}
			else
			{
				client->cwnot++;
				client->account->cwnot++;
				first_client->cwnot++;
			}
			break;
		}
		case E_TIMEOUT:
		{
			client->cwtout++;
			client->account->cwtout++;
			first_client->cwtout++;
			break;
		}
		default:
		{
			client->cwignored++;
			client->account->cwignored++;
			first_client->cwignored++;
		}
	}

#ifdef CS_ANTICASC
	// [zaplist] ACoSC anticascading
	if(cfg.acosc_enabled)
	{
		int8_t max_active_sids = 0;
		int8_t zap_limit = 0;
		int8_t penalty = 0;
		int32_t penalty_duration = 0;
		int32_t delay = 0;
		int8_t max_ecms_per_minute = 0;
		char *info1 = NULL;
		char *info2 = NULL;
		char *info3 = NULL;
		char *info4 = NULL;
		char *info5 = NULL;
		char *info6 = NULL;
		// resolve each limit: per-account value (-1 means "inherit global")
		cs_writelock(__func__, &clientlist_lock);
		max_active_sids = client->account->acosc_max_active_sids == -1 ? cfg.acosc_max_active_sids : client->account->acosc_max_active_sids;
		info1 = client->account->acosc_max_active_sids == -1 ? "Globalvalue" : "Uservalue";
		zap_limit = client->account->acosc_zap_limit == -1 ? cfg.acosc_zap_limit : client->account->acosc_zap_limit;
		info5 = client->account->acosc_zap_limit == -1 ? "Globalvalue" : "Uservalue";
		penalty = client->account->acosc_penalty == -1 ? cfg.acosc_penalty : client->account->acosc_penalty;
		info2 = client->account->acosc_penalty == -1 ? "Globalvalue" : "Uservalue";
		penalty_duration = client->account->acosc_penalty_duration == -1 ? cfg.acosc_penalty_duration : client->account->acosc_penalty_duration;
		info3 = client->account->acosc_penalty_duration == -1 ? "Globalvalue" : "Uservalue";
		delay = client->account->acosc_delay == -1 ? cfg.acosc_delay : client->account->acosc_delay;
		info4 = client->account->acosc_delay == -1 ? "Globalvalue" : "Uservalue";
		max_ecms_per_minute = client->account->acosc_max_ecms_per_minute == -1 ? cfg.acosc_max_ecms_per_minute : client->account->acosc_max_ecms_per_minute;
		info6 = client->account->acosc_max_ecms_per_minute == -1 ? "Globalvalue" : "Uservalue";

		if((er->rc < E_NOTFOUND && max_active_sids > 0) || zap_limit > 0 || max_ecms_per_minute > 0)
		{
			int8_t k = 0;
			int8_t active_sid_count = 0;
			time_t zaptime = time(NULL);

			// penalty expired -> reset to normal operation
			if(client->account->acosc_penalty_active == 4 && client->account->acosc_penalty_until <= zaptime) // reset penalty_active
			{
				client->account->acosc_penalty_active = 0;
				client->account->acosc_penalty_until = 0;
			}

			// count sids with 10+ consecutive ECMs within the last 30s
			if(client->account->acosc_penalty_active == 0 && max_active_sids > 0)
			{
				for(k=0; k<15 ; k++)
				{
					if(zaptime-30 < client->client_zap_list[k].lasttime && client->client_zap_list[k].request_stage == 10)
					{
						cs_log_dbg(D_TRACE, "[zaplist] ACoSC for Client: %s more then 10 ECM's for %04X@%06X/%04X/%04X", username(client), client->client_zap_list[k].caid, client->client_zap_list[k].provid, client->client_zap_list[k].chid, client->client_zap_list[k].sid);
						active_sid_count ++;
					}
				}
				cs_log_dbg(D_TRACE, "[zaplist] ACoSC for Client: %s active_sid_count= %i with more than 10 followed ECM's (mas:%i (%s))", username(client), active_sid_count, max_active_sids, info1);
			}

			// trigger a penalty when any of the enabled limits is exceeded
			if(client->account->acosc_penalty_active == 0 && max_active_sids > 0 && active_sid_count > max_active_sids) //max_active_sids reached
			{
				client->account->acosc_penalty_active = 1;
				client->account->acosc_penalty_until = zaptime + penalty_duration;
			}
			if(client->account->acosc_penalty_active == 0 && zap_limit > 0 && client->account->acosc_user_zap_count > zap_limit) // zap_limit reached
			{
				client->account->acosc_penalty_active = 2;
				client->account->acosc_penalty_until = zaptime + penalty_duration;
			}
			if(client->account->acosc_penalty_active == 0 && max_ecms_per_minute > 0 && client->n_request[1] >= max_ecms_per_minute && penalty != 4) // max ecms per minute reached
			{
				client->account->acosc_penalty_active = 3;
				client->account->acosc_penalty_until = zaptime + penalty_duration;
			}
			if(client->account->acosc_penalty_active == 0 && max_ecms_per_minute > 0 && client->n_request[1] > 0 && penalty == 4) // max ecms per minute with hidecards penalty
			{
				client->account->acosc_penalty_active = 3;
				client->account->acosc_penalty_until = zaptime + penalty_duration;
			}

			if(client->account->acosc_penalty_active > 0)
			{
				if(client->account->acosc_penalty_active == 4)
					{ cs_log_dbg(D_TRACE, "[zaplist] ACoSC for Client: %s penalty_duration: %" PRId64 " seconds left(%s)", username(client), (int64_t)(client->account->acosc_penalty_until - zaptime), info3); }

				int16_t lt = get_module(client)->listenertype;
				switch(penalty)
				{
					case 1: // NULL CW
						er->rc = E_FAKE; // E_FAKE give only a status fake not a NULL cw
						er->rcEx = E2_WRONG_CHKSUM;
						if(client->account->acosc_penalty_active == 1)
							{ cs_log("[zaplist] ACoSC for Client: %s max_activ_sids reached: %i:%i(%s) penalty: 1(%s) send null CW", username(client), active_sid_count, max_active_sids, info1, info2); }
						if(client->account->acosc_penalty_active == 2)
							{ cs_log("[zaplist] ACoSC for Client: %s zap_limit reached: %i:%i(%s) penalty: 1(%s) send null CW", username(client), client->account->acosc_user_zap_count, zap_limit, info5, info2); }
						if(client->account->acosc_penalty_active == 3)
							{ cs_log("[maxecms] ACoSC for Client: %s max_ecms_per_minute reached: ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 1(%s) send null CW", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2); }
						break;
					case 2: // ban
						if(lt != LIS_DVBAPI)
						{
							if(client->account->acosc_penalty_active == 1)
								{ cs_log("[zaplist] ACoSC for Client: %s max_activ_sids reached: %i:%i(%s) penalty: 2(%s) BAN Client - Kill and set Client to failban list for %i sec.", username(client), active_sid_count, max_active_sids, info1, info2, penalty_duration); }
							if(client->account->acosc_penalty_active == 2)
								{ cs_log("[zaplist] ACoSC for Client: %s zap_limit reached: %i:%i(%s) penalty: 2(%s) BAN Client - Kill and set Client to failban list for %i sec.", username(client), client->account->acosc_user_zap_count, zap_limit, info5, info2, penalty_duration); }
							if(client->account->acosc_penalty_active == 3)
								{ cs_log("[maxecms] ACoSC for Client: %s max_ecms_per_minute reached: ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 2(%s) BAN Client - Kill and set Client to failban list for %i sec.", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2, penalty_duration); }
							cs_add_violation_acosc(client, client->account->usr, penalty_duration);
							add_job(client, ACTION_CLIENT_KILL, NULL, 0);
						}
						else
						{
							cs_log("[zaplist] ACoSC for Client: %s %i:%i(%s) penalty: 2(%s) BAN Client - don't Ban dvbapi user only stop decoding", username(client), active_sid_count, max_active_sids, info1, info2);
						}
						er->rc = E_DISABLED;
						break;
					case 3: // delay
						if(client->account->acosc_penalty_active == 1)
							{ cs_log("[zaplist] ACoSC for Client: %s max_activ_sids reached: %i:%i(%s) penalty: 3(%s) delay CW: %ims(%s)", username(client), active_sid_count, max_active_sids, info1, info2, delay, info4); }
						if(client->account->acosc_penalty_active == 2)
							{ cs_log("[zaplist] ACoSC for Client: %s zap_limit reached: %i:%i(%s) penalty: 3(%s) delay CW: %ims(%s)", username(client), client->account->acosc_user_zap_count, zap_limit, info5, info2, delay, info4); }
						if(client->account->acosc_penalty_active == 3)
							{ cs_log("[maxecms] ACoSC for Client: %s max_ecms_per_minute reached: ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 3(%s) delay CW: %ims(%s)", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2, delay, info4); }
						// drop the lock while sleeping so other clients are not blocked
						cs_writeunlock(__func__, &clientlist_lock);
						cs_sleepms(delay);
						cs_writelock(__func__, &clientlist_lock);
						client->cwlastresptime += delay;
						snprintf(sreason, sizeof(sreason)-1, " (%d ms penalty delay)", delay);
						break;
					case 4: // hidecards
						if(client->account->acosc_penalty_active == 3)
						{
							cs_log("[maxecms] ACoSC for Client: %s ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 4(%s) hidecards - hidecards to the client for %i sec", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2, penalty_duration);
							client->start_hidecards = 1;
						}
						break;
					default: // logging
						if(client->account->acosc_penalty_active == 1)
							{ cs_log("[zaplist] ACoSC for Client: %s max_activ_sids reached: %i:%i(%s) penalty: 0(%s) only logging", username(client), active_sid_count, max_active_sids, info1, info2); }
						if(client->account->acosc_penalty_active == 2)
							{ cs_log("[zaplist] ACoSC for Client: %s zap_limit reached: %i:%i(%s) penalty: 0(%s) only logging", username(client), client->account->acosc_user_zap_count, zap_limit, info5, info2); }
						if(client->account->acosc_penalty_active == 3)
							{ cs_log("[maxecms] ACoSC for Client: %s max_ecms_per_minute reached: ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 0(%s) only logging", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2); }
						break;
				}
				client->account->acosc_user_zap_count = 0; // we got already a penalty
				// mark penalty as applied; state 4 is cleared above once acosc_penalty_until passes
				// (removed dead store: "= 3" was immediately overwritten by "= 4")
				client->account->acosc_penalty_active = 4;
			}
		}
		cs_writeunlock(__func__, &clientlist_lock);
	}
#endif

	// optional CW double-check: require the same cw from two different readers
	if(cfg.double_check && er->rc <= E_CACHE2 && er->selected_reader && is_double_check_caid(er, &cfg.double_check_caid))
	{
		if(er->checked == 0) // First CW, save it and wait for next one
		{
			er->checked = 1;
			er->origin_reader = er->selected_reader;
			memcpy(er->cw_checked, er->cw, sizeof(er->cw));
			cs_log("DOUBLE CHECK FIRST CW by %s idx %d cpti %d", er->origin_reader->label, er->idx, er->msgid);
		}
		else if(er->origin_reader != er->selected_reader) // Second (or third and so on) cw. We have to compare
		{
			if(memcmp(er->cw_checked, er->cw, sizeof(er->cw)) == 0)
			{
				er->checked++;
				cs_log("DOUBLE CHECKED! %d. CW by %s idx %d cpti %d", er->checked, er->selected_reader->label, er->idx, er->msgid);
			}
			else
			{
				cs_log("DOUBLE CHECKED NONMATCHING! %d. CW by %s idx %d cpti %d", er->checked, er->selected_reader->label, er->idx, er->msgid);
			}
		}
		if(er->checked < 2) // less as two same cw? mark as pending!
		{
			er->rc = E_UNHANDLED;
			goto ESC;
		}
	}

	ac_chk(client, er, 1);

	// E_FAKE is delivered to the client as a "found" answer
	int32_t is_fake = 0;
	if(er->rc == E_FAKE)
	{
		is_fake = 1;
		er->rc = E_FOUND;
	}

	get_module(client)->send_dcw(client, er);
	add_cascade_data(client, er);

	if(is_fake)
		{ er->rc = E_FAKE; }

#ifdef CS_ANTICASC
	// hidecards penalty was requested above: schedule it outside the penalty lock scope
	cs_writelock(__func__, &clientlist_lock);
	if(client->start_hidecards)
	{
		client->start_hidecards = 0;
		add_job(client, ACTION_CLIENT_HIDECARDS, NULL, 0);
	}
	cs_writeunlock(__func__, &clientlist_lock);
#endif

	// final result log line (suppressed for zero-time sleep answers)
	if(!(er->rc == E_SLEEPING && client->cwlastresptime == 0))
	{
		char buf[ECM_FMT_LEN];
		format_ecm(er, buf, ECM_FMT_LEN);
		if(er->reader_avail == 1 || er->stage == 0)
		{
			cs_log("%s (%s): %s (%d ms)%s%s%s%s", usrname, buf, er->rcEx ? erEx : stxt[er->rc],
					client->cwlastresptime, sby, schaninfo, sreason, scwcinfo);
		}
		else
		{
			cs_log("%s (%s): %s (%d ms)%s (%c/%d/%d/%d)%s%s%s%s", usrname, buf, er->rcEx ? erEx : stxt[er->rc],
					client->cwlastresptime, sby, stageTxt[er->stage], er->reader_requested,
					(er->reader_count + er->fallback_reader_count), er->reader_avail, schaninfo,
					srealecmtime, sreason, scwcinfo);
		}
	}

	cs_log_dump_dbg(D_ATR, er->cw, 16, "cw:");
	led_status_cw_not_found(er);

ESC:
	return 0;
}
/*
 * write_ecm_request(): queue the ecm on the reader's client as an
 * ACTION_READER_ECM_REQUEST job. Always reports success (1); the job
 * is processed asynchronously by the reader's worker thread.
 */
static int32_t write_ecm_request(struct s_reader *rdr, ECM_REQUEST *er)
{
	add_job(rdr->client, ACTION_READER_ECM_REQUEST, (void *)er, 0);
	return 1;
}
/**
 * sends the ecm request to the readers
 * ECM_REQUEST er : the ecm
 * er->stage: 0 = no reader asked yet
 *            1 = ask cacheex readers (CS_CACHEEX builds only; skipped otherwise)
 *            2 = ask only local reader (skipped without preferlocalcards)
 *            3 = ask any non fallback reader
 *            4 = ask fallback reader
 * stop_stage: when non-zero, stop before entering that stage.
 * Advances stage by stage until at least one request reached a connected
 * reader (or the fallback stage is exhausted).
 **/
void request_cw_from_readers(ECM_REQUEST *er, uint8_t stop_stage)
{
	struct s_ecm_answer *ea;
	int8_t sent = 0;
	if(er->stage >= 4) { return; }
	while(1)
	{
		if(stop_stage && er->stage >= stop_stage) { return; }
		er->stage++;
#ifdef CS_CACHEEX
		// preferlocalcards==2 means "skip the cacheex stage"
		if(er->stage == 1 && er->preferlocalcards==2)
			{ er->stage++; }
#else
		// without cacheex support stage 1 does not exist
		if(er->stage == 1)
			{ er->stage++; }
#endif
		if(er->stage == 2 && !er->preferlocalcards)
			{ er->stage++; }
		for(ea = er->matching_rdr; ea; ea = ea->next)
		{
			// per-stage eligibility filter: skip readers already asked or
			// whose status flags do not match the current stage
			switch(er->stage)
			{
#ifdef CS_CACHEEX
				case 1:
				{
					// Cache-Exchange
					if((ea->status & REQUEST_SENT) ||
							(ea->status & (READER_ACTIVE | READER_FALLBACK | READER_LOCAL)) != (READER_ACTIVE | READER_LOCAL))
						{ continue; }
					break;
				}
#endif
				case 2:
				{
					// only local reader
					if((ea->status & REQUEST_SENT) ||
							(ea->status & (READER_ACTIVE | READER_FALLBACK | READER_LOCAL)) != (READER_ACTIVE | READER_LOCAL))
						{ continue; }
					break;
				}
				case 3:
				{
					// any non fallback reader not asked yet
					if((ea->status & REQUEST_SENT) ||
							(ea->status & (READER_ACTIVE | READER_FALLBACK)) != READER_ACTIVE)
						{ continue; }
					break;
				}
				default:
				{
					// only fallbacks
					if((ea->status & REQUEST_SENT) ||
							(ea->status & (READER_ACTIVE | READER_FALLBACK)) != (READER_ACTIVE | READER_FALLBACK))
						{ continue; }
					break;
				}
			}
			struct s_reader *rdr = ea->reader;
#ifdef WITH_DEBUG
			if (cs_dblevel & (D_TRACE | D_CSP))
			{
				char ecmd5[17 * 3];
				cs_hexdump(0, er->ecmd5, 16, ecmd5, sizeof(ecmd5));
				cs_log_dbg(D_TRACE | D_CSP, "request_cw stage=%d to reader %s ecm hash=%s", er->stage, rdr ? rdr->label : "", ecmd5);
			}
#endif
			// maxparallel: check if reader has capacity (slot is reserved later when CW found)
			if(rdr->maxparallel > 0 && !reader_has_capacity(rdr, er->srvid, er->client))
			{
				cs_log_dbg(D_LB, "reader %s skipped for %s srvid %04X (maxparallel %d reached)",
					rdr->label, check_client(er->client) ? er->client->account->usr : "-",
					er->srvid, rdr->maxparallel);
				continue;
			}
			ea->status |= REQUEST_SENT;
			cs_ftime(&ea->time_request_sent);
			er->reader_requested++;
			write_ecm_request(ea->reader, er);
			// set sent=1 only if reader is active/connected. If not, switch to next stage!
			if(!sent && rdr)
			{
				struct s_client *rcl = rdr->client;
				if(check_client(rcl))
				{
					if(rcl->typ == 'r' && rdr->card_status == CARD_INSERTED)
						{ sent = 1; }
					else if(rcl->typ == 'p' && (rdr->card_status == CARD_INSERTED || rdr->tcp_connected))
						{ sent = 1; }
				}
			}
			cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_request] reader %s --> SENT %d", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, rdr ? ea->reader->label : "-", sent);
		}
		// stop once a connected reader got the request, or no stages remain
		if(sent || er->stage >= 4)
			{ break; }
	}
}
// Build a standalone copy of a reader-answered ecm and push it into the cw
// cache (and, with CS_CACHEEX, onto the pushed/deleted list so it can be
// distributed to cacheex peers). The copy carries the cw, identifiers,
// cwc cycle data and the originating reader's group.
void add_cache_from_reader(ECM_REQUEST *er, struct s_reader *rdr, uint32_t csp_hash, uint8_t *ecmd5, uint8_t *cw, int16_t caid, int32_t prid, int16_t srvid
#ifdef CS_CACHEEX_AIO
	, int32_t ecm_time
#endif
)
{
	ECM_REQUEST *ecm;
	if (cs_malloc(&ecm, sizeof(ECM_REQUEST)))
	{
		memset(ecm, 0, sizeof(ECM_REQUEST));
		cs_ftime(&ecm->tps);
		ecm->cwc_cycletime = er->cwc_cycletime;
		ecm->cwc_next_cw_cycle = er->cwc_next_cw_cycle;
		memcpy(ecm->ecm, er->ecm, sizeof(ecm->ecm)); // ecm[0] is pushed to cacheexclients so we need a copy from it
		ecm->caid = caid;
		ecm->prid = prid;
		ecm->srvid = srvid;
		memcpy(ecm->ecmd5, ecmd5, CS_ECMSTORESIZE);
		ecm->csp_hash = csp_hash;
		ecm->rc = E_FOUND;
		memcpy(ecm->cw, cw, sizeof(ecm->cw));
		ecm->grp = rdr->grp;
		ecm->selected_reader = rdr;
		ecm->from_csp = er->from_csp; // propagate the from_csp flag
#ifdef CS_CACHEEX_AIO
		ecm->ecm_time = ecm_time;
		ecm->localgenerated = er->localgenerated;
#endif
#ifdef CS_CACHEEX
		if(rdr && cacheex_reader(rdr))
			{ ecm->cacheex_src = rdr->client; } //so adds hits to reader
#endif
		add_cache(ecm); //add cw to cache
#ifdef CS_CACHEEX
		// keep the copy on the pushed/deleted list instead of freeing it
		cs_writelock(__func__, &ecm_pushed_deleted_lock);
		ecm->next = ecm_pushed_deleted;
		ecm_pushed_deleted = ecm;
		cs_writeunlock(__func__, &ecm_pushed_deleted_lock);
#else
		NULLFREE(ecm);
#endif
	}
}
// Process a single reader answer (s_ecm_answer) for its ecm request:
// detects conflicting CWs on already-answered requests, runs the optional
// CW voting logic, handles cacheex-1 reader answers separately, and on
// E_FOUND / E_NOTFOUND decides whether to answer the client or to escalate
// to the next reader stage.
void chk_dcw(struct s_ecm_answer *ea)
{
	if(!ea || !ea->er)
		{ return; }
	ECM_REQUEST *ert = ea->er;
	struct s_ecm_answer *ea_list;
	struct s_reader *eardr = ea->reader;
	if(!ert || !eardr)
		{ return; }
	// ecm request already answered!
	if(ert->rc < E_99)
	{
#ifdef CS_CACHEEX
		// late answer disagrees with the cw already sent -> log a warning
		if(ea && ert->rc < E_NOTFOUND && ea->rc < E_NOTFOUND && memcmp(ea->cw, ert->cw, sizeof(ert->cw)) != 0)
		{
			char cw1[16 * 3 + 2], cw2[16 * 3 + 2];
#ifdef WITH_DEBUG
			if(cs_dblevel & D_TRACE)
			{
				cs_hexdump(0, ea->cw, 16, cw1, sizeof(cw1));
				cs_hexdump(0, ert->cw, 16, cw2, sizeof(cw2));
			}
#endif
			char ip1[20] = "", ip2[20] = "";
			if(ea->reader && check_client(ea->reader->client)) { cs_strncpy(ip1, cs_inet_ntoa(ea->reader->client->ip), sizeof(ip1)); }
			if(ert->cacheex_src) { cs_strncpy(ip2, cs_inet_ntoa(ert->cacheex_src->ip), sizeof(ip2)); }
			else if(ert->selected_reader && check_client(ert->selected_reader->client)) { cs_strncpy(ip2, cs_inet_ntoa(ert->selected_reader->client->ip), sizeof(ip2)); }
			ECM_REQUEST *er = ert;
			debug_ecm(D_TRACE, "WARNING2: Different CWs %s from %s(%s)<>%s(%s): %s<>%s", buf,
					username(ea->reader ? ea->reader->client : ert->client), ip1,
					er->cacheex_src ? username(er->cacheex_src) : (ert->selected_reader ? ert->selected_reader->label : "unknown/csp"), ip2,
					cw1, cw2);
		}
#endif
		return;
	}
	// CW voting logic: let multiple reader answers vote on the correct CW
	if (cfg.cwvote_enabled && is_cwvote_caid(ert) && ert->rc == E_UNHANDLED)
	{
		if (cfg.cwvote_log_enabled)
		{
			cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [chk_dcw] CW Vote enabled and CAID matches, attempting to decide.",
				(check_client(ert->client) ? ert->client->account->usr : "-"), ert->caid, ert->prid, ert->srvid);
		}
		int vote_result = cw_vote_decide(ert);
		if (vote_result == 1) // a winning CW was found
		{
			ert->rc = E_FOUND;
			ert->rcEx = 0;
			send_dcw(ert->client, ert);
			return; // ECM handled by voting
		}
		// If vote_result is 0 (no clear winner) or -1 (not enough votes),
		// continue with the existing logic to allow more readers / a timeout.
	}
#ifdef CS_CACHEEX
	/* if the answer comes from a cacheex-1 reader, do not send it to the client!
	 * the check_cache thread checks the counter and sends the answer to the client!
	 * In any case, we should check whether we have to move on to another stage (>1)
	 */
	if(eardr && cacheex_reader(eardr))
	{
		// if wait_time, and not wait_time expired and wait_time due to hitcache(or awtime>0),
		// we have to wait cacheex timeout before call other readers (stage>1)
		if(cacheex_reader(eardr) && !ert->cacheex_wait_time_expired && ert->cacheex_hitcache)
			{ return; }
		int8_t cacheex_left = 0;
		uint8_t has_cacheex = 0;
		if(ert->stage == 1)
		{
			// count cacheex readers still outstanding; when none are left,
			// escalate to the next reader stage
			for(ea_list = ert->matching_rdr; ea_list; ea_list = ea_list->next)
			{
				cs_readlock(__func__, &ea_list->ecmanswer_lock);
				if(((ea_list->status & (READER_CACHEEX | READER_FALLBACK | READER_ACTIVE))) == (READER_CACHEEX | READER_ACTIVE))
					{ has_cacheex = 1; }
				if((!(ea_list->status & READER_FALLBACK) && ((ea_list->status & (REQUEST_SENT | REQUEST_ANSWERED | READER_CACHEEX | READER_ACTIVE)) == (REQUEST_SENT | READER_CACHEEX | READER_ACTIVE))) || ea_list->rc < E_NOTFOUND)
					{ cacheex_left++; }
				cs_readunlock(__func__, &ea_list->ecmanswer_lock);
			}
			if(has_cacheex && !cacheex_left) { request_cw_from_readers(ert, 0); }
		}
		return;
	}
#endif
	int32_t reader_left = 0, local_left = 0, reader_not_flb_left = 0, has_not_fallback = 0, has_local = 0;
	ert->selected_reader = eardr;
	switch(ea->rc)
	{
		case E_FOUND:
			// maxparallel: register this service on the winning reader
			if(eardr->maxparallel > 0)
				{ reader_register_service(eardr, ert->srvid, ert->client); }
			memcpy(ert->cw, ea->cw, 16);
			ert->cw_ex = ea->cw_ex;
			ert->rcEx = 0;
			ert->rc = ea->rc;
			ert->grp |= eardr->grp;
			cs_strncpy(ert->msglog, ea->msglog, sizeof(ert->msglog));
#ifdef HAVE_DVBAPI
			ert->adapter_index = ea->er->adapter_index;
#endif
			break;
		case E_INVALID:
		case E_NOTFOUND:
		{
			// check if there are other readers to ask, and if not send NOT_FOUND to client
			ert->rcEx = ea->rcEx;
			cs_strncpy(ert->msglog, ea->msglog, sizeof(ert->msglog));
			// tally outstanding readers by category (local / non-fallback / any)
			for(ea_list = ert->matching_rdr; ea_list; ea_list = ea_list->next)
			{
				cs_readlock(__func__, &ea_list->ecmanswer_lock);
				if((!(ea_list->status & READER_FALLBACK) && ((ea_list->status & (REQUEST_SENT | REQUEST_ANSWERED | READER_LOCAL | READER_ACTIVE)) == (REQUEST_SENT | READER_LOCAL | READER_ACTIVE))) || ea_list->rc < E_NOTFOUND)
					{ local_left++; }
				if((!(ea_list->status & READER_FALLBACK) && ((ea_list->status & (REQUEST_SENT | REQUEST_ANSWERED | READER_ACTIVE)) == (REQUEST_SENT | READER_ACTIVE))) || ea_list->rc < E_NOTFOUND)
					{ reader_not_flb_left++; }
				if(((ea_list->status & (REQUEST_ANSWERED | READER_ACTIVE)) == (READER_ACTIVE)) || ea_list->rc < E_NOTFOUND)
					{ reader_left++; }
				if(((ea_list->status & (READER_FALLBACK | READER_ACTIVE))) == (READER_ACTIVE))
					{ has_not_fallback = 1; }
				if(((ea_list->status & (READER_LOCAL | READER_FALLBACK | READER_ACTIVE))) == (READER_LOCAL | READER_ACTIVE))
					{ has_local = 1; }
				cs_readunlock(__func__, &ea_list->ecmanswer_lock);
			}
			switch(ert->stage)
			{
				case 2: // only local reader (used only if preferlocalcards=1)
				{
					if(has_local && !local_left) { request_cw_from_readers(ert, 0); }
					break;
				}
				case 3:
				{
					// any fallback reader not asked yet
					if(has_not_fallback && !reader_not_flb_left) { request_cw_from_readers(ert, 0); }
					break;
				}
			}
			if(!reader_left // no more matching reader
#ifdef CS_CACHEEX
					&& !cfg.wait_until_ctimeout
#endif
				)
				{ ert->rc = E_NOTFOUND; } // so we set the return code
			break;
		}
		case E_TIMEOUT: // if timeout, we have to send timeout to client: this is done by ecm_timeout callback
			return;
			break;
		case E_UNHANDLED:
			return;
			break;
		default:
			cs_log("unexpected ecm answer rc=%d.", ea->rc);
			return;
			break;
	}
	// final state reached -> deliver to the client
	if(ert->rc < E_99)
		send_dcw(ert->client, ert);
}
uint32_t chk_provid(uint8_t *ecm, uint16_t caid)
{
	/* Extract the provider id carried inside a raw ecm payload. Parsing
	 * depends on the CA system (high byte of the caid); returns 0 when no
	 * provider id can be derived. */
	uint32_t provid = 0;
	switch(caid >> 8)
	{
		case 0x01: // seca: provid sits directly at offset 3
			provid = b2i(2, ecm + 3);
			break;

		case 0x05: // viaccess
		{
			// short ecm: skip an optional d2 nano, then read the 0x90/0x40 nano
			int32_t skip = 0;
			if(ecm[4] == 0xD2)
				{ skip = ecm[5] + 2; }
			if((ecm[5 + skip] == 3) && ((ecm[4 + skip] == 0x90) || (ecm[4 + skip] == 0x40)))
				{ provid = (b2i(3, ecm + 6 + skip) & 0xFFFFF0); }

			// long ecm variant: d2 nano at offset 6
			skip = 0;
			if(ecm[6] == 0xD2)
				{ skip = ecm[7] + 2; }
			if((ecm[7 + skip] == 7) && ((ecm[6 + skip] == 0x90) || (ecm[6 + skip] == 0x40)))
				{ provid = (b2i(3, ecm + 8 + skip) & 0xFFFFF0); }
			break;
		}

		case 0x0D: // cryptoworks: scan descriptors for tag 0x83
		{
			int32_t total_len = (((ecm[1] & 0xf) << 8) | ecm[2]) + 3;
			int32_t pos;
			for(pos = 8; pos < total_len; pos += ecm[pos + 1] + 2)
			{
				if(ecm[pos] == 0x83)
				{
					provid = (uint32_t)ecm[pos + 2] & 0xFE;
					break;
				}
			}
			break;
		}

		case 0x18: // nagra2
			if(caid == 0x1801) // more safety
				{ provid = b2i(2, ecm + 5); }
			break;
	}
	return provid;
}
/*
 * Refresh er->chid from the raw ECM payload via get_subid(), which knows
 * the per-CA-system layout. Called for every client request so the chid
 * is available wherever the request is processed afterwards.
 */
void update_chid(ECM_REQUEST *er)
{
	er->chid = get_subid(er);
}
/*
 * Record a reader's answer (rc + optional CW) for an ECM request, run all
 * sanity checks on the CW, update reader statistics/counters, feed the CW
 * vote pool when enabled, push the CW into the cache, and finally queue the
 * answer to the requesting client.
 *
 * Parameters:
 *   reader        - the reader that produced this answer (must be non-NULL)
 *   er            - the ECM request being answered; if er->parent is set,
 *                   er is a per-reader copy and the parent is used instead
 *   rc / rcEx     - result code and extended result code from the reader
 *   cw            - 16-byte control word, may be NULL for error answers
 *   msglog        - optional log message copied into the answer
 *   used_cardtier - card tier used, stored in the answer
 *   cw_ex         - optional extended CW (BISS2 etc.), copied if non-NULL
 *
 * Returns 1 when the answer was queued to the client's job queue, 0 when
 * the answer was dropped (too late, duplicate, invalid args, alloc failure).
 */
int32_t write_ecm_answer(struct s_reader *reader, ECM_REQUEST *er, int8_t rc, uint8_t rcEx, uint8_t *cw, char *msglog, uint16_t used_cardtier, EXTENDED_CW* cw_ex)
{
	if(!reader || !er || !er->tps.time) { return 0; }

	// drop too late answers, to avoid seg fault --> only answer until tps.time+((cfg.ctimeout+500)/1000+1) is accepted
	time_t timeout = time(NULL) - ((cfg.ctimeout + 500) / 1000 + 1);
	if(er->tps.time < timeout) // < and NOT <=
		{ return 0; }

	struct timeb now;
	cs_ftime(&now);

#ifdef CS_CACHEEX_AIO
	uint8_t dontsetAnswered = 0;
#endif
	uint8_t dontwriteStats = 0;

	if(er && er->parent)
	{
		// parent is only set on reader->client->ecmtask[], but we want original er
		ECM_REQUEST *er_reader_cp = er;
		er = er->parent; // Now er is "original" ecm, before it was the reader-copy
		er_reader_cp->rc = rc;
		er_reader_cp->idx = 0;
#ifdef CS_CACHEEX_AIO
		er->localgenerated = er_reader_cp->localgenerated;
#endif
		// re-run the late-answer check against the parent's timestamp
		timeout = time(NULL) - ((cfg.ctimeout + 500) / 1000 + 1);
		if(er->tps.time < timeout)
			{ return 0; }
	}

#ifdef CS_CACHEEX_AIO
	// reader only accepts locally-generated CWs: downgrade non-lg hits to
	// NOTFOUND and suppress both stats and the "answered" flag
	if(rc < E_NOTFOUND && !er->localgenerated && (reader->cacheex.localgenerated_only_in || chk_lg_only(er, &reader->cacheex.lg_only_in_tab)) && !chk_srvid_localgenerated_only_exception(er))
	{
		cs_log_dbg(D_CACHEEX, "reader: %s !er->localgenerated - rc: E_NOTFOUND set, no stats written for reader", reader ? reader->label : "-");
		rc = E_NOTFOUND;
		dontsetAnswered = 1;
		dontwriteStats = 1;
	}
#endif

	struct s_ecm_answer *ea = get_ecm_answer(reader, er);
	if(!ea) { return 0; }

	// answer slot is shared; lock it for the whole check/update sequence
	cs_writelock(__func__, &ea->ecmanswer_lock);
	if((ea->status & REQUEST_ANSWERED))
	{
		cs_log_dbg(D_READER, "Reader %s already answer, skip this ecm answer!", reader ? reader->label : "-");
		cs_writeunlock(__func__, &ea->ecmanswer_lock);
		return 0;
	}

	// Special checks for rc
	// Skip check for BISS1 - cw could be zero but still catch cw=0 by anticascading
	// Skip check for BISS2 - we use the extended cw, so the "simple" cw is always zero
	// bad/wrong chksum/ecm
	if(rc == E_NOTFOUND && rcEx == E2_WRONG_CHKSUM)
	{
		cs_log_dbg(D_READER, "Ai_vote: FAKE DCW DETECTED from reader %s (bad/wrong checksum)", reader ? reader->label : "-");
		rc = E_INVALID;
		rcEx = E2_WRONG_CHKSUM;
		er->stage = 5;

		// dont write stats for bad/wrong chksum/ecm
		dontwriteStats = 1;

		// set all other matching_readers => inactive to skip them and dont spread the bad ecm
		struct s_ecm_answer *ea_list;
		for(ea_list = er->matching_rdr; ea_list; ea_list = ea_list->next)
		{
			ea_list->status &= ~(READER_ACTIVE | READER_FALLBACK);
		}
	}

	// all-zero CW on a non-BISS caid is a fake hit
	if(rc < E_NOTFOUND && cw && chk_is_null_CW(cw) && !caid_is_biss(er->caid))
	{
		rc = E_NOTFOUND;
		cs_log_dbg(D_TRACE | D_LB, "Ai_vote: FAKE DCW DETECTED from reader %s (null CW)", reader ? reader->label : "-");
	}

	if(rc < E_NOTFOUND && cw && !chk_halfCW(er,cw))
	{
		rc = E_NOTFOUND;
		cs_log_dbg(D_TRACE | D_LB, "Ai_vote: FAKE DCW DETECTED from reader %s (wrong swapped NDS CW)", reader ? reader->label : "-");
	}

	// CW checksum validation/repair: each 4-byte half-word carries a checksum
	// byte (sum of the preceding 3 bytes)
	if(reader && cw && rc < E_NOTFOUND)
	{
		if(!cfg.disablecrccws && !reader->disablecrccws)
		{
			if(!chk_if_ignore_checksum(er, &cfg.disablecrccws_only_for) && !chk_if_ignore_checksum(er, &reader->disablecrccws_only_for))
			{
				uint8_t i, c;
				for(i = 0; i < 16; i += 4)
				{
					c = ((cw[i] + cw[i + 1] + cw[i + 2]) & 0xff);
					if(cw[i + 3] != c)
					{
						uint8_t nano = 0x00;
						if(er->caid == 0x100 && er->ecm[5] > 0x00)
						{
							nano = er->ecm[5]; // seca nano protection
						}

						if(reader->dropbadcws && !nano) // only drop controlword if no cw encryption is applied
						{
							rc = E_NOTFOUND;
							rcEx = E2_WRONG_CHKSUM;
							break;
						}
						else
						{
							if(!nano) // only fix checksum if no cw encryption is applied (nano = 0)
							{
								cs_log_dbg(D_TRACE, "notice: changed dcw checksum byte cw[%i] from %02x to %02x", i + 3, cw[i + 3], c);
								cw[i + 3] = c;
							}
							else
							{
								if(i == 12) // there are servers delivering correct controlwords but with failing last cw checksum (on purpose?!)
								{
									cs_log_dbg(D_TRACE,"NANO%02d: BAD PEER DETECTED, oscam has fixed the last cw crc that wasn't matching!", nano);
									cw[i + 3] = c; // fix the last controlword
								}
								else
								{
									cs_log_dbg(D_TRACE,"NANO%02d: not fixing the crc of this cw since its still encrypted!", nano);
									break; // crc failed so stop!
								}
							}
						}
					}
				}
			}
			else
			{
				cs_log_dbg(D_TRACE, "notice: CW checksum check disabled for %04X:%06X", er->caid, er->prid);
			}
		}
		else
		{
			cs_log_dbg(D_TRACE, "notice: CW checksum check disabled");
		}

		// videoguard with checksum check disabled: if the CW half in use still
		// LOOKS checksummed (2 matching sums), it is probably a bad CW
		if(chk_if_ignore_checksum(er, &reader->disablecrccws_only_for) && caid_is_videoguard(er->caid)
#ifdef CS_CACHEEX_AIO
			&& !chk_srvid_disablecrccws_only_for_exception(er)
#endif
		)
		{
			uint8_t k, csum;
			uint8_t hit = 0;
			uint8_t oe = checkCWpart(cw, 0) ? 0 : 8;
			for(k = 0; k < 8; k += 4)
			{
				csum = ((cw[k + oe] + cw[k + oe + 1] + cw[k + oe + 2]) & 0xff);
				if(cw[k + oe + 3] == csum)
				{
					hit++;
				}
			}
			if(hit > 1)
			{
				char ecmd5s[17 * 3];
				cs_hexdump(0, er->ecmd5, 16, ecmd5s, sizeof(ecmd5s));
				if(reader->dropbadcws)
				{
					rc = E_NOTFOUND;
					rcEx = E2_WRONG_CHKSUM;
					cs_log("Probably got bad CW from reader: %s, caid %04X, srvid %04X (%s) - dropping CW, lg: %i", reader->label, er->caid, er->srvid, ecmd5s
#ifdef CS_CACHEEX_AIO
						, er->localgenerated);
#else
						, 0);
#endif
				}
				else
				{
					cs_log("Probably got bad CW from reader: %s, caid %04X, srvid %04X (%s), lg: %i", reader->label, er->caid, er->srvid, ecmd5s
#ifdef CS_CACHEEX_AIO
						, er->localgenerated);
#else
						, 0);
#endif
				}
			}
		}
	}

#ifdef CW_CYCLE_CHECK
	// CW cycle check: reject CWs that break the expected odd/even cycle
	uint8_t cwc_ct = er->cwc_cycletime > 0 ? er->cwc_cycletime : 0;
	uint8_t cwc_ncwc = er->cwc_next_cw_cycle < 2 ? er->cwc_next_cw_cycle : 2;
	if(!checkcwcycle(er->client, er, reader, cw, rc, cwc_ct, cwc_ncwc))
	{
		rc = E_NOTFOUND;
		rcEx = E2_WRONG_CHKSUM;
		cs_log_dbg(D_CACHEEX | D_CWC | D_LB,
					"{client %s, caid %04X, srvid %04X} [write_ecm_answer] cyclecheck FAILED → DROP CW from reader: %s",
					(er->client ? er->client->account->usr : "-"),
					er->caid,
					er->srvid,
					reader ? reader->label : "-");
	}
	else
	{
		cs_log_dbg(D_CACHEEX | D_CWC | D_LB,
					"{client %s, caid %04X, srvid %04X} [write_ecm_answer] cyclecheck PASSED → CW allowed from reader: %s",
					(er->client ? er->client->account->usr : "-"),
					er->caid,
					er->srvid,
					reader ? reader->label : "-");
	}
#endif
	//END -- SPECIAL CHECKs for rc

#ifdef CS_CACHEEX_AIO
	if(!dontsetAnswered)
	{
#endif
		ea->status |= REQUEST_ANSWERED;
#ifdef CS_CACHEEX_AIO
	}
#endif

	// Update reader stats BEFORE vote logic (with original rc), because after vote ea->rc is set to E_UNHANDLED
	if(!dontwriteStats && rc < E_NOTFOUND)
	{
		ea->ecm_time = comp_timeb(&now, &ea->time_request_sent);
		if(ea->ecm_time < 1) { ea->ecm_time = 1; }
		send_reader_stat(reader, er, ea, rc, ea->ecm_time);
	}
	else
	{
		// still record the answer time even when no stat entry is written
		ea->ecm_time = comp_timeb(&now, &ea->time_request_sent);
		if(ea->ecm_time < 1) { ea->ecm_time = 1; }
	}

	// Update reader ECM counters BEFORE vote logic (with original rc)
	// These counters (ecmsok, ecmsnok, etc.) are used for webif statistics
	if(!ea->is_pending && rc < E_NOTFOUND)
	{
		reader->ecmsok++;
		reader->webif_ecmsok++;
#ifdef CS_CACHEEX_AIO
		if(er->localgenerated)
			reader->ecmsoklg++;
#endif
#ifdef CS_CACHEEX
		struct s_client *eacl = reader->client;
		if(cacheex_reader(reader) && check_client(eacl))
		{
			eacl->cwcacheexgot++;
			cacheex_add_stats(eacl, ea->er->caid, ea->er->srvid, ea->er->prid, 1
#ifdef CS_CACHEEX_AIO
								, er->localgenerated);
#else
							);
#endif
			first_client->cwcacheexgot++;
#ifdef CS_CACHEEX_AIO
			if(er->localgenerated)
			{
				eacl->cwcacheexgotlg++;
				first_client->cwcacheexgotlg++;
			}
#endif
		}
#endif
	}

	// Update reader ECM counters for E_NOTFOUND BEFORE vote logic
	if(!ea->is_pending && rc == E_NOTFOUND)
	{
		reader->ecmsnok++;
		reader->webif_ecmsnok++;
		// optional auto-restart once the configured not-found limit is hit
		if(reader->ecmnotfoundlimit && reader->ecmsnok >= reader->ecmnotfoundlimit)
		{
			rdr_log(reader, "ECM not found limit reached %u. Restarting the reader.",
					reader->ecmsnok);
			reader->ecmsnok = 0; // Reset the variable
			reader->ecmshealthnok = 0; // Reset the variable
			add_job(reader->client, ACTION_READER_RESTART, NULL, 0);
		}
	}

	// Update reader ECM counters for E_TIMEOUT BEFORE vote logic
	if(!ea->is_pending && rc == E_TIMEOUT)
	{
#ifdef WITH_LB
		STAT_QUERY q;
		readerinfofix_get_stat_query(er, &q);
		READER_STAT *s;
		s = readerinfofix_get_add_stat(reader, &q);
		if (s)
		{
			cs_log_dbg(D_LB, "inc fail {client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_answer] reader %s rc %d, ecm time %d ms", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, reader ? reader->label : "-", rc, ea->ecm_time);
			readerinfofix_inc_fail(s); // now increase fail factor for unhandled timeouts
		}
#endif
		reader->ecmstout++; // now append timeouts to the readerinfo timeout count
		reader->webif_ecmstout++;
	}

	// If a valid CW is found, and CW Vote is enabled for this CAID, add it to the vote pool and mark this answer as unhandled for now
	if(cfg.cwvote_enabled && is_cwvote_caid(er) && rc < E_NOTFOUND && cw && !chk_is_null_CW(cw) && !caid_is_biss(er->caid))
	{
		if (cfg.cwvote_log_enabled)
		{
			cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_answer] CW Vote enabled, adding CW to vote pool from reader %s.",
						(check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, reader->label);
		}
		cw_vote_add(er, cw, reader); // Add to vote pool
		ea->rc = E_UNHANDLED; // Mark as unhandled, waiting for vote decision
	}
	else
	{
		ea->rc = rc; // For non-CW answers or invalid CWs, set the actual RC
	}

	ea->rcEx = rcEx;
	if(cw) { memcpy(ea->cw, cw, 16); } // Store the CW in ea->cw for voting
	if(msglog) { memcpy(ea->msglog, msglog, MSGLOGSIZE); }
	ea->tier = used_cardtier;
	if(cw_ex)
	{
		ea->cw_ex = *cw_ex;
	}
	cs_writeunlock(__func__, &ea->ecmanswer_lock);

	struct timeb tpe;
	cs_ftime(&tpe);
	int32_t ntime = comp_timeb(&tpe, &er->tps);
	if(ntime < 1) { ntime = 1; }
	cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_answer] reader %s rc %d, ecm time %d ms (%d ms)", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, reader ? reader->label : "-", rc, ea->ecm_time, ntime);

	// send ea for ecm request
	int32_t res = 0;
	struct s_client *cl = er->client;
	if(check_client(cl))
	{
		res = 1;
		add_job(er->client, ACTION_ECM_ANSWER_READER, ea, 0); // chk_dcw
	}

	// distribute ea for pendings
	if(ea->pending) // has pending ea
		{ distribute_ea(ea); }

	if(!ea->is_pending) // not for pending ea - only once for ea
	{
		// cache update
		// Skip check for BISS1 - cw could be indeed zero
		// Skip check for BISS2 - we use the extended cw, so the "simple" cw is always zero
		if(ea && (ea->rc < E_NOTFOUND) && (!chk_is_null_CW(ea->cw) && !caid_is_biss(er->caid)))
		{
#ifdef CS_CACHEEX_AIO
			// include the expired cacheex wait time in the reported ecm time
			int32_t ecmtime = ea->ecm_time;
			if(er->cacheex_wait_time_expired && er->cacheex_wait_time)
				ecmtime = ea->ecm_time + er->cacheex_wait_time;
#endif
			add_cache_from_reader(er, reader, er->csp_hash, er->ecmd5, ea->cw, er->caid, er->prid, er->srvid
#ifdef CS_CACHEEX_AIO
									, ecmtime);
#else
								);
#endif
		}

		// reader checks
#ifdef WITH_DEBUG
		if(cs_dblevel & D_TRACE)
		{
			char ecmd5[17 * 3];
			cs_hexdump(0, er->ecmd5, 16, ecmd5, sizeof(ecmd5));
			rdr_log_dbg(reader, D_TRACE, "ecm answer for ecm hash %s rc=%d", ecmd5, ea->rc);
		}
#endif

		// Update reader stats:
		// Old reader ECM counter updates removed - now handled before vote logic

		// recompute health percentages from the running counters
		if(reader->ecmsok != 0 || reader->ecmsnok != 0 || reader->ecmstout != 0)
		{
			reader->ecmshealthok = ((double) reader->ecmsok / (reader->ecmsok + reader->ecmsnok + reader->ecmstout)) * 100;
#ifdef CS_CACHEEX_AIO
			reader->ecmshealthoklg = ((double) reader->ecmsoklg / (reader->ecmsok + reader->ecmsnok + reader->ecmstout)) * 100;
#endif
			reader->ecmshealthnok = ((double) reader->ecmsnok / (reader->ecmsok + reader->ecmsnok + reader->ecmstout)) * 100;
			reader->ecmshealthtout = ((double) reader->ecmstout / (reader->ecmsok + reader->ecmsnok + reader->ecmstout)) * 100;
		}

		// resetcycle: force a card re-init every N found ECMs when configured
		if(rc == E_FOUND && reader->resetcycle > 0)
		{
			reader->resetcounter++;
			if(reader->resetcounter > reader->resetcycle)
			{
				reader->resetcounter = 0;
				rdr_log(reader, "Resetting reader, resetcyle of %d ecms reached", reader->resetcycle);
				reader->card_status = CARD_NEED_INIT;
				cardreader_reset(cl);
			}
		}
	}
	return res;
}
// chid calculation moved from module stat to here
// to improve the quickfix concerning ecm chid info and extend it
// to all client requests whereby the chid is known in module stat
/*
 * Pull the channel/sub id (chid) out of the raw ECM for the CA system
 * families that encode one at a fixed offset. Returns 0 for an empty ECM
 * or an unhandled CA system.
 */
uint32_t get_subid(ECM_REQUEST *er)
{
	if(!er->ecmlen)
		{ return 0; }

	switch(er->caid >> 8)
	{
		case 0x01: // seca
			return b2i(2, er->ecm + 7);
		case 0x05: // viaccess
			return b2i(2, er->ecm + 8);
		case 0x06: // irdeto
			return b2i(2, er->ecm + 6);
		case 0x09: // videoguard
			return b2i(2, er->ecm + 11);
		case 0x4A: // DRE-Crypt, Bulcrypt, Tongfang and others?
			// DRE-Crypt and Bulcrypt do not carry a chid at this offset
			if(caid_is_bulcrypt(er->caid) || caid_is_dre(er->caid))
				{ return 0; }
			return b2i(2, er->ecm + 6);
		default:
			return 0;
	}
}
/*
 * Recount the reader categories attached to this request: regular vs
 * fallback readers, and among the active ones how many are cacheex
 * readers vs local readers. Results are stored on the request itself.
 */
static void set_readers_counter(ECM_REQUEST *er)
{
	struct s_ecm_answer *answer;

	er->reader_count = 0;
	er->fallback_reader_count = 0;
	er->localreader_count = 0;
	er->cacheex_reader_count = 0;

	for(answer = er->matching_rdr; answer; answer = answer->next)
	{
		if(!(answer->status & READER_ACTIVE))
			{ continue; } // inactive entries are not counted at all

		if(answer->status & READER_FALLBACK)
			{ er->fallback_reader_count++; }
		else
			{ er->reader_count++; }

		// cacheex takes precedence over local in the classification
		if(cacheex_reader(answer->reader))
			{ er->cacheex_reader_count++; }
		else if(is_localreader(answer->reader, er))
			{ er->localreader_count++; }
	}
}
/*
 * Deliver a cache hit to a waiting ECM request.
 *
 * wfc->er_new is the live request, wfc->er_cache the matching cache entry.
 * When CW voting is enabled for the CAID the cached CW is added to the vote
 * pool (the DCW is sent later by the vote decision or by the timeout
 * handler); otherwise the CW is copied into the request and the DCW is sent
 * immediately, with cacheex push statistics updated for mode-1 clients.
 */
void write_ecm_answer_fromcache(struct s_write_from_cache *wfc)
{
	ECM_REQUEST *er = NULL;
	ECM_REQUEST *ecm = NULL;
	er = wfc->er_new;
	ecm = wfc->er_cache;

#ifdef CS_CACHEEX_AIO
	// cw_count above 0x0F000000 marks a locally-generated entry
	if(ecm->localgenerated || (ecm->cw_count > 0x0F000000))
		er->localgenerated = 1;
#endif

	int8_t rc_orig = er->rc;
	er->grp |= ecm->grp; // update group
#ifdef CS_CACHEEX
	if(ecm->from_csp) { er->csp_answered = 1; } // update er as answered by csp (csp have no group)
#endif

	if(er->rc >= E_NOTFOUND || er->rc == E_UNHANDLED)
	{
		// CW Vote: If enabled and CAID matches, add to vote pool instead of sending DCW directly
		if (cfg.cwvote_enabled && is_cwvote_caid(er) && !chk_is_null_CW(ecm->cw) && !caid_is_biss(er->caid))
		{
			// Use cacheex_src or from_csp directly if selected_reader is NULL
			// This fixes the issue where virtual readers are NULL
			struct s_reader *source_rdr = NULL;
			if (ecm->selected_reader) {
				source_rdr = ecm->selected_reader;
			}
			// Pass the actual cacheex source or csp info for voting
			// Use ecm->cacheex_src or ecm->from_csp when virtual readers are not available
			if (source_rdr || ecm->cacheex_src || ecm->from_csp) {
				if (cfg.cwvote_log_enabled) {
					// pick the most specific source label for the log line
					const char *source_label = "unknown";
					if (source_rdr) {
						source_label = source_rdr->label;
					} else if (ecm->cacheex_src && check_client(ecm->cacheex_src) && ecm->cacheex_src->account) {
						source_label = ecm->cacheex_src->account->usr;
					} else if (ecm->from_csp) {
						source_label = "CSP";
					}
					cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_answer_fromcache] CW Vote enabled, adding CW from cache (source: %s) to vote pool.",
						(check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, source_label);
				}
				// Always copy cacheex_src and from_csp so cw_vote_decide can use it
				if (ecm->cacheex_src) {
					er->cacheex_src = ecm->cacheex_src;
				}
				if (ecm->from_csp) {
					er->from_csp = ecm->from_csp;
				}
				cw_vote_add(er, ecm->cw, source_rdr);
				// Keep er->rc as E_UNHANDLED to allow voting to proceed
				// Do NOT call send_dcw here. It will be called by cw_vote_decide or ecm_timeout.
			} else {
				// Fallback if no source could be determined for voting
				if (cfg.cwvote_log_enabled) {
					cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_answer_fromcache] CW Vote enabled, but source for cache entry could not be determined. Sending DCW directly.",
						(check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
				}
				send_dcw(er->client, er);
			}
		}
		else if (rc_orig == E_UNHANDLED) // Original condition for sending DCW if not voting
		{
#ifdef CS_CACHEEX
			if(ecm->cacheex_src) // from cacheex or csp
			{
				er->rc = E_CACHEEX;
			}
			else
#endif
				{ er->rc=E_CACHE1; } // from normal readers

			memcpy(er->cw, ecm->cw, 16);
			er->selected_reader = ecm->selected_reader;
			er->cw_count = ecm->cw_count;

#ifdef CS_CACHEEX
			// here we should be sure cex client has not been freed!
			if(ecm->cacheex_src && is_valid_client(ecm->cacheex_src) && !ecm->cacheex_src->kill)
			{
				er->cacheex_src = ecm->cacheex_src;
				er->cwc_cycletime = ecm->cwc_cycletime;
				er->cwc_next_cw_cycle = ecm->cwc_next_cw_cycle;
			}
			else
			{
				er->cacheex_src = NULL;
			}

			// cacheex mode-1 clients get push statistics credited here
			int8_t cacheex = check_client(er->client) && er->client->account ? er->client->account->cacheex.mode : 0;
			if(cacheex == 1 && check_client(er->client))
			{
				cacheex_add_stats(er->client, er->caid, er->srvid, er->prid, 0
#ifdef CS_CACHEEX_AIO
									, er->localgenerated);
#else
								);
#endif
				er->client->cwcacheexpush++;
				if(er->client->account)
					{ er->client->account->cwcacheexpush++; }
				first_client->cwcacheexpush++;
#ifdef CS_CACHEEX_AIO
				if(er->localgenerated)
				{
					er->client->cwcacheexpushlg++;
					first_client->cwcacheexpushlg++;
				}
#endif
			}
#endif

#ifdef CS_CACHEEX
			if(cfg.delay && cacheex!=1) // No delay on cacheexchange mode 1 client!
				{ cs_sleepms(cfg.delay); }
#else
			if(cfg.delay)
				{ cs_sleepms(cfg.delay); }
#endif

			if(rc_orig == E_UNHANDLED)
			{
				// NOTE(review): the CS_CACHEEX_AIO log argument below mutates
				// er->cw_count (clears the 0x0F000000 lg flag) inside a debug
				// macro argument — if cs_log_dbg skips argument evaluation when
				// D_LB is off, the flag is only stripped with debug enabled.
				// Confirm this is intended.
				cs_log_dbg(D_LB,"{client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_answer_fromcache] found cw in CACHE (count %d)!", (check_client(er->client)?er->client->account->usr:"-"),er->caid, er->prid, er->srvid,
#ifdef CS_CACHEEX_AIO
				(er->cw_count > 0x0F000000) ? er->cw_count ^= 0x0F000000 : er->cw_count);
#else
				er->cw_count);
#endif
				send_dcw(er->client, er);
			}
		}
	}
}
#ifdef CS_CACHEEX_AIO
/*
 * ECM drop filter (CS_CACHEEX_AIO): decide whether a newly arrived ECM
 * should still be processed.
 *
 * Keyed by er->csp_hash in a rwlock-protected hash table. First sighting
 * of a hash records its receive time and accepts the ECM; later sightings
 * are accepted until the hash has been known for cfg.ecm_cache_droptime
 * seconds, after which the ECM is dropped (returns false). When the table
 * hits its configured count/memory limit, a cleanup is triggered and the
 * ECM is dropped. With the cache uninitialized or droptime disabled,
 * every ECM is accepted.
 *
 * Returns true to continue processing the ECM, false to drop it.
 * Every return path must release ecm_cache_lock exactly once.
 */
static bool ecm_cache_check(ECM_REQUEST *er)
{
	if(ecm_cache_init_done && cfg.ecm_cache_droptime > 0)
	{
		ECM_CACHE *ecm_cache = NULL;
		SAFE_RWLOCK_WRLOCK(&ecm_cache_lock);

		ecm_cache = find_hash_table(&ht_ecm_cache, &er->csp_hash, sizeof(uint32_t), &compare_csp_hash_ecmcache);
		if(!ecm_cache)
		{
			// ecm_cache-size(count/memory) pre-check
			if(
				(cfg.ecm_cache_size && (cfg.ecm_cache_size > tommy_hashlin_count(&ht_ecm_cache)))
				|| (cfg.ecm_cache_memory && (cfg.ecm_cache_memory*1024*1024 > tommy_hashlin_memory_usage(&ht_ecm_cache)))
			)
			{
				if(cs_malloc(&ecm_cache, sizeof(ECM_CACHE)))
				{
					// first time we see this hash: remember it and accept
					ecm_cache->csp_hash = er->csp_hash;
					cs_ftime(&ecm_cache->first_recv_time);
					cs_ftime(&ecm_cache->upd_time);

					tommy_hashlin_insert(&ht_ecm_cache, &ecm_cache->ht_node, ecm_cache, tommy_hash_u32(0, &er->csp_hash, sizeof(er->csp_hash)));
					tommy_list_insert_tail(&ll_ecm_cache, &ecm_cache->ll_node, ecm_cache);

					SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
					return true;
				}
				else{
					// allocation failed: unlock before logging, drop the ECM
					SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
					cs_log("[ecm_cache] ERROR: NO added HASH to ecm_cache!!");
					return false;
				}
			}
			else{
				// cache full (count or memory limit reached):
				// clean cache call;
				SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
				ecm_cache_cleanup(true);
				return false;
			}
		}
		// ecm found
		else{
			int64_t gone_diff = 0;
			gone_diff = comp_timeb(&er->tps, &ecm_cache->first_recv_time);
			cs_ftime(&ecm_cache->upd_time);

			// hash older than the configured droptime: drop the ECM
			if(gone_diff >= cfg.ecm_cache_droptime * 1000)
			{
				cs_log_dbg(D_CW_CACHE, "[ecm_cache] ECM drop, current ecm_cache_size: %i - ecm_cache-mem-size: %i MiB", count_hash_table(&ht_ecm_cache), (int)(tommy_hashlin_memory_usage(&ht_ecm_cache)/1024/1024));
				SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
				return false;
			}
		}

		SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
		return true;
	}
	else{
		// cache not initialized or droptime disabled: accept everything
		cs_log_dbg(D_CW_CACHE,"[ecm_cache] ecm_cache_init_done %i cfg.ecm_cache_size: %"PRIu32" cfg.ecm_cache_memory %"PRIu32" MiB", ecm_cache_init_done, cfg.ecm_cache_size, cfg.ecm_cache_memory);
		return true;
	}
}
#endif
void get_cw(struct s_client *client, ECM_REQUEST *er)
{
// clienttimeout per CAID override
uint32_t client_timeout = cfg.ctimeout;
int32_t percaid_timeout = caidvaluetab_get_value(&cfg.ctimeouttab, er->caid, 0);
if(percaid_timeout > 0)
{
client_timeout = percaid_timeout;
}
er->client_timeout = client_timeout;
#ifdef CS_CACHEEX_AIO
cacheex_update_hash(er);
if(!ecm_cache_check(er))
{
er->rc = E_INVALID;
send_dcw(client, er);
free_ecm(er);
return;
}
#endif
cs_log_dump_dbg(D_ATR, er->ecm, er->ecmlen, "get cw for ecm:");
cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] NEW REQUEST!", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
increment_n_request(client);
int32_t i, j, m;
time_t now = time((time_t *)0);
uint32_t line = 0;
uint16_t sct_len;
er->client = client;
er->rc = E_UNHANDLED; // set default rc status to unhandled
er->cwc_next_cw_cycle = 2; // set it to: we dont know
// user was on freetv or didn't request for some time
// so we reset lastswitch to get correct stats/webif display
if(now - client->lastecm > cfg.hideclient_to) { client->lastswitch = 0; }
client->lastecm = now;
if(client == first_client || !client ->account || client->account == first_client->account)
{
// DVBApi+serial is allowed to request anonymous accounts:
int16_t listenertype = get_module(client)->listenertype;
if(listenertype != LIS_DVBAPI && listenertype != LIS_SERIAL)
{
er->rc = E_INVALID;
er->rcEx = E2_GLOBAL;
snprintf(er->msglog, sizeof(er->msglog), "invalid user account %s", username(client));
}
}
// ecmlen must be 0 (no ecm) or >2 (because SCT_LEN() needs at least 3 bytes)
if(er->ecmlen < 0 || er->ecmlen == 1 || er->ecmlen == 2)
{
er->rc = E_INVALID;
er->rcEx = E2_GLOBAL;
snprintf(er->msglog, sizeof(er->msglog), "ECM size %d invalid, ignored! client %s", er->ecmlen, username(client));
}
if(er->ecmlen > MAX_ECM_SIZE)
{
er->rc = E_INVALID;
er->rcEx = E2_GLOBAL;
snprintf(er->msglog, sizeof(er->msglog), "ECM size %d > Max ECM size %d, ignored! client %s", er->ecmlen, MAX_ECM_SIZE, username(client));
}
if(er->ecmlen > 2)
{
sct_len = SCT_LEN(er->ecm);
if(sct_len > er->ecmlen || sct_len < 4)
{
er->rc = E_INVALID;
er->rcEx = E2_GLOBAL;
snprintf(er->msglog, sizeof(er->msglog), "Real ECM size %d > ECM size %d, ignored! client %s", sct_len, er->ecmlen, username(client));
}
er->ecmlen = sct_len;
}
if(!client->grp)
{
er->rc = E_INVALID;
er->rcEx = E2_GROUP;
snprintf(er->msglog, sizeof(er->msglog), "invalid user group %s", username(client));
}
// add chid for all client requests as in module stat
update_chid(er);
// betacrypt ecm with nagra header
if(chk_is_betatunnel_caid(er->caid) == 1 && (er->ecmlen == 0x89 || er->ecmlen == 0x4A) && er->ecm[3] == 0x07 && (er->ecm[4] == 0x84 || er->ecm[4] == 0x45))
{
if(er->caid == 0x1702)
{
er->caid = 0x1833;
}
else
{
check_lb_auto_betatunnel_mode(er);
}
cs_log_dbg(D_TRACE, "Quickfix remap beta->nagra: 0x%X, 0x%X, 0x%X, 0x%X", er->caid, er->ecmlen, er->ecm[3], er->ecm[4]);
}
// nagra ecm with betacrypt header 1801, 1833, 1834, 1835
if(chk_is_betatunnel_caid(er->caid) == 2 && (er->ecmlen == 0x93 || er->ecmlen == 0x54) && er->ecm[13] == 0x07 && (er->ecm[14] == 0x84 || er->ecm[14] == 0x45))
{
if(er->caid == 0x1833)
{
er->caid = 0x1702;
}
else
{
er->caid = 0x1722;
}
cs_log_dbg(D_TRACE, "Quickfix remap nagra->beta: 0x%X, 0x%X, 0x%X, 0x%X", er->caid, er->ecmlen, er->ecm[13], er->ecm[44]);
}
// Ariva quickfix (invalid nagra provider)
if(((er->caid & 0xFF00) == 0x1800) && er->prid > 0x00FFFF)
{ er->prid = 0; }
// Check for invalid provider, extract provider out of ecm:
uint32_t prid = chk_provid(er->ecm, er->caid);
if(!er->prid)
{
er->prid = prid;
}
else
{
if(prid && prid != er->prid)
{
cs_log_dbg(D_TRACE, "provider fixed: %04X@%06X to %04X@%06X", er->caid, er->prid, er->caid, prid);
er->prid = prid;
}
}
#ifdef MODULE_NEWCAMD
// Set providerid for newcamd clients if none is given
if(!er->prid && client->ncd_server)
{
int32_t pi = client->port_idx;
if(pi >= 0 && cfg.ncd_ptab.nports && cfg.ncd_ptab.nports >= pi && cfg.ncd_ptab.ports[pi].ncd)
{ er->prid = cfg.ncd_ptab.ports[pi].ncd->ncd_ftab.filts[0].prids[0]; }
}
#endif
// CAID not supported or found
if(!er->caid)
{
er->rc = E_INVALID;
er->rcEx = E2_CAID;
snprintf(er->msglog, MSGLOGSIZE, "CAID not supported or found");
}
// user expired
if(client->expirationdate && client->expirationdate < client->lastecm)
{ er->rc = E_EXPDATE; }
// out of timeframe
if(client->allowedtimeframe_set)
{
struct tm acttm;
localtime_r(&now, &acttm);
int32_t curday = acttm.tm_wday;
char *dest = strstr(weekdstr,"ALL");
int32_t all_idx = (dest - weekdstr) / 3;
uint8_t allowed = 0;
// checkout if current time is allowed in the current day
allowed = CHECK_BIT(client->allowedtimeframe[curday][acttm.tm_hour][acttm.tm_min / 30], (acttm.tm_min % 30));
// or checkout if current time is allowed for all days
allowed |= CHECK_BIT(client->allowedtimeframe[all_idx][acttm.tm_hour][acttm.tm_min / 30], (acttm.tm_min % 30));
if(!(allowed))
{
er->rc = E_EXPDATE;
}
cs_log_dbg(D_TRACE, "Check Timeframe - result: %d, day:%s time: %02dH%02d, allowed: %s\n", er->rc, shortDay[curday], acttm.tm_hour, acttm.tm_min, allowed ? "true" : "false");
}
// user disabled
if(client->disabled != 0)
{
if(client->failban & BAN_DISABLED)
{
cs_add_violation(client, client->account->usr);
cs_disconnect_client(client);
}
er->rc = E_DISABLED;
}
if(!chk_global_whitelist(er, &line))
{
debug_ecm(D_TRACE, "whitelist filtered: %s (%s) line %d", username(client), buf, line);
er->rc = E_INVALID;
}
#ifdef CS_CACHEEX
if(client->account && client->account->cacheex.mode == 2 && !client->account->cacheex.allow_request)
{
er->rc = E_INVALID;
snprintf(er->msglog, MSGLOGSIZE, "invalid request from cacheex-2 client");
}
#endif
// rc < 100 -> ecm error
if(er->rc >= E_UNHANDLED)
{
m = er->caid;
i = er->srvid;
if(i != client->last_srvid || !client->lastswitch)
{
if(cfg.usrfileflag)
{ cs_statistics(client); }
client->lastswitch = now;
}
// user sleeping
if(client->tosleep && (now - client->lastswitch > client->tosleep))
{
if(client->failban & BAN_SLEEPING)
{
cs_add_violation(client, client->account->usr);
cs_disconnect_client(client);
}
if(client->c35_sleepsend != 0)
{
er->rc = E_STOPPED; // send sleep command CMD08 {00 255}
}
else
{
er->rc = E_SLEEPING;
}
}
client->last_srvid = i;
client->last_caid = m;
client->last_provid = er->prid;
int32_t ecm_len = (((er->ecm[1] & 0x0F) << 8) | er->ecm[2]) + 3;
for(j = 0; (j < 6) && (er->rc >= E_UNHANDLED); j++)
{
switch(j)
{
case 0:
// fake (uniq)
if(client->dup)
{ er->rc = E_FAKE; }
break;
case 1:
// invalid (caid)
if(!chk_bcaid(er, &client->ctab))
{
er->rc = E_INVALID;
er->rcEx = E2_CAID;
snprintf(er->msglog, MSGLOGSIZE, "invalid caid 0x%04X", er->caid);
}
break;
case 2:
// invalid (srvid)
// matching srvids (or 0000) specified in betatunnel will bypass this filter
if(!chk_srvid(client, er))
{
if(!chk_on_btun(SRVID_ZERO, client, er))
{
er->rc = E_INVALID;
snprintf(er->msglog, MSGLOGSIZE, "invalid SID");
}
}
break;
case 3:
// invalid (ufilters)
if(!chk_ufilters(er))
{ er->rc = E_INVALID; }
break;
case 4:
// invalid (sfilter)
if(!chk_sfilter(er, &get_module(client)->ptab))
{ er->rc = E_INVALID; }
break;
case 5:
// corrupt
if((i = er->ecmlen - ecm_len))
{
if(i > 0)
{
cs_log_dbg(D_TRACE, "warning: ecm size adjusted from %d to %d", er->ecmlen, ecm_len);
er->ecmlen = ecm_len;
}
else
{ er->rc = E_CORRUPT; }
}
break;
}
}
}
// Check for odd/even byte
// Don't check for BISS1 and BISS2 mode 1/E or fake caid (ECM is fake for them)
// Don't check for BISS2 mode CA (ECM table is always 0x80)
if(!caid_is_biss(er->caid) && !caid_is_fake(er->caid) && get_odd_even(er) == 0)
{
cs_log_dbg(D_TRACE, "warning: ecm with null odd/even byte from %s", (check_client(er->client) ? er->client->account->usr : "-"));
er->rc = E_INVALID;
}
// not continue, send rc to client
if(er->rc < E_UNHANDLED)
{
send_dcw(client, er);
free_ecm(er);
return;
}
// CW Vote: If enabled and CAID matches, keep as E_UNHANDLED to allow voting
if (cfg.cwvote_enabled && is_cwvote_caid(er))
{
if (cfg.cwvote_log_enabled)
{
cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] CW Vote enabled for this CAID, keeping ECM as E_UNHANDLED for voting. Logging enabled.",
(check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
}
else
{
cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] CW Vote enabled for this CAID, keeping ECM as E_UNHANDLED for voting. Logging disabled.",
(check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
}
er->rc = E_UNHANDLED; // Ensure it's unhandled for voting
}
else if (er->rc < E_UNHANDLED) // If not for voting, and already an error, send it
{
send_dcw(client, er);
free_ecm(er);
return;
}
#ifdef CS_CACHEEX
int8_t cacheex = client->account ? client->account->cacheex.mode : 0;
er->from_cacheex1_client = 0;
if(cacheex == 1) {er->from_cacheex1_client = 1;}
#endif
// set preferlocalcards for this ecm request (actually, paramter
// is per user based, maybe in fiture it will be caid based too)
er->preferlocalcards = cfg.preferlocalcards;
if(client->account && client->account->preferlocalcards > -1)
{
er->preferlocalcards = client->account->preferlocalcards;
}
if(er->preferlocalcards <0 || er->preferlocalcards >2) {er->preferlocalcards=0;}
if(chk_is_betatunnel_caid(er->caid) && client->ttab.ttnum)
{
cs_log_dump_dbg(D_TRACE, er->ecm, 13, "betatunnel? ecmlen=%d", er->ecmlen);
cs_betatunnel(er);
}
// ignore ecm...
int32_t offset = 3;
// ...and betacrypt header for cache md5 calculation
if(caid_is_betacrypt(er->caid))
{ offset = 13; }
uint8_t md5tmp[MD5_DIGEST_LENGTH];
// store ECM in cache
memcpy(er->ecmd5, MD5(er->ecm + offset, er->ecmlen - offset, md5tmp), CS_ECMSTORESIZE);
cacheex_update_hash(er);
ac_chk(client, er, 0);
//******** CHECK IF FOUND ECM IN CACHE
struct ecm_request_t *ecm = NULL;
ecm = check_cache(er, client);
if(ecm) // found in cache
{
cs_log_dbg(D_LB,"{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] cw found immediately in cache! ", (check_client(er->client)?er->client->account->usr:"-"),er->caid, er->prid, er->srvid);
struct s_write_from_cache *wfc = NULL;
if(!cs_malloc(&wfc, sizeof(struct s_write_from_cache)))
{
NULLFREE(ecm);
free_ecm(er);
return;
}
wfc->er_new = er;
wfc->er_cache = ecm;
write_ecm_answer_fromcache(wfc);
NULLFREE(wfc);
NULLFREE(ecm);
free_ecm(er);
return;
}
// zaplist ACoSC
#ifdef CS_ANTICASC
if(cfg.acosc_enabled)
{
cs_writelock(__func__, &clientlist_lock);
insert_zaplist(er, client);
cs_writeunlock(__func__, &clientlist_lock);
}
#endif
er->reader_avail = 0;
er->readers = 0;
struct s_ecm_answer *ea, *prv = NULL;
struct s_reader *rdr;
cs_readlock(__func__, &readerlist_lock);
cs_readlock(__func__, &clientlist_lock);
for(rdr = first_active_reader; rdr; rdr = rdr->next)
{
uint8_t is_fallback = chk_is_fixed_fallback(rdr, er);
int8_t match = matching_reader(er, rdr);
if(!match) // if this reader does not match, check betatunnel for it
match = lb_check_auto_betatunnel(er, rdr);
if(match)
{
er->reader_avail++;
#ifdef CS_CACHEEX
if(cacheex == 1 && !cacheex_reader(rdr)) // ex1-cl only ask ex1-rdr
{ continue; }
#endif
if(!cs_malloc(&ea, sizeof(struct s_ecm_answer)))
{ goto OUT; }
#ifdef WITH_EXTENDED_CW
// Correct CSA mode is CBC - default to that instead
ea->cw_ex.algo_mode = CW_ALGO_MODE_CBC;
#endif
er->readers++;
ea->reader = rdr;
ea->er = er;
ea->rc = E_UNHANDLED;
if(prv)
{ prv->next = ea; }
else
{ er->matching_rdr = ea; }
prv = ea;
ea->status = READER_ACTIVE;
if(cacheex_reader(rdr))
{ ea->status |= READER_CACHEEX; }
else if(is_localreader(rdr, er))
{ ea->status |= READER_LOCAL; }
if(is_fallback && (!is_localreader(rdr, er) || (is_localreader(rdr, er) && !er->preferlocalcards)))
{ ea->status |= READER_FALLBACK; }
ea->pending = NULL;
ea->is_pending = false;
cs_lock_create(__func__, &ea->ecmanswer_lock, "ecmanswer_lock", 5000);
}
}
OUT:
cs_readunlock(__func__, &clientlist_lock);
cs_readunlock(__func__, &readerlist_lock);
lb_set_best_reader(er);
// set reader_count and fallback_reader_count
set_readers_counter(er);
// if preferlocalcards > 0, check if we have local readers selected:
// if not, switch to preferlocalcards = 0 for this ecm
if(er->preferlocalcards > 0)
{
if(er->localreader_count == 0)
{
er->preferlocalcards = 0;
cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} NO local readers, set preferlocalcards = %d",
(check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, er->preferlocalcards);
}
}
#ifdef CS_CACHEEX
// WAIT_TIME
uint32_t cacheex_wait_time = 0;
uint32_t wait_time_no_hitcache = 0;
uint32_t wait_time_hitcache = 0;
if(client->account && !client->account->no_wait_time
#ifdef CS_CACHEEX_AIO
&& !chk_srvid_no_wait_time(er)
#endif
&& er->preferlocalcards<2)
{
wait_time_no_hitcache = get_cacheex_wait_time(er,NULL); // NO check hitcache. Wait_time is dwtime, or, if 0, awtime.
wait_time_hitcache = get_cacheex_wait_time(er,client); // check hitcache for calculating wait_time! If hitcache wait_time is biggest value between dwtime and awtime, else it's awtime.
if(
// If "normal" client and ex1-rdr>0, we cannot use hitcache for calculating wait_time because we cannot know if cw is available or not on ex1 server!
(cacheex != 1 && er->cacheex_reader_count)
||
/* Cw for ex1-cl comes from: INT. cache by "normal" readers (normal clients that ask normal readers), ex1-rdr and ex2-rdr and ex3-rdr.
* If readers, we have to wait cws generating by normal clients asking normal readers and answers by ex1-rdr (cannot use hitcache).
* If no readers, use hitcache for calculating wait_time.
*/
(cacheex == 1 && er->reader_avail)
)
{ cacheex_wait_time = wait_time_no_hitcache; }
else
{ cacheex_wait_time = wait_time_hitcache; }
}
cs_log_dbg(D_TRACE | D_CACHEEX, "[GET_CW] wait_time %d caid %04X prov %06X srvid %04X rc %d cacheex cl mode %d ex1rdr %d", cacheex_wait_time, er->caid, er->prid, er->srvid, er->rc, cacheex, er->cacheex_reader_count);
cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] wait_time %d - client cacheex mode %d, reader avail for ecm %d, hitcache %d, preferlocalcards %d", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, cacheex_wait_time, cacheex == 1 ? 1 : 0, er->reader_avail, wait_time_hitcache ? 1 : 0, er->preferlocalcards);
// END WAIT_TIME calculation
if(!cacheex_wait_time && (er->reader_count + er->fallback_reader_count) == 0)
#else
if((er->reader_count + er->fallback_reader_count) == 0)
#endif
{
er->rc = E_NOTFOUND;
if(!er->rcEx)
{ er->rcEx = E2_GROUP; }
snprintf(er->msglog, MSGLOGSIZE, "no matching reader");
cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] NO Readers and NO wait_time... not_found! ", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
send_dcw(client, er);
free_ecm(er);
return;
}
//insert it in ecmcwcache!
cs_writelock(__func__, &ecmcache_lock);
er->next = ecmcwcache;
ecmcwcache = er;
ecmcwcache_size++;
cs_writeunlock(__func__, &ecmcache_lock);
er->rcEx = 0;
#ifdef CS_CACHEEX
er->cacheex_wait_time = 0;
er->cacheex_wait_time_expired = 1;
er->cacheex_hitcache = 0;
er->cacheex_mode1_delay = 0;
if(cacheex_wait_time) // wait time for cacheex
{
er->cacheex_wait_time = cacheex_wait_time;
er->cacheex_wait_time_expired = 0;
er->cacheex_hitcache = wait_time_hitcache ? 1 : 0; // usefull only when cacheex mode 1 readers answers before wait_time and we have to decide if we have to wait until wait_time expires.
er->cacheex_mode1_delay = get_cacheex_mode1_delay(er);
if(!er->cacheex_mode1_delay && er->cacheex_reader_count > 0)
{
request_cw_from_readers(er, 1); // setting stop_stage=1, we request only cacheex mode 1 readers. Others are requested at cacheex timeout!
}
}
else
#endif
request_cw_from_readers(er, 0);
#ifdef WITH_DEBUG
if(D_CLIENTECM & cs_dblevel)
{
char buf[ECM_FMT_LEN];
format_ecm(er, buf, ECM_FMT_LEN);
cs_log_dump_dbg(D_CLIENTECM, er->ecm, er->ecmlen, "Client %s ECM dump %s", username(client), buf);
}
#endif
cw_process_thread_wakeup();
}
// Render a human-readable description of an ECM into result (at most size
// bytes, NUL-terminated for size > 0).
//
// When cfg.ecmfmt is unset, one of four fixed default layouts is emitted,
// depending on whether payload and/or tier information is available.
// Otherwise cfg.ecmfmt is a per-character template:
//   'c'=caid  'o'=onid  'p'=prid  'i'=chid  'd'=pid  's'=srvid
//   'l'=ecm length  'h'=ecmd5 hex  'e'=csp hash  'w'=cw hex
//   'j'=distance  'g'=origin peer  't'=tier  'y'/'Y'=payload (short/full)
//   '0' hides the NEXT field when it is zero/NULL, '\' escapes the next
//   character, anything else is copied verbatim.
//
// Returns the number of characters stored (excluding the terminator).
int32_t ecmfmt(char *result, size_t size, uint16_t caid, uint16_t onid, uint32_t prid, uint16_t chid, uint16_t pid,
				uint16_t srvid, uint16_t l, char *ecmd5hex, char *csphash, char *cw, uint16_t origin_peer, uint8_t distance, char *payload, char *tier)
{
	if(!result || size == 0)
		{ return 0; }

	if(!cfg.ecmfmt)
	{
		if(tier && payload)
		{
			return snprintf(result, size, "%04X@%06X/%04X/%04X/%02X:%s:0F06%s:%s", caid, prid, chid, srvid, l, ecmd5hex, payload, tier);
		}
		else if(tier)
		{
			return snprintf(result, size, "%04X@%06X/%04X/%04X/%02X:%s:%s", caid, prid, chid, srvid, l, ecmd5hex, tier);
		}
		else if(payload)
		{
			return snprintf(result, size, "%04X@%06X/%04X/%04X/%02X:%s:0F06%s", caid, prid, chid, srvid, l, ecmd5hex, payload);
		}
		else
		{
			return snprintf(result, size, "%04X@%06X/%04X/%04X/%02X:%s", caid, prid, chid, srvid, l, ecmd5hex);
		}
	}

#define ECMFMT_NUMBER 0
#define ECMFMT_STRING 1
#define ECMFMT_CHAR 2

	uint8_t type = 0;
	uint32_t ivalue = 0;
	char *ifmt = NULL, *sfmt = NULL;
	char *svalue = NULL, cvalue = '\0';
	uint8_t hide_if_zero = 0;
	char *c;
	uint32_t s = 0;
	int n;

	for(c = cfg.ecmfmt; *c; c++)
	{
		if(*c == '0')
		{
			hide_if_zero = 1;
			continue;
		}

		sfmt = NULL;

		switch(*c)
		{
			case 't':
				type = ECMFMT_STRING;
				svalue = tier;
				if(tier == NULL && !hide_if_zero)
				{
					type = ECMFMT_NUMBER;
					ifmt = "%04X";
					ivalue = 0;
				}
				break;
			case 'c':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = caid;
				break;
			case 'o':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = onid;
				break;
			case 'p':
				type = ECMFMT_NUMBER;
				ifmt = "%06X";
				ivalue = prid;
				break;
			case 'i':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = chid;
				break;
			case 'd':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = pid;
				break;
			case 's':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = srvid;
				break;
			case 'l':
				type = ECMFMT_NUMBER;
				ifmt = "%02X";
				ivalue = l;
				break;
			case 'h':
				type = ECMFMT_STRING;
				svalue = ecmd5hex;
				break;
			case 'e':
				type = ECMFMT_STRING;
				svalue = csphash;
				break;
			case 'w':
				type = ECMFMT_STRING;
				svalue = cw;
				break;
			case 'j':
				type = ECMFMT_NUMBER;
				ifmt = "%02X";
				ivalue = distance;
				break;
			case 'g':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = origin_peer;
				break;
			case '\\':
				// escape: emit the next character literally;
				// a trailing backslash ends the format
				c++;
				type = ECMFMT_CHAR;
				cvalue = *c;
				if(cvalue == '\0')
					{ return s; }
				break;
			case 'y':
				type = ECMFMT_STRING;
				svalue = payload;
				sfmt = "0F06%.06s";
				if(payload == NULL && !hide_if_zero)
				{
					type = ECMFMT_NUMBER;
					ifmt = "0F06%06X";
					ivalue = 0;
				}
				break;
			case 'Y':
				type = ECMFMT_STRING;
				svalue = payload;
				sfmt = "0F06%s";
				if(payload == NULL && !hide_if_zero)
				{
					type = ECMFMT_NUMBER;
					ifmt = "0F06%12X";
					ivalue = 0;
				}
				break;
			default:
				type = ECMFMT_CHAR;
				cvalue = *c;
				break;
		}

		// '0' modifier: skip this field entirely when it is empty
		if(hide_if_zero)
		{
			if(type == ECMFMT_NUMBER && ivalue == 0)
			{
				hide_if_zero = 0;
				continue;
			}
			else if(type == ECMFMT_STRING && svalue == NULL)
			{
				hide_if_zero = 0;
				continue;
			}
		}

		switch(type)
		{
			case ECMFMT_NUMBER:
				// snprintf returns the untruncated length; clamp s so that
				// "size - s" can never wrap around (size_t underflow) and
				// "result + s" never points past the buffer on truncation
				n = snprintf(result + s, size - s, ifmt, ivalue);
				if(n > 0)
				{
					s += n;
					if(s >= size)
						{ s = size - 1; }
				}
				break;
			case ECMFMT_STRING:
				// guard against NULL svalue: "%s" with NULL is undefined behavior
				n = snprintf(result + s, size - s, sfmt != NULL ? sfmt : "%s", svalue != NULL ? svalue : "");
				if(n > 0)
				{
					s += n;
					if(s >= size)
						{ s = size - 1; }
				}
				break;
			case ECMFMT_CHAR:
				if(size - s > 1)
				{
					result[s] = cvalue;
					result[s + 1] = '\0';
					s++;
				}
				break;
			default:
				break;
		}
	}
	return s;
}
// Build a printable description of an ECM request into result/size.
//
// Hex-dumps the request's ecmd5, csp hash (CS_CACHEEX builds) and cw fields,
// optionally collects videoguard payload/tier strings from the selected
// local reader, then delegates the actual formatting to ecmfmt().
// For gbox traffic the source peer id and hop distance are passed to
// ecmfmt() instead of the local reader's hop count.
//
// Returns the value of ecmfmt(), i.e. the formatted length.
int32_t format_ecm(ECM_REQUEST *ecm, char *result, size_t size)
{
	char ecmd5hex[(16*2)+1];
	char csphash[(4*2)+1] = { 0 };
	char cwhex[(16*2)+1];
	char *payload = NULL;
	char *tier = NULL;
#ifdef READER_VIDEOGUARD
	char payload_string[(6*2)+1];
	char tier_string[83];
	struct s_ecm_answer *ea;
	// Only local (non-network) videoguard readers expose payload/tier data
	if(ecm->selected_reader && caid_is_videoguard(ecm->selected_reader->caid) && !is_network_reader(ecm->selected_reader))
	{
		// Take the tier from the first answered local reader that reported one
		for(ea = ecm->matching_rdr; ea; ea = ea->next)
		{
			if(ea->tier && (ea->status & REQUEST_ANSWERED) && !is_network_reader(ea->reader))
			{
				get_tiername_defaultid(ea->tier, ecm->selected_reader->caid, tier_string);
				tier = tier_string;
				break;
			}
		}
		cs_hexdump(0, ecm->selected_reader->VgLastPayload, 6, payload_string, sizeof(payload_string));
		payload = payload_string;
	}
#endif
	cs_hexdump(0, ecm->ecmd5, 16, ecmd5hex, sizeof(ecmd5hex));
#ifdef CS_CACHEEX
	cs_hexdump(0, (void *)&ecm->csp_hash, 4, csphash, sizeof(csphash));
#endif
	cs_hexdump(0, ecm->cw, 16, cwhex, sizeof(cwhex));
#ifdef MODULE_GBOX
	// Request arrived from a gbox client: report the originating peer/distance
	if(check_client(ecm->client) && get_module(ecm->client)->num == R_GBOX && ecm->gbox_ecm_dist)
	{ return ecmfmt(result, size, ecm->caid, ecm->onid, ecm->prid, ecm->chid, ecm->pid, ecm->srvid, ecm->ecmlen, ecmd5hex, csphash, cwhex, ecm->gbox_ecm_src_peer, ecm->gbox_ecm_dist, payload, tier); }
	// Answer came via a gbox reader: report the CW source peer and hop count
	else if (ecm->selected_reader && ecm->selected_reader->typ == R_GBOX && !ecm->gbox_ecm_dist)
	{ return ecmfmt(result, size, ecm->caid, ecm->onid, ecm->prid, ecm->chid, ecm->pid, ecm->srvid, ecm->ecmlen, ecmd5hex, csphash, cwhex, ecm->selected_reader->gbox_cw_src_peer, ecm->selected_reader->currenthops, payload, tier); }
	else
#endif
		// Non-gbox path: no origin peer, distance is the reader's hop count (if any)
		return ecmfmt(result, size, ecm->caid, ecm->onid, ecm->prid, ecm->chid, ecm->pid, ecm->srvid, ecm->ecmlen, ecmd5hex, csphash, cwhex, 0,
					((ecm->selected_reader && ecm->selected_reader->currenthops) ? ecm->selected_reader->currenthops : 0), payload, tier);
}
/* =========================
 * AI fake DCW detector (CW voting)
 * ========================= */
// Register one CW candidate ("vote") for an ECM request.
//
// er  : the ECM request collecting votes
// cw  : the 16-byte control word being proposed
// rdr : the answering reader, or NULL when the CW came from cacheex/CSP
//
// Identical CWs from different sources are aggregated into one pool slot,
// so the same CW pushed by several cacheex peers accumulates votes.
// Returns 0 on success (vote recorded or aggregated), -1 if the pool is full.
int cw_vote_add(struct ecm_request_t *er, uint8_t *cw, struct s_reader *rdr)
{
	int i, free_idx = -1;
	int is_local = 0;
	char cw_hex[33];
	const char *source_label = "unknown";

	if (rdr) {
		source_label = rdr->label;
		// Virtual readers are never treated as local
		if (rdr == virtual_cacheex_reader || rdr == virtual_csp_reader) {
			is_local = 0;
		} else {
			is_local = is_localreader(rdr, er);
		}
	} else if (er->cacheex_src) {
		if (check_client(er->cacheex_src) && er->cacheex_src->account) {
			source_label = er->cacheex_src->account->usr;
		} else {
			source_label = "CACHEEX_CLIENT";
		}
	} else if (er->from_csp) {
		source_label = "CSP";
	}

	cs_hexdump(0, cw, 16, cw_hex, sizeof(cw_hex));

	// Reset the vote pool when a new ECM session begins;
	// er->tps.time is used as the session identifier
	if (er->vote_pool_session != er->tps.time) {
		memset(er->vote_pool, 0, sizeof(er->vote_pool));
		er->vote_pool_session = er->tps.time;
	}

	// Logging disabled - uncomment if needed
	// if (cfg.cwvote_log_enabled) {
	//     cs_log("[Ai_vote_add] Adding CW from client %s, source %s (CAID: %04X, PRID: %06X, SRVID: %04X). CW: %s",
	//            (check_client(er->client) ? er->client->account->usr : "-"),
	//            source_label, er->caid, er->prid, er->srvid, cw_hex);
	// }

	int max_cand = cfg.cwvote_max_candidates;
	// Never scan or write past the fixed-size vote_pool array, even if the
	// configured candidate count is larger (prevents out-of-bounds access)
	if (max_cand > MAX_VOTE_CANDIDATES)
		max_cand = MAX_VOTE_CANDIDATES;

	// Look for an existing slot with the same CW (full 16-byte compare,
	// not only a partial length) and remember the first free slot
	for (i = 0; i < max_cand; i++) {
		if (er->vote_pool[i].votes == 0 && free_idx < 0)
			free_idx = i;
		if (er->vote_pool[i].votes > 0 &&
		    memcmp(er->vote_pool[i].cw, cw, 16) == 0) {
			// CW already in the pool - aggregate votes regardless of source,
			// so votes from different cacheex peers for the same CW add up
			if (er->vote_pool[i].votes < MAX_VOTE_CANDIDATES) {
				er->vote_pool[i].voters[er->vote_pool[i].votes] = rdr;
				er->vote_pool[i].votes++;
				if (is_local) er->vote_pool[i].local_votes++;
			}
			// cs_log("[Ai_vote_add] Aggregated vote for existing CW → Votes: %d (local: %d) from %s",
			//        er->vote_pool[i].votes, er->vote_pool[i].local_votes, source_label);
			return 0;
		}
	}

	if (free_idx < 0) {
		// voting pool is full
		return -1;
	}

	// New CW: claim the first free slot
	memcpy(er->vote_pool[free_idx].cw, cw, 16);
	er->vote_pool[free_idx].votes = 1;
	er->vote_pool[free_idx].local_votes = is_local ? 1 : 0;
	// Store the actual reader if available, otherwise NULL
	er->vote_pool[free_idx].voters[0] = rdr;

	if (cfg.cwvote_log_enabled) {
		cs_log("[Ai_vote_add] New CW in slot %d → Votes: %d (local: %d) from %s",
			free_idx, 1, er->vote_pool[free_idx].local_votes, source_label);
	}
	return 0;
}
int cw_vote_decide(struct ecm_request_t *er)
{
if (!cfg.cwvote_enabled)
return 0;
int i, total_votes = 0, best = -1;
int best_score = -1;
char cw_hex[33];
struct timeb now_tb;
cs_ftime(&now_tb); // aktualny czas
// Logowanie wyłączone - odkomentuj jeśli potrzebne
// if (cfg.cwvote_log_enabled) {
// cs_log("[Ai_vote_decide] Starting vote for client %s (CAID: %04X, SRVID: %04X)",
// (check_client(er->client) ? er->client->account->usr : "-"),
// er->caid, er->srvid);
// }
int max_cand = cfg.cwvote_max_candidates;
float local_weight = cfg.cwvote_local_weight;
int min_votes = cfg.cwvote_min_votes;
int timeout = cfg.cwvote_timeout;
int fallback = cfg.cwvote_fallback;
// Liczymy total_votes i logujemy kandydatów
for (i = 0; i < max_cand; i++) {
if (er->vote_pool[i].votes > 0) {
total_votes += er->vote_pool[i].votes;
if (cfg.cwvote_log_enabled) {
cs_hexdump(0, er->vote_pool[i].cw, 16, cw_hex, sizeof(cw_hex));
cs_log("[Ai_vote_decide] Candidate %d: CW: %s | Votes: %d (local: %d)",
i, cw_hex, er->vote_pool[i].votes, er->vote_pool[i].local_votes);
}
}
}
// Logowanie podsumowania - tylko gdy są jakieś głosy
if (cfg.cwvote_log_enabled && total_votes > 0) {
cs_log("[Ai_vote_decide] Total votes: %d | Min required: %d | Timeout: %d ms",
total_votes, min_votes, timeout);
}
// Szukamy najlepszego (z effective score) - MUSI być przed sprawdzeniem pojedynczego kandydata
for (i = 0; i < max_cand; i++) {
if (er->vote_pool[i].votes == 0) continue;
int effective_score = er->vote_pool[i].votes +
(int)(er->vote_pool[i].local_votes * local_weight);
if (effective_score > best_score) {
best_score = effective_score;
best = i;
}
}
// Sprawdź czy jest tylko jeden kandydat (jeden unikalny CW)
// Jeśli timeout == 0, przyjmij od razu - nie ma sensu czekać
// Jeśli timeout > 0, czekaj - może przyjdą inne klucze
if (total_votes == 1 && best >= 0 && timeout == 0) {
memcpy(er->cw, er->vote_pool[best].cw, 16);
// Update cacheex hit stats if the winning CW came from cacheex
if (cfg.cwvote_enabled && er->cacheex_src) {
struct s_client *src_cl = er->cacheex_src;
if (src_cl && src_cl->cwcacheexhit >= 0) {
src_cl->cwcacheexhit++;
if (src_cl->account) {
src_cl->account->cwcacheexhit++;
}
first_client->cwcacheexhit++;
}
}
if (cfg.cwvote_log_enabled) {
cs_hexdump(0, er->cw, 16, cw_hex, sizeof(cw_hex));
cs_log("[Ai_vote_decide] SINGLE CANDIDATE → Accepting CW: %s | Votes: %d (local: %d)",
cw_hex, er->vote_pool[best].votes, er->vote_pool[best].local_votes);
}
return 1;
}
// Za mało głosów
if (total_votes < min_votes) {
// Jeśli timeout minął, zaakceptuj pojedynczy głos jako fallback
if (timeout > 0 && comp_timeb(&now_tb, &er->tps) >= timeout) {
if (cfg.cwvote_log_enabled)
cs_log("[Ai_vote_decide] Timeout reached with too few votes (%d/%d) → accepting single candidate as fallback", total_votes, min_votes);
if (best >= 0) {
memcpy(er->cw, er->vote_pool[best].cw, 16);
// Update cacheex hit stats if the winning CW came from cacheex
if (cfg.cwvote_enabled && er->cacheex_src) {
struct s_client *src_cl = er->cacheex_src;
if (src_cl && src_cl->cwcacheexhit >= 0) {
src_cl->cwcacheexhit++;
if (src_cl->account) {
src_cl->account->cwcacheexhit++;
}
first_client->cwcacheexhit++;
}
}
if (cfg.cwvote_log_enabled) {
cs_hexdump(0, er->cw, 16, cw_hex, sizeof(cw_hex));
cs_log("[Ai_vote_decide] SINGLE CANDIDATE (fallback) → Accepting CW: %s | Votes: %d (local: %d)",
cw_hex, er->vote_pool[best].votes, er->vote_pool[best].local_votes);
}
return 1;
}
}
return 0;
}
if (best < 0)
return 0;
int majority_needed = total_votes / 2 + 1; // > 50%
bool has_majority = er->vote_pool[best].votes >= majority_needed;
bool timeout_reached = (timeout > 0 && comp_timeb(&now_tb, &er->tps) >= timeout);
if (has_majority || (timeout_reached && fallback == 1)) {
memcpy(er->cw, er->vote_pool[best].cw, 16);
// Update cacheex hit stats if the winning CW came from cacheex
if (cfg.cwvote_enabled && er->cacheex_src) {
struct s_client *src_cl = er->cacheex_src;
if (src_cl && src_cl->cwcacheexhit >= 0) {
src_cl->cwcacheexhit++;
if (src_cl->account) {
src_cl->account->cwcacheexhit++;
}
first_client->cwcacheexhit++;
}
}
if (cfg.cwvote_log_enabled) {
cs_hexdump(0, er->cw, 16, cw_hex, sizeof(cw_hex));
cs_log("[Ai_vote_decide] WINNER → CW: %s | Votes: %d (local: %d) | Effective: %d",
cw_hex, er->vote_pool[best].votes, er->vote_pool[best].local_votes, best_score);
}
return 1; // decyzja podjęta
}
if (timeout_reached && fallback == 2) {
// fallback = weź pierwszy CW (slot 0)
if (er->vote_pool[0].votes > 0) {
memcpy(er->cw, er->vote_pool[0].cw, 16);
// Update cacheex hit stats if the winning CW came from cacheex
if (cfg.cwvote_enabled && er->cacheex_src) {
struct s_client *src_cl = er->cacheex_src;
if (src_cl && src_cl->cwcacheexhit >= 0) {
src_cl->cwcacheexhit++;
if (src_cl->account) {
src_cl->account->cwcacheexhit++;
}
first_client->cwcacheexhit++;
}
}
if (cfg.cwvote_log_enabled) cs_log("[Ai_vote_decide] Fallback: taking first CW");
return 1;
}
}
if (cfg.cwvote_log_enabled)
cs_log("[Ai_vote_decide] No clear winner yet... waiting");
return 0;
}
/* =========================
 * AI voting helper functions
 * ========================= */
// Check whether CW voting applies to this request's CAID.
// An empty cfg.cwvote_caids list means voting is active for every CAID.
// Returns 1 when the CAID participates in voting, 0 otherwise.
int32_t is_cwvote_caid(ECM_REQUEST *er)
{
	int32_t idx;

	// No CAID filter configured → voting applies everywhere
	if(cfg.cwvote_caids.cvcnum == 0)
		{ return 1; }

	for(idx = 0; idx < cfg.cwvote_caids.cvcnum; idx++)
	{
		if(er->caid == cfg.cwvote_caids.cvcdata[idx].caid)
			{ return 1; }
	}

	return 0;
}
/* ===== END CW VOTING ===== */