4168 lines
115 KiB
C
Executable File
4168 lines
115 KiB
C
Executable File
#define MODULE_LOG_PREFIX "ecm"
|
|
|
|
#include "globals.h"
|
|
#include <math.h>
|
|
// ---------- ENTROPY FUNCTIONS START ----------
|
|
|
|
static double entropy_calc(const uint8_t *data, int len)
|
|
{
|
|
if(!data || len <= 0)
|
|
return 0.0;
|
|
|
|
int freq[256] = {0};
|
|
|
|
for(int i = 0; i < len; i++)
|
|
freq[data[i]]++;
|
|
|
|
double entropy = 0.0;
|
|
|
|
for(int i = 0; i < 256; i++)
|
|
{
|
|
if(freq[i] == 0)
|
|
continue;
|
|
|
|
double p = (double)freq[i] / len;
|
|
entropy -= p * (log(p) / log(2));
|
|
}
|
|
|
|
return entropy;
|
|
}
|
|
|
|
/**
 * Trace-log the Shannon entropy of an ECM's control word: the full 16-byte
 * CW plus each 8-byte half separately.
 */
static void entropy_log_ecm(ECM_REQUEST *er, struct s_reader *reader)
{
	if(!er)
		{ return; }

	const char *label = reader ? reader->label : "?";
	double full  = entropy_calc(er->cw, 16);
	double half1 = entropy_calc(er->cw, 8);
	double half2 = entropy_calc(er->cw + 8, 8);

	cs_log_dbg(D_TRACE,
			"ENTROPY reader=%s caid=%04X FULL=%.4f HALF1=%.4f HALF2=%.4f",
			label, er->caid, full, half1, half2);
}
|
|
|
|
// ---------- ENTROPY FUNCTIONS END ----------
|
|
#define ENTROPY_THRESHOLD_DEFAULT 3.0
|
|
#define ENTROPY_THRESHOLD_NDS 2.3
|
|
|
|
#include "cscrypt/md5.h"
|
|
#include "module-anticasc.h"
|
|
#include "module-cacheex.h"
|
|
#include "module-led.h"
|
|
#include "module-stat.h"
|
|
#include "module-webif.h"
|
|
#include "module-cw-cycle-check.h"
|
|
#include "module-gbox.h"
|
|
#include "oscam-cache.h"
|
|
#include "oscam-chk.h"
|
|
#include "oscam-client.h"
|
|
#include "oscam-config.h"
|
|
#include "oscam-ecm.h"
|
|
#include "oscam-garbage.h"
|
|
#include "oscam-failban.h"
|
|
#include "oscam-net.h"
|
|
#include "oscam-time.h"
|
|
#include "oscam-lock.h"
|
|
#include "oscam-string.h"
|
|
#include "oscam-work.h"
|
|
#include "reader-common.h"
|
|
#include "module-cccam-data.h"
|
|
#ifdef CS_CACHEEX_AIO
|
|
#include "oscam-hashtable.h"
|
|
#endif
|
|
|
|
// ADD THIS LINE:
|
|
static int cw_checksum_ok(uint8_t *cw);
|
|
|
|
/////////////////////////////////////////////////////////////
|
|
// AUTODETECT STRUCT AND TABLE (ADD THIS HERE)
|
|
/////////////////////////////////////////////////////////////
|
|
|
|
#define CW_DETECT_MAX 8192
|
|
#define CW_LEARN_THRESHOLD 5
|
|
|
|
typedef struct
|
|
{
|
|
uint16_t caid;
|
|
uint32_t prid;
|
|
uint16_t srvid;
|
|
|
|
uint8_t is64;
|
|
uint8_t is48;
|
|
|
|
uint8_t learn64_count;
|
|
uint8_t learn48_count;
|
|
|
|
uint8_t strict_logged;
|
|
|
|
} cw_detect_entry;
|
|
|
|
|
|
static cw_detect_entry cw_detect_table[CW_DETECT_MAX];
|
|
|
|
|
|
/////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
// GET OR CREATE ENTRY
|
|
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
/**
 * Find (or create) the autodetect table entry for this ECM's
 * caid/prid/srvid triple. Single pass: while searching for a match we
 * remember the first unused slot (caid == 0) so a new entry can be
 * claimed without re-scanning. If the table is completely full the
 * function falls back to slot 0, matching the previous behavior.
 */
static cw_detect_entry *cw_detect_get(ECM_REQUEST *er)
{
	int idx;
	int first_free = -1;

	// Search for an existing entry, remembering the first free slot seen
	for(idx = 0; idx < CW_DETECT_MAX; idx++)
	{
		cw_detect_entry *cur = &cw_detect_table[idx];

		if(cur->caid == er->caid && cur->prid == er->prid && cur->srvid == er->srvid)
			{ return cur; }

		if(first_free < 0 && cur->caid == 0)
			{ first_free = idx; }
	}

	// No match found: claim the first unused slot and initialize it
	if(first_free >= 0)
	{
		cw_detect_entry *cur = &cw_detect_table[first_free];

		cur->caid = er->caid;
		cur->prid = er->prid;
		cur->srvid = er->srvid;

		cur->is64 = 0;
		cur->is48 = 0;

		cur->learn64_count = 0;
		cur->learn48_count = 0;

		cur->strict_logged = 0;

		return cur;
	}

	// Table exhausted: fall back to slot 0 (shared, may belong to another
	// service — same fallback the original code used)
	return &cw_detect_table[0];
}
|
|
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
// CW CHECKSUM FUNCTION (ADD THIS BELOW cw_detect_get)
|
|
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
/**
 * Validate the classic DVB control-word layout: the 16-byte CW consists of
 * four 4-byte groups, each holding 3 data bytes followed by a checksum byte
 * equal to their sum modulo 256.
 *
 * @return 1 if all four checksum bytes are correct, 0 otherwise
 */
static int cw_checksum_ok(uint8_t *cw)
{
	int block;
	for(block = 0; block < 16; block += 4)
	{
		uint8_t sum = (uint8_t)(cw[block] + cw[block + 1] + cw[block + 2]);
		if(sum != cw[block + 3])
			{ return 0; }
	}
	return 1;
}
|
|
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
extern CS_MUTEX_LOCK ecmcache_lock;
|
|
extern struct ecm_request_t *ecmcwcache;
|
|
extern uint32_t ecmcwcache_size;
|
|
extern int32_t exit_oscam;
|
|
|
|
extern CS_MUTEX_LOCK ecm_pushed_deleted_lock;
|
|
extern struct ecm_request_t *ecm_pushed_deleted;
|
|
|
|
static pthread_mutex_t cw_process_sleep_cond_mutex;
|
|
static pthread_cond_t cw_process_sleep_cond;
|
|
static int cw_process_wakeups;
|
|
int64_t ecmc_next, cache_next, msec_wait = 3000;
|
|
|
|
#ifdef CS_CACHEEX_AIO
|
|
// ecm-cache
|
|
typedef struct ecm_cache
|
|
{
|
|
struct timeb first_recv_time;// time of first cw received
|
|
struct timeb upd_time; // updated time. Update time at each cw got
|
|
uint32_t csp_hash;
|
|
node ht_node;
|
|
node ll_node;
|
|
} ECM_CACHE;
|
|
|
|
static pthread_rwlock_t ecm_cache_lock;
|
|
static hash_table ht_ecm_cache;
|
|
static list ll_ecm_cache;
|
|
static int8_t ecm_cache_init_done = 0;
|
|
|
|
// Tear down the AIO ECM cache: deinitialize the hash table and destroy the
// rwlock that protected it. Counterpart of init_ecm_cache().
void free_ecm_cache(void)
{
	deinitialize_hash_table(&ht_ecm_cache);
	pthread_rwlock_destroy(&ecm_cache_lock);
}
|
|
|
|
// Initialize the AIO ECM cache (hash table + list + rwlock).
// Only active when CS_CACHEEX is compiled in AND a cw-cache size/memory
// limit is configured; sets ecm_cache_init_done on success so the other
// cache functions become operational.
void init_ecm_cache(void)
{
#ifdef CS_CACHEEX
	if(cfg.cw_cache_size > 0 || cfg.cw_cache_memory > 0)
	{
		init_hash_table(&ht_ecm_cache, &ll_ecm_cache);
		if (pthread_rwlock_init(&ecm_cache_lock,NULL) != 0)
			{ cs_log("Error creating lock ecm_cache_lock!"); }
		else
			{ ecm_cache_init_done = 1; }
	}
#endif
}
|
|
|
|
// Comparator for sort_list(): orders ECM cache entries by their upd_time
// (converted to milliseconds since epoch). Returns 0 when both timestamps
// are equal.
// NOTE(review): the return type is uint8_t, so the literal -1 below is
// converted to 255. If sort_list() expects a signed three-way comparator,
// the ordering direction for the "a newer than b" case is effectively
// inverted/undistinguished — confirm sort_list()'s comparator contract.
static uint8_t time_sort(ECM_CACHE *a, ECM_CACHE *b)
{
	if (((int64_t)(a->upd_time.time) * 1000ull + (int64_t) a->upd_time.millitm) == ((int64_t)(b->upd_time.time) * 1000ull + (int64_t) b->upd_time.millitm)) return 0;
	return (((int64_t)(a->upd_time.time) * 1000ull + (int64_t) a->upd_time.millitm) > ((int64_t)(b->upd_time.time) * 1000ull + (int64_t) b->upd_time.millitm)) ? -1 : 1;
}
|
|
|
|
static int compare_csp_hash_ecmcache(const void *arg, const void *obj)
|
|
{
|
|
uint32_t h = ((const ECM_CACHE*)obj)->csp_hash;
|
|
return memcmp(arg, &h, 4);
|
|
}
|
|
|
|
// Evict entries from the AIO ECM cache.
//   force == true : remove and free every entry.
//   force == false: sort the list with time_sort() and remove roughly the
//                   first 10% of entries in sorted order, then stop.
// NOTE(review): which end of the list the non-force path trims (oldest vs
// newest) depends on sort_list()/time_sort() ordering — see the uint8_t
// return-type note on time_sort(); confirm the intended direction.
// No-op until init_ecm_cache() has completed successfully.
void ecm_cache_cleanup(bool force)
{
	if(!ecm_cache_init_done)
		{ return; }

	SAFE_RWLOCK_WRLOCK(&ecm_cache_lock);

	ECM_CACHE *ecm_cache;
	node *i, *i_next;
	uint32_t ll_c = 0;
	uint32_t ll_ten_percent = (uint)tommy_list_count(&ll_ecm_cache)*0.1; // 10 percent of cache

	// Only the partial (10%) cleanup needs the list in time order
	if(!force)
		sort_list(&ll_ecm_cache, time_sort);

	i = get_first_node_list(&ll_ecm_cache);
	while(i)
	{
		// Save successor first: the current node may be freed below
		i_next = i->next;

		ecm_cache = get_data_from_node(i);

		if(!ecm_cache)
		{
			i = i_next;
			continue;
		}
		if(!force)
		{
			++ll_c;

			// Remove until the 10% quota is reached, then stop early
			if(ll_c < ll_ten_percent)
			{
				remove_elem_list(&ll_ecm_cache, &ecm_cache->ll_node);
				remove_elem_hash_table(&ht_ecm_cache, &ecm_cache->ht_node);
				NULLFREE(ecm_cache);
			}
			else{
				break;
			}
		}
		else{
			// force: unconditionally drop every entry
			remove_elem_list(&ll_ecm_cache, &ecm_cache->ll_node);
			remove_elem_hash_table(&ht_ecm_cache, &ecm_cache->ht_node);
			NULLFREE(ecm_cache);
		}
		i = i_next;
	}

	SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
}
|
|
#endif
|
|
|
|
/**
|
|
* maxparallel - Helper: Calculate pending slots array size
|
|
* Formula: round(maxparallel * parallelfactor)
|
|
* parallelfactor <= 0 means no pending slots (zapping disabled)
|
|
*/
|
|
static inline int32_t get_pending_size(struct s_reader *rdr)
|
|
{
|
|
// parallelfactor <= 0 means no pending slots (disabled or not configured)
|
|
if(rdr->parallelfactor <= 0.0f)
|
|
return 0;
|
|
int32_t size = (int32_t)(rdr->maxparallel * rdr->parallelfactor + 0.5f);
|
|
return (size < 1) ? 1 : size;
|
|
}
|
|
|
|
/**
|
|
* maxparallel - Helper: Check if slot is expired
|
|
* Returns timeout threshold in ms, 0 if no timeout check needed
|
|
*/
|
|
static int32_t get_slot_timeout(struct s_parallel_slot *slot, int32_t paralleltimeout)
|
|
{
|
|
if(slot->ecm_interval > 0)
|
|
return slot->ecm_interval + paralleltimeout;
|
|
else
|
|
return 10000 + paralleltimeout; // Default 10s if no interval measured
|
|
}
|
|
|
|
/**
|
|
* maxparallel - Helper: Clear a slot
|
|
*/
|
|
static void clear_slot(struct s_parallel_slot *slot)
|
|
{
|
|
slot->srvid = 0;
|
|
slot->ecm_interval = 0;
|
|
slot->client = NULL;
|
|
memset(&slot->last_ecm, 0, sizeof(struct timeb));
|
|
}
|
|
|
|
/**
|
|
* maxparallel - Helper: Add client+service to blocked list
|
|
* Replaces any existing entry for the same client
|
|
*/
|
|
static void block_client_service(struct s_reader *rdr, struct s_client *client, uint16_t srvid)
|
|
{
|
|
if(!rdr || !client || !rdr->blocked_services)
|
|
return;
|
|
|
|
// Check if client already has a blocked entry - if so, update it
|
|
LL_ITER it = ll_iter_create(rdr->blocked_services);
|
|
struct s_blocked_client *bc;
|
|
while((bc = ll_iter_next(&it)))
|
|
{
|
|
if(bc->client == client)
|
|
{
|
|
bc->srvid = srvid;
|
|
return;
|
|
}
|
|
}
|
|
|
|
// Add new entry
|
|
if(!cs_malloc(&bc, sizeof(struct s_blocked_client)))
|
|
return;
|
|
bc->client = client;
|
|
bc->srvid = srvid;
|
|
ll_append(rdr->blocked_services, bc);
|
|
}
|
|
|
|
/**
|
|
* maxparallel - Helper: Check if client+service is blocked
|
|
* Returns: 1 = blocked, 0 = not blocked
|
|
*/
|
|
static int8_t is_client_blocked(struct s_reader *rdr, struct s_client *client, uint16_t srvid)
|
|
{
|
|
if(!rdr || !client || !rdr->blocked_services)
|
|
return 0;
|
|
|
|
LL_ITER it = ll_iter_create(rdr->blocked_services);
|
|
struct s_blocked_client *bc;
|
|
while((bc = ll_iter_next(&it)))
|
|
{
|
|
if(bc->client == client && bc->srvid == srvid)
|
|
return 1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* maxparallel - Helper: Remove block for client if zapping to different service
|
|
* Only unblocks if the new service is DIFFERENT from the blocked one
|
|
*/
|
|
static void unblock_client_if_different(struct s_reader *rdr, struct s_client *client, uint16_t new_srvid)
|
|
{
|
|
if(!rdr || !client || !rdr->blocked_services)
|
|
return;
|
|
|
|
LL_ITER it = ll_iter_create(rdr->blocked_services);
|
|
struct s_blocked_client *bc;
|
|
while((bc = ll_iter_next(&it)))
|
|
{
|
|
if(bc->client == client)
|
|
{
|
|
// Only unblock if zapping to a DIFFERENT service
|
|
if(bc->srvid != new_srvid)
|
|
{
|
|
ll_iter_remove_data(&it);
|
|
}
|
|
return;
|
|
}
|
|
}
|
|
}
|
|
|
|
/**
|
|
* maxparallel - Helper: Clear all blocked entries (when active slot becomes free)
|
|
*/
|
|
static void clear_blocked_services(struct s_reader *rdr)
|
|
{
|
|
if(!rdr || !rdr->blocked_services)
|
|
return;
|
|
|
|
ll_clear_data(rdr->blocked_services);
|
|
}
|
|
|
|
/**
 * maxparallel - Cleanup expired slots in both arrays and promote pending.
 * - Removes expired slots from parallel_slots (active) and
 *   parallel_slots_prov (pending)
 * - Upgrades pending to active when space becomes available (FIFO:
 *   oldest pending first)
 * - Does NOT drop pending (that's done separately, in
 *   reader_drop_pending_nolock(), when active services send ECMs)
 * - Clears the blocked-client list when active capacity opens up
 * MUST be called with parallel_lock held (write lock).
 * Returns: count of active slots after cleanup
 */
static int32_t reader_cleanup_slots_nolock(struct s_reader *rdr, struct timeb *now)
{
	if(!rdr || rdr->maxparallel <= 0)
		return 0;

	int32_t i;
	int32_t active_count = 0;
	int32_t pending_count = 0;
	int32_t free_active = -1; // First empty slot in active array

	// Pass 1: Clean expired slots in active array, count still-active ones
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid == 0)
		{
			if(free_active < 0)
				free_active = i;
			continue;
		}

		// Expired = no ECM seen for longer than the slot's timeout window
		int64_t gone = comp_timeb(now, &rdr->parallel_slots[i].last_ecm);
		int32_t timeout = get_slot_timeout(&rdr->parallel_slots[i], rdr->paralleltimeout);

		if(gone > timeout)
		{
			cs_log_dbg(D_READER, "reader %s: service %04X expired (no ECM for %"PRId64" ms, timeout %d ms)",
					rdr->label, rdr->parallel_slots[i].srvid, gone, timeout);
			clear_slot(&rdr->parallel_slots[i]);
			if(free_active < 0)
				free_active = i;
		}
		else
		{
			active_count++;
		}
	}

	// Pass 2: Clean expired slots in pending array, count survivors
	int32_t pending_size = get_pending_size(rdr);
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid == 0)
			continue;

		int64_t gone = comp_timeb(now, &rdr->parallel_slots_prov[i].last_ecm);
		int32_t timeout = get_slot_timeout(&rdr->parallel_slots_prov[i], rdr->paralleltimeout);

		if(gone > timeout)
		{
			cs_log_dbg(D_READER, "reader %s: pending service %04X expired (no ECM for %"PRId64" ms, timeout %d ms)",
					rdr->label, rdr->parallel_slots_prov[i].srvid, gone, timeout);
			clear_slot(&rdr->parallel_slots_prov[i]);
		}
		else
		{
			pending_count++;
		}
	}

	// Pass 3: Upgrade pending to active if space available (FIFO)
	// This handles the zapping case: old service expired, new one can be promoted
	while(active_count < rdr->maxparallel && pending_count > 0)
	{
		// Find oldest pending (largest age = first to arrive = FIFO upgrade)
		int32_t oldest_idx = -1;
		int64_t oldest_time = -1;

		for(i = 0; i < pending_size; i++)
		{
			if(rdr->parallel_slots_prov[i].srvid != 0)
			{
				int64_t age = comp_timeb(now, &rdr->parallel_slots_prov[i].last_ecm);
				if(oldest_idx < 0 || age > oldest_time)
				{
					oldest_idx = i;
					oldest_time = age;
				}
			}
		}

		if(oldest_idx < 0)
			break;

		// Find empty slot in active array (free_active may have been
		// consumed by a previous promotion in this loop)
		if(free_active < 0)
		{
			for(i = 0; i < rdr->maxparallel; i++)
			{
				if(rdr->parallel_slots[i].srvid == 0)
				{
					free_active = i;
					break;
				}
			}
		}

		if(free_active < 0)
			break; // Should not happen, but safety check

		// Move pending to active (struct copy), then free the pending slot
		rdr->parallel_slots[free_active] = rdr->parallel_slots_prov[oldest_idx];
		clear_slot(&rdr->parallel_slots_prov[oldest_idx]);

		cs_log_dbg(D_READER, "reader %s: service %04X promoted from pending to active (slot %d)",
				rdr->label, rdr->parallel_slots[free_active].srvid, free_active);

		active_count++;
		pending_count--;
		free_active = -1; // Need to find next empty slot
	}

	// If active slots are now available, clear blocked list
	// This allows previously blocked clients to try again
	if(active_count < rdr->maxparallel)
	{
		clear_blocked_services(rdr);
	}

	return active_count;
}
|
|
|
|
/**
 * maxparallel - Drop pending services when active array is full (FIFO)
 * Called only when an ACTIVE service receives an ECM, proving it's still active.
 * Drops the OLDEST pending first (first in, first out)
 * This is fair: first to request overload is first to be denied.
 * Dropped clients are added to the blocked list so they fall over to other readers.
 * MUST be called with parallel_lock held (write lock).
 */
static void reader_drop_pending_nolock(struct s_reader *rdr, struct timeb *now)
{
	if(!rdr || rdr->maxparallel <= 0)
		return;

	// Count active and pending
	int32_t i;
	int32_t active_count = 0;
	int32_t pending_count = 0;
	int32_t pending_size = get_pending_size(rdr);

	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid != 0)
			active_count++;
	}
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid != 0)
			pending_count++;
	}

	// Drop pending if active array is full (FIFO - oldest dropped first)
	while(active_count >= rdr->maxparallel && pending_count > 0)
	{
		// Find oldest pending (largest age = first to arrive = drop first)
		int32_t oldest_idx = -1;
		int64_t oldest_time = -1;

		for(i = 0; i < pending_size; i++)
		{
			if(rdr->parallel_slots_prov[i].srvid != 0)
			{
				int64_t age = comp_timeb(now, &rdr->parallel_slots_prov[i].last_ecm);
				if(oldest_idx < 0 || age > oldest_time)
				{
					oldest_idx = i;
					oldest_time = age;
				}
			}
		}

		if(oldest_idx < 0)
			break;

		// Add client to blocked list before clearing slot, so the client
		// falls over to other readers instead of retrying this one
		struct s_parallel_slot *slot = &rdr->parallel_slots_prov[oldest_idx];
		if(slot->client)
		{
			block_client_service(rdr, slot->client, slot->srvid);
		}

		cs_log("reader %s: dropped pending service %04X (%.3f sec old, active services still running)",
				rdr->label, slot->srvid, (float)oldest_time / 1000);
		clear_slot(slot);
		pending_count--;
	}
}
|
|
|
|
/**
 * maxparallel - Check if reader has capacity for a service (does NOT reserve slot)
 * Used during ECM request to decide if reader should be tried.
 * Also checks if client+service is blocked (was dropped earlier).
 * Takes parallel_lock as a read lock; slots are only inspected, never modified.
 * Returns: 1 = has capacity (or service already registered), 0 = full or blocked (skip reader)
 */
static int8_t reader_has_capacity(struct s_reader *rdr, uint16_t srvid, struct s_client *client)
{
	// maxparallel <= 0 means the feature is off: capacity is unlimited
	if(!rdr || rdr->maxparallel <= 0)
		return 1; // unlimited

	cs_readlock(__func__, &rdr->parallel_lock);

	struct timeb now;
	cs_ftime(&now);

	int32_t i;
	int32_t pending_size = get_pending_size(rdr);
	int8_t result = 0;

	// Check if client+service is blocked (was dropped earlier)
	if(is_client_blocked(rdr, client, srvid))
	{
		result = 0;
		goto done;
	}

	// Check if service already registered in active array
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid == srvid)
		{
			result = 1;
			goto done;
		}
	}

	// Check if service already registered in pending array
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid == srvid)
		{
			result = 1;
			goto done;
		}
	}

	// Count active slots (expired slots don't count against capacity,
	// even though only the write path actually clears them)
	int32_t active_count = 0;
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid != 0)
		{
			int64_t gone = comp_timeb(&now, &rdr->parallel_slots[i].last_ecm);
			int32_t timeout = get_slot_timeout(&rdr->parallel_slots[i], rdr->paralleltimeout);
			if(gone <= timeout)
				active_count++;
		}
	}

	// Space in active array?
	if(active_count < rdr->maxparallel)
	{
		result = 1;
		goto done;
	}

	// Space in pending array?
	if(pending_size > 0)
	{
		int32_t pending_count = 0;
		for(i = 0; i < pending_size; i++)
		{
			if(rdr->parallel_slots_prov[i].srvid != 0)
			{
				int64_t gone = comp_timeb(&now, &rdr->parallel_slots_prov[i].last_ecm);
				int32_t timeout = get_slot_timeout(&rdr->parallel_slots_prov[i], rdr->paralleltimeout);
				if(gone <= timeout)
					pending_count++;
			}
		}
		if(pending_count < pending_size)
		{
			result = 1;
			goto done;
		}
	}

	// No capacity available
	result = 0;

done:
	cs_readunlock(__func__, &rdr->parallel_lock);
	return result;
}
|
|
|
|
/**
 * maxparallel - Register a service on a reader (called when CW is found)
 * Uses dual-array architecture:
 * - parallel_slots: active services (the limit)
 * - parallel_slots_prov: pending services during zapping
 * New services go to active if space available, otherwise pending.
 * Existing slots get their last_ecm timestamp and ECM-interval estimate
 * refreshed. Takes parallel_lock as a write lock.
 * Returns: 1 = registered, 0 = failed (no slot in either array)
 */
static int8_t reader_register_service(struct s_reader *rdr, uint16_t srvid, struct s_client *client)
{
	if(!rdr || rdr->maxparallel <= 0)
		return 1; // unlimited, always OK

	cs_writelock(__func__, &rdr->parallel_lock);

	// If client is registering a DIFFERENT service, unblock them
	// (they zapped away from the blocked service)
	unblock_client_if_different(rdr, client, srvid);

	struct timeb now;
	cs_ftime(&now);

	// Cleanup expired slots and handle promotions/drops
	int32_t active_count = reader_cleanup_slots_nolock(rdr, &now);

	int32_t i;
	int32_t pending_size = get_pending_size(rdr);

	// Search for existing slot in both arrays
	int32_t existing_active = -1;
	int32_t existing_pending = -1;
	int32_t free_active = -1;
	int32_t free_pending = -1;

	// Search active array (remember the first free slot on the way)
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid == srvid)
			existing_active = i;
		else if(free_active < 0 && rdr->parallel_slots[i].srvid == 0)
			free_active = i;
	}

	// Search pending array
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid == srvid)
			existing_pending = i;
		else if(free_pending < 0 && rdr->parallel_slots_prov[i].srvid == 0)
			free_pending = i;
	}

	struct s_parallel_slot *slot = NULL;
	int8_t is_new = 0;
	int8_t is_pending = 0;
	int32_t slot_idx = -1;
	const char *array_name = "active";

	if(existing_active >= 0)
	{
		// Already in active array - just update
		slot = &rdr->parallel_slots[existing_active];
		slot_idx = existing_active;
		is_new = 0;
	}
	else if(existing_pending >= 0)
	{
		// Already in pending array - update there
		slot = &rdr->parallel_slots_prov[existing_pending];
		slot_idx = existing_pending;
		is_new = 0;
		is_pending = 1;
		array_name = "pending";
	}
	else if(active_count < rdr->maxparallel && free_active >= 0)
	{
		// New service, space in active array
		slot = &rdr->parallel_slots[free_active];
		slot_idx = free_active;
		slot->srvid = srvid;
		slot->ecm_interval = 0;
		slot->client = client;
		is_new = 1;
	}
	else if(free_pending >= 0)
	{
		// New service, active full -> put in pending
		slot = &rdr->parallel_slots_prov[free_pending];
		slot_idx = free_pending;
		slot->srvid = srvid;
		slot->ecm_interval = 0;
		slot->client = client;
		is_new = 1;
		is_pending = 1;
		array_name = "pending";
	}
	else
	{
		// No slot available in either array
		cs_writeunlock(__func__, &rdr->parallel_lock);
		cs_log("reader %s: no slot available for service %04X (all %d+%d slots used)",
				rdr->label, srvid, rdr->maxparallel, pending_size);
		return 0;
	}

	// Update interval estimate for existing slots: running average of the
	// time between consecutive ECMs for this service
	if(!is_new && slot->last_ecm.time > 0)
	{
		int64_t interval = comp_timeb(&now, &slot->last_ecm);
		if(interval > 0 && interval < 30000) // Sanity: < 30 seconds
		{
			if(slot->ecm_interval == 0)
				slot->ecm_interval = (int32_t)interval;
			else
				slot->ecm_interval = (slot->ecm_interval + (int32_t)interval) / 2;
		}
	}

	slot->last_ecm = now;

	// If this is an ACTIVE service (not pending), drop pending if needed
	// This ensures pending are only dropped when active services prove they're still active
	if(!is_pending)
	{
		reader_drop_pending_nolock(rdr, &now);
	}

	// Recount for logging (drop may have changed counts)
	int32_t final_active = 0, final_pending = 0;
	for(i = 0; i < rdr->maxparallel; i++)
	{
		if(rdr->parallel_slots[i].srvid != 0)
			final_active++;
	}
	for(i = 0; i < pending_size; i++)
	{
		if(rdr->parallel_slots_prov[i].srvid != 0)
			final_pending++;
	}

	if(is_new)
	{
		if(final_pending > 0)
		{
			cs_log_dbg(D_READER, "reader %s: registered service %04X in %s slot %d (%d/%d active, +%d pending)",
					rdr->label, srvid, array_name, slot_idx, final_active, rdr->maxparallel, final_pending);
		}
		else
		{
			cs_log_dbg(D_READER, "reader %s: registered service %04X in %s slot %d (%d/%d active)",
					rdr->label, srvid, array_name, slot_idx, final_active, rdr->maxparallel);
		}

		// Log "now full" only once per state change, and only for active services
		if(!is_pending && final_active >= rdr->maxparallel && !rdr->parallel_full)
		{
			rdr->parallel_full = 1;
			cs_log("reader %s: now full (%d/%d active)",
					rdr->label, final_active, rdr->maxparallel);
		}
	}

	// Reset full flag when capacity becomes available
	if(final_active < rdr->maxparallel && rdr->parallel_full)
	{
		rdr->parallel_full = 0;
		cs_log_dbg(D_READER, "reader %s: capacity available (%d/%d active)",
				rdr->label, final_active, rdr->maxparallel);
	}

	cs_writeunlock(__func__, &rdr->parallel_lock);
	return 1;
}
|
|
|
|
// Fallback-timeout handler for an ECM request: if the request is still
// unanswered (rc >= E_UNHANDLED) and hasn't reached the final stage,
// advance it through the remaining stages so fallback readers get asked.
// NOTE(review): the loop relies on request_cw_from_readers() incrementing
// er->stage on every call; if it ever fails to do so this spins forever —
// confirm that invariant in request_cw_from_readers().
void fallback_timeout(ECM_REQUEST *er)
{
	if(er->rc >= E_UNHANDLED && er->stage < 4)
	{
		cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} fallback timeout! (stage: %d)", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, er->stage);
		debug_ecm(D_TRACE, "fallback for %s %s", username(er->client), buf);
		while(er->stage < 4) // if preferlocalcards=1 and no answer from locals, initial stage will be 2! We need to reach stage=4 to call fallback's.
		{
			request_cw_from_readers(er, 0);
		}
	}
}
|
|
|
|
// Client-timeout handler for an ECM request. Runs at most once per request
// (guarded by readers_timeout_check): marks every reader that was sent the
// request but never answered as E_TIMEOUT, then delivers an E_TIMEOUT
// result to the requesting client.
void ecm_timeout(ECM_REQUEST *er)
{
	if(!er->readers_timeout_check)
	{
		// Latch first so a second timeout trigger for this ECM is a no-op
		er->readers_timeout_check = 1;

		if(check_client(er->client) && er->rc >= E_UNHANDLED)
		{
			debug_ecm(D_TRACE, "timeout for %s %s", username(er->client), buf);

			// set timeout for readers not answering
			struct s_ecm_answer *ea_list;
			for(ea_list = er->matching_rdr; ea_list; ea_list = ea_list->next)
			{
				if((ea_list->status & (REQUEST_SENT | REQUEST_ANSWERED)) == REQUEST_SENT) // Request sent, but no answer!
				{
					write_ecm_answer(ea_list->reader, er, E_TIMEOUT, 0, NULL, NULL, 0, NULL); // set timeout for readers not answered!
				}
			}

			// send timeout to client!
			cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} client timeout! ", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
			er->rc = E_TIMEOUT;
			er->rcEx = 0;
			send_dcw(er->client, er);
		}
	}
}
|
|
|
|
void increment_n_request(struct s_client *cl)
|
|
{
|
|
if(check_client(cl))
|
|
{
|
|
cl->n_request[1]++;
|
|
first_client->n_request[1]++;
|
|
}
|
|
}
|
|
|
|
// Test one half of a 16-byte control word for content.
// part 0 selects bytes 0..7, part 1 selects bytes 8..15.
// Returns 1 if any byte in the selected half is non-zero, 0 otherwise.
uint8_t checkCWpart(uint8_t *cw, int8_t part)
{
	const uint8_t *half = cw + (part ? 8 : 0);
	int8_t idx;
	for(idx = 0; idx < 8; idx++)
	{
		if(half[idx] != 0)
			{ return 1; }
	}
	return 0;
}
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
// ===== CW AUTODETECT PATCH WITH LEARN THRESHOLD (48 + 64 LEARNING) =====
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
#define CW_LEARN_THRESHOLD 5
|
|
|
|
// Decide whether a received control word should be accepted, based on its
// checksum layout and a per-service learning table.
//   - Not whitelisted ("strict mode"): only CWs with valid DVB checksum
//     bytes ("48bit") are accepted; others are blocked (return 0).
//   - Whitelisted: both layouts are accepted; after CW_LEARN_THRESHOLD
//     sightings the service is locked as 64bit or 48bit in the table
//     (currently only used for logging).
// Returns 1 = accept CW, 0 = block CW.
// NOTE(review): if ECM_REQUEST.cw is declared as an array member, the
// "!er->cw" test is always false and only guards against er == NULL —
// confirm the field's declaration in globals.h.
static int cw_autodetect_and_block(struct s_reader *reader, ECM_REQUEST *er)
{
	if(!er || !er->cw)
		return 1;

	cw_detect_entry *e = cw_detect_get(er);

	const char *reader_name =
		(reader && reader->label) ? reader->label : "?";

	// Valid DVB checksum bytes -> classic 48-bit-entropy CW layout
	int checksum_ok = cw_checksum_ok(er->cw);

	// Whitelisted via global or per-reader disablecrccws_only_for setting
	int whitelist =
		chk_if_ignore_checksum(er, &cfg.disablecrccws_only_for) ||
		(reader && chk_if_ignore_checksum(er, &reader->disablecrccws_only_for));

	/////////////////////////////////////////////////////////////
	// STRICT MODE (NOT WHITELIST) -> ONLY 48bit ALLOWED
	/////////////////////////////////////////////////////////////

	if(!whitelist)
	{
		// Log the strict-mode decision only once per service entry
		if(!e->strict_logged)
		{
			cs_log("AUTODETECT STRICT 48bit MODE reader=%s (%04X@%06X/%04X)",
					reader_name, er->caid, er->prid, er->srvid);

			e->strict_logged = 1;
		}

		// block 64bit
		if(!checksum_ok)
		{
			cs_log("AUTODETECT BLOCK 64bit reader=%s (%04X@%06X/%04X)",
					reader_name, er->caid, er->prid, er->srvid);

			return 0;
		}

		return 1;
	}

	/////////////////////////////////////////////////////////////
	// WHITELIST MODE -> LEARN 48bit AND 64bit SEPARATELY
	/////////////////////////////////////////////////////////////

	// ---------- LEARN 64bit ----------
	if(!checksum_ok)
	{
		if(!e->is64)
		{
			if(e->learn64_count < CW_LEARN_THRESHOLD)
			{
				e->learn64_count++;

				cs_log("AUTODETECT LEARN 64bit STEP %d/%d reader=%s (%04X@%06X/%04X)",
						e->learn64_count,
						CW_LEARN_THRESHOLD,
						reader_name,
						er->caid,
						er->prid,
						er->srvid);
			}

			// Threshold reached: lock the service as 64bit
			if(e->learn64_count >= CW_LEARN_THRESHOLD)
			{
				e->is64 = 1;

				cs_log("AUTODETECT LOCKED 64bit reader=%s (%04X@%06X/%04X)",
						reader_name,
						er->caid,
						er->prid,
						er->srvid);
			}
		}

		return 1;
	}

	// ---------- LEARN 48bit ----------
	if(checksum_ok)
	{
		if(!e->is48)
		{
			if(e->learn48_count < CW_LEARN_THRESHOLD)
			{
				e->learn48_count++;

				cs_log("AUTODETECT LEARN 48bit STEP %d/%d reader=%s (%04X@%06X/%04X)",
						e->learn48_count,
						CW_LEARN_THRESHOLD,
						reader_name,
						er->caid,
						er->prid,
						er->srvid);
			}

			// Threshold reached: lock the service as 48bit
			if(e->learn48_count >= CW_LEARN_THRESHOLD)
			{
				e->is48 = 1;

				cs_log("AUTODETECT LOCKED 48bit reader=%s (%04X@%06X/%04X)",
						reader_name,
						er->caid,
						er->prid,
						er->srvid);
			}
		}

		return 1;
	}

	// Unreachable (checksum_ok is either true or false above) — kept for safety
	return 1;
}
|
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
// Roll the per-client ECM request counters over one measurement window:
// for every real (non-CSP, non-duplicate, non-cacheex-2/3) client, the
// running counter n_request[1] becomes the published value n_request[0]
// and the running counter restarts at 0. Other clients are zeroed.
// The global aggregate on first_client is rolled the same way.
void update_n_request(void)
{
	struct s_client *cl;

	cs_readlock(__func__, &clientlist_lock);
	for(cl = first_client->next; cl; cl = cl->next)
	{
#ifdef CS_CACHEEX
		if(check_client(cl) && get_module(cl)->num != R_CSP && cl->typ == 'c' && !cl->dup && cl->account && cl->account->cacheex.mode<=1) //no cacheex 2/3 client
#else
		if(check_client(cl) && get_module(cl)->num != R_CSP && cl->typ == 'c' && !cl->dup)
#endif
		{
			// Publish last window's count, restart the running counter
			cl->n_request[0] = cl->n_request[1];
			cl->n_request[1] = 0;
		}
		else
		{
			cl->n_request[0] = 0;
			cl->n_request[1] = 0;
		}
	}

	first_client->n_request[0] = first_client->n_request[1];
	first_client->n_request[1] = 0;

	cs_readunlock(__func__, &clientlist_lock);
}
|
|
|
|
/*
 * Housekeeping thread for outstanding ECM requests.
 *
 * Runs until exit_oscam is set. Each iteration it:
 *  1) walks the global ecm cache (ecmcwcache) and schedules jobs for requests
 *     that hit their cacheex wait time, cacheex mode-1 delay, fallback timeout
 *     or client timeout;
 *  2) periodically prunes expired entries from ecmcwcache (every ~1s) and,
 *     with CS_CACHEEX, from ecm_pushed_deleted;
 *  3) periodically runs cache cleanup (~3s), n_request accounting (~60s) and,
 *     with CS_ANTICASC, anticascading statistics;
 *  4) recomputes msec_wait (the sleep interval) as the soonest pending deadline.
 *
 * NOTE(review): msec_wait, ecmc_next, cache_next, cw_process_wakeups and the
 * sleep condvar are not declared here — presumably file-scope state shared with
 * cw_process_thread_wakeup(); confirm against the top of the file.
 * Returns NULL when oscam shuts down.
 */
static void *cw_process(void)
{
	set_thread_name(__func__);
	int64_t time_to_check_fbtimeout, time_to_check_ctimeout, next_check, n_request_next;
	struct timeb t_now, tbc, ecmc_time, cache_time, n_request_time;
	ECM_REQUEST *er = NULL;
	time_t ecm_maxcachetime;

#ifdef CS_CACHEEX
	int64_t time_to_check_cacheex_wait_time;
	int64_t time_to_check_cacheex_mode1_delay;
#endif

	cs_pthread_cond_init(__func__, &cw_process_sleep_cond_mutex, &cw_process_sleep_cond);

#ifdef CS_ANTICASC
	int32_t ac_next;
	struct timeb ac_time;
	cs_ftime(&ac_time);
	add_ms_to_timeb(&ac_time, cfg.ac_stime * 60 * 1000);
#endif

	// arm the periodic deadlines: ecm-cache prune (1s), cache cleanup (3s),
	// n_request accounting (60s)
	cs_ftime(&ecmc_time);
	add_ms_to_timeb(&ecmc_time, 1000);
	cs_ftime(&cache_time);
	add_ms_to_timeb(&cache_time, 3000);
	cs_ftime(&n_request_time);
	add_ms_to_timeb(&n_request_time, 60 * 1000);

	while(!exit_oscam)
	{
		if(cw_process_wakeups == 0) // No waiting wakeups, proceed to sleep
		{
			sleepms_on_cond(__func__, &cw_process_sleep_cond_mutex, &cw_process_sleep_cond, msec_wait);
		}
		cw_process_wakeups = 0; // We've been woken up, reset the counter
		if(exit_oscam)
			{ break; }

		next_check = 0;
#ifdef CS_ANTICASC
		ac_next = 0;
#endif
		ecmc_next = 0;
		cache_next = 0;
		msec_wait = 0;

		cs_ftime(&t_now);
		cs_readlock(__func__, &ecmcache_lock);
		for(er = ecmcwcache; er; er = er->next)
		{
			if((er->from_cacheex || er->from_csp) // ignore ecms from cacheex/csp
				|| er->readers_timeout_check // ignore already checked
				|| !check_client(er->client)) // ignore ecm of killed clients
			{
				continue;
			}

			if(er->rc >= E_UNHANDLED) // request still unanswered
			{
#ifdef CS_CACHEEX
				// cacheex_wait_time: fire a timeout job once the wait window expires
				if(er->cacheex_wait_time && !er->cacheex_wait_time_expired)
				{
					tbc = er->tps;
					time_to_check_cacheex_mode1_delay = 0;
					time_to_check_cacheex_wait_time = add_ms_to_timeb_diff(&tbc, lb_auto_timeout(er, er->cacheex_wait_time));

					if(comp_timeb(&t_now, &tbc) >= 0)
					{
						add_job(er->client, ACTION_CACHEEX_TIMEOUT, (void *)er, 0);
						time_to_check_cacheex_wait_time = 0; // handled; no deadline to track
					}
					else if(er->cacheex_mode1_delay && !er->stage && er->cacheex_reader_count>0)
					{
						// check for cacheex_mode1_delay
						tbc = er->tps;
						time_to_check_cacheex_mode1_delay = add_ms_to_timeb_diff(&tbc, lb_auto_timeout(er, er->cacheex_mode1_delay));

						if(comp_timeb(&t_now, &tbc) >= 0)
						{
							add_job(er->client, ACTION_CACHEEX1_DELAY, (void *)er, 0);
							time_to_check_cacheex_mode1_delay = 0;
						}
					}

					// keep next_check = soonest positive pending deadline
					if(!next_check || (time_to_check_cacheex_wait_time > 0 && time_to_check_cacheex_wait_time < next_check))
						{ next_check = time_to_check_cacheex_wait_time; }

					if(!next_check || (time_to_check_cacheex_mode1_delay > 0 && time_to_check_cacheex_mode1_delay < next_check))
						{ next_check = time_to_check_cacheex_mode1_delay; }
				}
#endif
				if(er->stage < 4) // fallback readers not yet engaged
				{
					// fbtimeout
					tbc = er->tps;
					time_to_check_fbtimeout = add_ms_to_timeb_diff(&tbc, lb_auto_timeout(er, get_fallbacktimeout(er->caid)));

					if(comp_timeb(&t_now, &tbc) >= 0)
					{
						add_job(er->client, ACTION_FALLBACK_TIMEOUT, (void *)er, 0);
						time_to_check_fbtimeout = 0;
					}

					if(!next_check || (time_to_check_fbtimeout > 0 && time_to_check_fbtimeout < next_check))
						{ next_check = time_to_check_fbtimeout; }
				}
			}

			// clienttimeout
			if(!er->readers_timeout_check) // ecm stays in cache at least ctimeout+2seconds!
			{
				tbc = er->tps;
				time_to_check_ctimeout = add_ms_to_timeb_diff(&tbc, lb_auto_timeout(er, cfg.ctimeout));

				if(comp_timeb(&t_now, &tbc) >= 0)
				{
					add_job(er->client, ACTION_CLIENT_TIMEOUT, (void *)er, 0);
					time_to_check_ctimeout = 0;
				}

				if(!next_check || (time_to_check_ctimeout > 0 && time_to_check_ctimeout < next_check))
					{ next_check = time_to_check_ctimeout; }
			}
		}
		cs_readunlock(__func__, &ecmcache_lock);
#ifdef CS_ANTICASC
		if(cfg.ac_enabled && (ac_next = comp_timeb(&ac_time, &t_now)) <= 10)
		{
			ac_do_stat();
			cs_ftime(&ac_time);
			ac_next = add_ms_to_timeb_diff(&ac_time, cfg.ac_stime * 60 * 1000);
		}
#endif
		// ~1s periodic: prune expired tail of ecmcwcache
		if((ecmc_next = comp_timeb(&ecmc_time, &t_now)) <= 10)
		{
			uint32_t count = 0;
			struct ecm_request_t *ecm, *ecmt = NULL, *prv;

			// the list is ordered newest-first, so the first expired node marks
			// the start of the tail to cut off
			cs_readlock(__func__, &ecmcache_lock);
			for(ecm = ecmcwcache, prv = NULL; ecm; prv = ecm, ecm = ecm->next, count++)
			{
				ecm_maxcachetime = t_now.time - ((cfg.ctimeout + 500) / 1000 + 3); // to be sure no more access er!

				if(ecm->tps.time < ecm_maxcachetime)
				{
					// NOTE(review): read lock is dropped before the write lock is
					// taken, so the list can change in between — presumably only
					// insertions at the head happen concurrently; confirm.
					cs_readunlock(__func__, &ecmcache_lock);
					cs_writelock(__func__, &ecmcache_lock);
					ecmt = ecm;
					if(prv)
						{ prv->next = NULL; }
					else
						{ ecmcwcache = NULL; }
					cs_writeunlock(__func__, &ecmcache_lock);
					break;
				}
			}
			if(!ecmt)
				{ cs_readunlock(__func__, &ecmcache_lock); }
			ecmcwcache_size = count;

			// free the detached tail outside of any lock
			while(ecmt)
			{
				ecm = ecmt->next;
				free_ecm(ecmt);
				ecmt = ecm;
			}

#ifdef CS_CACHEEX
			// same tail-prune pattern for the pushed/deleted cacheex list
			ecmt=NULL;
			cs_readlock(__func__, &ecm_pushed_deleted_lock);
			for(ecm = ecm_pushed_deleted, prv = NULL; ecm; prv = ecm, ecm = ecm->next)
			{
				ecm_maxcachetime = t_now.time - ((cfg.ctimeout + 500) / 1000 + 3);
				if(ecm->tps.time < ecm_maxcachetime)
				{
					cs_readunlock(__func__, &ecm_pushed_deleted_lock);
					cs_writelock(__func__, &ecm_pushed_deleted_lock);
					ecmt = ecm;
					if(prv)
						{ prv->next = NULL; }
					else
						{ ecm_pushed_deleted = NULL; }
					cs_writeunlock(__func__, &ecm_pushed_deleted_lock);
					break;
				}
			}
			if(!ecmt)
				{ cs_readunlock(__func__, &ecm_pushed_deleted_lock); }

			while(ecmt)
			{
				ecm = ecmt->next;
				free_push_in_ecm(ecmt);
				ecmt = ecm;
			}
#endif

			cs_ftime(&ecmc_time);
			ecmc_next = add_ms_to_timeb_diff(&ecmc_time, 1000);
		}

		// ~3s periodic: cache and cacheex hitcache cleanup
		if((cache_next = comp_timeb(&cache_time, &t_now)) <= 10)
		{
			cleanup_cache(false);
			cacheex_cleanup_hitcache(false);

			cs_ftime(&cache_time);
			cache_next = add_ms_to_timeb_diff(&cache_time, 3000);
		}

		// ~60s periodic: roll per-client request counters
		if((n_request_next = comp_timeb(&n_request_time, &t_now)) <= 10)
		{
			update_n_request();
			cs_ftime(&n_request_time);
			n_request_next = add_ms_to_timeb_diff(&n_request_time, 60 * 1000);
		}

		// sleep until the soonest of all pending deadlines, capped at 3s
		msec_wait = next_check;
#ifdef CS_ANTICASC
		if(!msec_wait || (ac_next > 0 && ac_next < msec_wait))
			{ msec_wait = ac_next; }
#endif
		if(!msec_wait || (ecmc_next > 0 && ecmc_next < msec_wait))
			{ msec_wait = ecmc_next; }

		if(!msec_wait || (cache_next > 0 && cache_next < msec_wait))
			{ msec_wait = cache_next; }

		if(!msec_wait || (n_request_next > 0 && n_request_next < msec_wait))
			{ msec_wait = n_request_next; }

		if(!msec_wait)
			{ msec_wait = 3000; }

		cleanupcwcycle();
	}

	return NULL;
}
|
|
|
|
/*
 * Start the "cw_process" housekeeping thread (see cw_process above).
 * NOTE(review): the trailing (1, 1) arguments' meaning depends on
 * start_thread()'s signature — confirm against oscam-work.h.
 */
void cw_process_thread_start(void)
{
	start_thread("cw_process", (void *) &cw_process, NULL, NULL, 1, 1);
}
|
|
|
|
/*
 * Wake the cw_process thread so it re-evaluates its deadlines immediately.
 * Bumps the wakeup counter (so a wakeup arriving while the thread is between
 * sleeps is not lost) and signals the sleep condition variable.
 * NOTE(review): cw_process_wakeups++ is not atomic — looks intentional
 * (worst case is one extra loop iteration), but confirm.
 */
void cw_process_thread_wakeup(void)
{
	cw_process_wakeups++; // Do not sleep...
	SAFE_COND_SIGNAL(&cw_process_sleep_cond);
}
|
|
|
|
/*
 * Rewrite an ECM in place from its original CAID to a BetaCrypt (caidto) ECM:
 * remembers the original caid in er->ocaid, inserts a 10-byte Betacrypt header
 * after the 3-byte section header, patches length fields, marks er->btun and
 * bumps the tunnel counters on client, account and first_client.
 */
void convert_to_beta(struct s_client *cl, ECM_REQUEST *er, uint16_t caidto)
{
	static uint8_t headerN3[10] = { 0xc7, 0x00, 0x00, 0x00, 0x01, 0x10, 0x10, 0x00, 0x87, 0x12 };
	static uint8_t headerN2[10] = { 0xc9, 0x00, 0x00, 0x00, 0x01, 0x10, 0x10, 0x00, 0x48, 0x12 };

	er->ocaid = er->caid;
	er->caid = caidto;
	er->prid = 0;
	er->ecmlen = er->ecm[2] + 3;

	// shift the payload 10 bytes down to open a gap for the Betacrypt header
	memmove(er->ecm + 13, er->ecm + 3, er->ecmlen - 3);

	int32_t is_long_ecm = (er->ecmlen > 0x88);
	memcpy(er->ecm + 3, is_long_ecm ? headerN3 : headerN2, 10);
	if(is_long_ecm)
	{
		if(er->ecm[0] == 0x81)
			{ er->ecm[12] += 1; }
		er->ecm[1] = 0x70;
	}

	er->ecmlen += 10;
	er->ecm[2] = er->ecmlen - 3;
	er->btun = 1;

	cl->cwtun++;
	cl->account->cwtun++;
	first_client->cwtun++;

	cs_log_dbg(D_TRACE, "ECM converted ocaid from 0x%04X to BetaCrypt caid 0x%04X for service id 0x%04X",
			er->ocaid, caidto, er->srvid);
}
|
|
|
|
/*
 * Rewrite a BetaCrypt ECM in place back to a Nagra (caidto) ECM: remembers the
 * original caid in er->ocaid, strips the 10-byte Betacrypt header, patches the
 * length fields, marks er->btun and bumps the tunnel counters on client,
 * account and first_client.
 *
 * Fix: the final debug log used the broken specifiers "0x04%X" (printing a
 * literal "0x04" followed by unpadded hex) instead of "0x%04X".
 */
void convert_to_nagra(struct s_client *cl, ECM_REQUEST *er, uint16_t caidto)
{
	cs_log_dbg(D_TRACE, "convert_to_nagra");
	er->ocaid = er->caid;
	er->caid = caidto;
	er->prid = 0;
	er->ecmlen = er->ecm[2] + 3;

	// not sure
	if(er->ecmlen < 0x52)
		{ er->ecm[1] = 0x30; }

	// remove the 10-byte Betacrypt header inserted by convert_to_beta()
	memmove(er->ecm + 3, er->ecm + 13, er->ecmlen - 3);

	er->ecmlen -= 10;
	er->ecm[2] = er->ecmlen - 3;
	er->btun = 1;

	cl->cwtun++;
	cl->account->cwtun++;
	first_client->cwtun++;

	cs_log_dbg(D_TRACE, "ECM converted ocaid from: 0x%04X to Nagra: 0x%04X for service id: 0x%04X",
			er->ocaid, caidto, er->srvid);
}
|
|
|
|
/*
 * Apply the current client's betatunnel table to an ECM request: on the first
 * matching (caidfrom, srvid-or-wildcard) entry, convert the ECM to Nagra or
 * BetaCrypt (depending on the source caid) unless it was already tunneled
 * (er->ocaid set), then stop.
 */
void cs_betatunnel(ECM_REQUEST *er)
{
	struct s_client *cl = cur_client();
	TUNTAB *ttab = &cl->ttab;
	const uint32_t wildcard_srvid = 0xFFFF;
	int32_t idx;

	for(idx = 0; idx < ttab->ttnum; idx++)
	{
		if(er->caid != ttab->ttdata[idx].bt_caidfrom)
			{ continue; }

		if(er->srvid != ttab->ttdata[idx].bt_srvid && ttab->ttdata[idx].bt_srvid != wildcard_srvid)
			{ continue; }

		// only convert once: er->ocaid != 0 means the ECM was already tunneled
		if(er->ocaid == 0x0000)
		{
			if(chk_is_betatunnel_caid(er->caid) == 1)
				{ convert_to_nagra(cl, er, ttab->ttdata[idx].bt_caidto); }
			else
				{ convert_to_beta(cl, er, ttab->ttdata[idx].bt_caidto); }
		}
		return; // first matching entry wins
	}
}
|
|
|
|
/*
 * Detach an ECM from every reader it is still outstanding on (request sent but
 * never answered): nulls the client pointer of matching entries in each
 * reader's ecmtask array so a very late answer cannot reach the disposed ecm,
 * and hands their csp lastnodes to the cacheex bookkeeping.
 */
static void remove_ecm_from_reader(ECM_REQUEST *ecm)
{
	struct s_ecm_answer *ea;

	for(ea = ecm->matching_rdr; ea; ea = ea->next)
	{
		// only outstanding requests are of interest
		if(!(ea->status & REQUEST_SENT) || (ea->status & REQUEST_ANSWERED))
			{ continue; }

		struct s_reader *rdr = ea->reader;
		if(!rdr || !check_client(rdr->client))
			{ continue; }

		ECM_REQUEST *tasks = rdr->client->ecmtask;
		if(!tasks)
			{ continue; }

		int32_t i;
		for(i = 0; i < cfg.max_pending; ++i)
		{
			if(tasks[i].parent == ecm)
			{
				tasks[i].client = NULL;
				cacheex_set_csp_lastnode(&tasks[i]);
			}
		}
	}
}
|
|
|
|
/*
 * Dispose of an ECM request: release cacheex/gbox side data, detach it from
 * any reader it is still outstanding on, destroy and garbage-collect the
 * matching_rdr answer list, then garbage-collect src_data and the ecm itself.
 * Freeing goes through add_garbage() so concurrent late readers never see a
 * freed pointer.
 */
void free_ecm(ECM_REQUEST *ecm)
{
	cacheex_free_csp_lastnodes(ecm);
	gbox_free_cards_pending(ecm);

	// detach from readers first to avoid a segfault on very late answers
	remove_ecm_from_reader(ecm);

	// free matching_rdr list
	struct s_ecm_answer *answer = ecm->matching_rdr;
	ecm->matching_rdr = NULL;
	while(answer)
	{
		struct s_ecm_answer *follow = answer->next;
		cs_lock_destroy(__func__, &answer->ecmanswer_lock);
		add_garbage(answer);
		answer = follow;
	}

	if(ecm->src_data)
		{ add_garbage(ecm->src_data); }
	add_garbage(ecm);
}
|
|
|
|
|
|
/*
 * Immediately free a pushed-in (cacheex) ECM: release cacheex/gbox side data,
 * then free src_data and the ecm itself. Unlike free_ecm() this does not go
 * through the garbage collector — callers must guarantee no one else still
 * references the ecm.
 *
 * Fix: guard against a NULL ecm (the original dereferenced ecm->src_data
 * unconditionally), consistent with the NULL guards used by sibling cleanup
 * functions such as cleanup_ecmtasks().
 */
void free_push_in_ecm(ECM_REQUEST *ecm)
{
	if(!ecm)
		{ return; }

	cacheex_free_csp_lastnodes(ecm);
	gbox_free_cards_pending(ecm);
	if(ecm->src_data)
		{ NULLFREE(ecm->src_data); }
	NULLFREE(ecm);
}
|
|
|
|
ECM_REQUEST *get_ecmtask(void)
|
|
{
|
|
ECM_REQUEST *er = NULL;
|
|
struct s_client *cl = cur_client();
|
|
if(!cl)
|
|
{ return NULL; }
|
|
if(!cs_malloc(&er, sizeof(ECM_REQUEST)))
|
|
{ return NULL; }
|
|
cs_ftime(&er->tps);
|
|
er->rc = E_UNHANDLED;
|
|
er->client = cl;
|
|
er->grp = 0; // no readers/cacheex-clients answers yet
|
|
//cs_log("client %s ECMTASK %d module %s", username(cl), n, get_module(cl)->desc);
|
|
return er;
|
|
}
|
|
|
|
void cleanup_ecmtasks(struct s_client *cl)
|
|
{
|
|
if(!cl) { return; }
|
|
|
|
ECM_REQUEST *ecm;
|
|
|
|
// remove this clients ecm from queue. because of cache, just null the client:
|
|
cs_readlock(__func__, &ecmcache_lock);
|
|
for(ecm = ecmcwcache; ecm && cl; ecm = ecm->next)
|
|
{
|
|
if(ecm->client == cl)
|
|
{
|
|
ecm->client = NULL;
|
|
}
|
|
}
|
|
cs_readunlock(__func__, &ecmcache_lock);
|
|
|
|
// remove client from rdr ecm-queue:
|
|
cs_readlock(__func__, &readerlist_lock);
|
|
struct s_reader *rdr = first_active_reader;
|
|
while(rdr)
|
|
{
|
|
if(check_client(rdr->client) && rdr->client->ecmtask)
|
|
{
|
|
int i;
|
|
for(i = 0; (i < cfg.max_pending) && cl; i++)
|
|
{
|
|
ecm = &rdr->client->ecmtask[i];
|
|
if(ecm->client == cl)
|
|
{
|
|
ecm->client = NULL;
|
|
}
|
|
}
|
|
}
|
|
rdr = rdr->next;
|
|
}
|
|
cs_readunlock(__func__, &readerlist_lock);
|
|
|
|
}
|
|
|
|
/*
 * Track cascading users per (caid, prid, srvid) on a client: refresh the
 * matching entry's timestamp and cwrate, drop entries idle for more than 60
 * seconds, and append a new entry when none matched. The list is created
 * lazily on first use.
 */
static void add_cascade_data(struct s_client *client, ECM_REQUEST *er)
{
	if(!client->cascadeusers)
		{ client->cascadeusers = ll_create("cascade_data"); }

	time_t now = time(NULL);
	LL_ITER it = ll_iter_create(client->cascadeusers);
	struct s_cascadeuser *cu;
	int8_t matched = 0;

	while((cu = ll_iter_next(&it)))
	{
		if(er->caid == cu->caid && er->prid == cu->prid && er->srvid == cu->srvid)
		{
			// refresh: record time since last cw as the rate
			if(cu->time < now)
				{ cu->cwrate = now - cu->time; }
			cu->time = now;
			matched = 1;
		}
		else if(cu->time + 60 < now) // stale entry, drop it
		{
			ll_iter_remove_data(&it);
		}
	}

	if(matched)
		{ return; }

	// not seen before: append a fresh entry
	if(!cs_malloc(&cu, sizeof(struct s_cascadeuser)))
		{ return; }
	cu->caid = er->caid;
	cu->prid = er->prid;
	cu->srvid = er->srvid;
	cu->time = now;
	ll_append(client->cascadeusers, cu);
}
|
|
|
|
/*
 * Decide whether an ECM falls under the double-check caid filter.
 * Returns 1 when the filter is empty (everything is double-checked), when a
 * filter entry matches the caid (exact, or high-byte match for entries below
 * 0x0100) and has no provider list, or when one of its providers matches
 * er->prid; returns 0 otherwise.
 */
int32_t is_double_check_caid(ECM_REQUEST *er, FTAB *double_check_caid)
{
	if(!double_check_caid->nfilts)
		{ return 1; }

	int32_t i, k;
	for(i = 0; i < double_check_caid->nfilts; i++)
	{
		uint16_t tcaid = double_check_caid->filts[i].caid;
		int caid_matches = tcaid && (tcaid == er->caid || (tcaid < 0x0100 && (er->caid >> 8) == tcaid));
		if(!caid_matches)
			{ continue; }

		int32_t nprids = double_check_caid->filts[i].nprids;
		if(!nprids) // no provider restriction -> ok
			{ return 1; }

		for(k = 0; k < nprids; k++)
		{
			if(double_check_caid->filts[i].prids[k] == er->prid)
				{ return 1; }
		}
		// caid matched but no provider did: keep scanning remaining filters
	}

	return 0;
}
|
|
|
|
/*
 * Find the answer slot a given reader occupies in an ECM's matching_rdr list.
 * Returns NULL when either argument is NULL or the reader is not in the list.
 */
struct s_ecm_answer *get_ecm_answer(struct s_reader *reader, ECM_REQUEST *er)
{
	struct s_ecm_answer *ea = NULL;

	if(er && reader)
	{
		for(ea = er->matching_rdr; ea && ea->reader != reader; ea = ea->next)
			;
	}

	return ea; // NULL when not found or on bad arguments
}
|
|
|
|
void distribute_ea(struct s_ecm_answer *ea)
|
|
{
|
|
struct s_ecm_answer *ea_temp;
|
|
|
|
for(ea_temp = ea->pending; ea_temp; ea_temp = ea_temp->pending_next)
|
|
{
|
|
cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [distribute_ea] send ea (%s) by reader %s answering for client %s", (check_client(ea_temp->er->client) ? ea_temp->er->client->account->usr : "-"), ea_temp->er->caid, ea_temp->er->prid, ea_temp->er->srvid, ea->rc==E_FOUND?"OK":"NOK", ea_temp->reader->label, (check_client(ea->er->client) ? ea->er->client->account->usr : "-"));
|
|
#ifdef CS_CACHEEX_AIO
|
|
if(ea->rc==E_FOUND && ea->er->localgenerated)
|
|
ea_temp->er->localgenerated = 1;
|
|
#endif
|
|
// e.g. we cannot send timeout, because "ea_temp->er->client" could wait/ask other readers! Simply set not_found if different from E_FOUND!
|
|
write_ecm_answer(ea_temp->reader, ea_temp->er, (ea->rc==E_FOUND? E_FOUND : E_NOTFOUND), ea->rcEx, ea->cw, NULL, ea->tier, &ea->cw_ex);
|
|
}
|
|
}
|
|
|
|
int32_t send_dcw(struct s_client *client, ECM_REQUEST *er)
|
|
{
|
|
if(!check_client(client) || client->typ != 'c')
|
|
{ return 0; }
|
|
|
|
cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [send_dcw] rc %d from reader %s", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, er->rc, er->selected_reader ? er->selected_reader->label : "-");
|
|
|
|
static const char stageTxt[] = { '0', 'C', 'L', 'P', 'F', 'X' };
|
|
static const char *stxt[] = { "found", "cache1", "cache2", "cache3", "not found", "timeout", "sleeping",
|
|
"fake", "invalid", "corrupt", "no card", "expdate", "disabled", "stopped" };
|
|
|
|
static const char *stxtEx[16] = {"", "group", "caid", "ident", "class", "chid", "queue", "peer", "sid", "", "", "", "", "", "", ""};
|
|
static const char *stxtWh[16] = {"", "user ", "reader ", "server ", "lserver ", "", "", "", "", "", "", "", "" , "" , "", ""};
|
|
#ifdef CS_CACHEEX_AIO
|
|
char sby[100] = "", sreason[100] = "", scwcinfo[32] = "", schaninfo[CS_SERVICENAME_SIZE] = "", srealecmtime[50]="";
|
|
#else
|
|
char sby[100] = "", sreason[70] = "", scwcinfo[32] = "", schaninfo[CS_SERVICENAME_SIZE] = "", srealecmtime[50]="";
|
|
#endif
|
|
char erEx[32] = "";
|
|
char usrname[38] = "";
|
|
char channame[28];
|
|
struct timeb tpe;
|
|
|
|
snprintf(usrname, sizeof(usrname) - 1, "%s", username(client));
|
|
|
|
#ifdef WITH_DEBUG
|
|
if(cs_dblevel & D_CLIENTECM)
|
|
{
|
|
char buf[ECM_FMT_LEN];
|
|
char ecmd5[17 * 3];
|
|
char cwstr[17 * 3];
|
|
format_ecm(er, buf, ECM_FMT_LEN);
|
|
cs_hexdump(0, er->ecmd5, 16, ecmd5, sizeof(ecmd5));
|
|
cs_hexdump(0, er->cw, 16, cwstr, sizeof(cwstr));
|
|
#ifdef CS_CACHEEX
|
|
char csphash[5 * 3];
|
|
cs_hexdump(0, (void *)&er->csp_hash, 4, csphash, sizeof(csphash));
|
|
cs_log_dbg(D_CLIENTECM, "Client %s csphash %s cw %s rc %d %s", username(client), csphash, cwstr, er->rc, buf);
|
|
#else
|
|
cs_log_dbg(D_CLIENTECM, "Client %s cw %s rc %d %s", username(client), cwstr, er->rc, buf);
|
|
#endif
|
|
}
|
|
#endif
|
|
|
|
struct s_reader *er_reader = er->selected_reader; // responding reader
|
|
struct s_ecm_answer *ea_orig = get_ecm_answer(er_reader, er);
|
|
|
|
|
|
// check if ecm_answer from pending's
|
|
if(ea_orig && ea_orig->is_pending && er->rc == E_FOUND)
|
|
{ er->rc = E_CACHE2; }
|
|
|
|
|
|
// check if answer from cacheex-1 reader
|
|
if(er->rc == E_FOUND && er_reader && cacheex_reader(er_reader)) // so add hit to cacheex mode 1 readers
|
|
{
|
|
er->rc = E_CACHEEX;
|
|
}
|
|
|
|
// real ecm time
|
|
if(ea_orig && !ea_orig->is_pending && er->rc == E_FOUND
|
|
&& (
|
|
#ifdef CS_CACHEEX
|
|
er->cacheex_wait_time ||
|
|
#endif
|
|
(ea_orig->status & READER_FALLBACK)))
|
|
{
|
|
snprintf(srealecmtime, sizeof(srealecmtime) - 1, " (real %d ms)", ea_orig->ecm_time);
|
|
}
|
|
|
|
|
|
if(er->rc == E_TIMEOUT)
|
|
{
|
|
#ifdef CS_CACHEEX
|
|
if(!er->from_cacheex1_client) // cosmetic: show "by" readers only for "normal" clients
|
|
{
|
|
#endif
|
|
struct s_ecm_answer *ea_list;
|
|
int32_t ofs = 0;
|
|
|
|
for(ea_list = er->matching_rdr; ea_list; ea_list = ea_list->next)
|
|
{
|
|
if(ea_list->reader && ofs < (int32_t)sizeof(sby) && ((ea_list->status & REQUEST_SENT) && (ea_list->rc == E_TIMEOUT || ea_list->rc >= E_99))) //Request send, but no cw answered!
|
|
{
|
|
ofs += snprintf(sby + ofs, sizeof(sby) - ofs - 1, "%s%s", ofs ? "," : " by ", ea_list->reader->label);
|
|
}
|
|
}
|
|
|
|
if(er->ocaid && ofs < (int32_t)sizeof(sby))
|
|
{ snprintf(sby + ofs, sizeof(sby) - ofs - 1, "(btun %04X)", er->ocaid); }
|
|
|
|
#ifdef CS_CACHEEX
|
|
}
|
|
#endif
|
|
}
|
|
else if(er_reader)
|
|
{
|
|
// add marker to reader if ECM_REQUEST was betatunneled
|
|
if(er->ocaid)
|
|
{ snprintf(sby, sizeof(sby) - 1, " by %s(btun %04X)", er_reader->label, er->ocaid); }
|
|
else
|
|
{ snprintf(sby, sizeof(sby) - 1, " by %s", er_reader->label); }
|
|
}
|
|
#ifdef CS_CACHEEX
|
|
else if(er->cacheex_src) // only for cacheex mode-3 clients (no mode-1 or mode-2 because reader is set!) and csp
|
|
{
|
|
char *cex_name = "-";
|
|
if(check_client(er->cacheex_src) && er->cacheex_src->account)
|
|
{
|
|
if(er->cacheex_src->account->usr[0] != '\0')
|
|
cex_name = er->cacheex_src->account->usr;
|
|
else
|
|
cex_name = "csp";
|
|
}
|
|
|
|
if(er->ocaid)
|
|
{
|
|
snprintf(sby, sizeof(sby) - 1, " by %s(btun %04X)", cex_name, er->ocaid);
|
|
}
|
|
else
|
|
{
|
|
snprintf(sby, sizeof(sby) - 1, " by %s", cex_name);
|
|
}
|
|
}
|
|
#endif
|
|
|
|
if(er->rc < E_NOTFOUND)
|
|
{
|
|
er->rcEx = 0;
|
|
// memset(er->msglog, 0, MSGLOGSIZE); // remove reader msglog from previous requests that failed, founds never give back msglog!
|
|
}
|
|
|
|
if(er->rcEx)
|
|
{ snprintf(erEx, sizeof(erEx) - 1, "rejected %s%s", stxtWh[er->rcEx >> 4], stxtEx[er->rcEx & 0xf]); }
|
|
|
|
get_servicename_or_null(client, er->srvid, er->prid, er->caid, channame, sizeof(channame));
|
|
if(!channame[0])
|
|
{
|
|
schaninfo[0] = '\0';
|
|
}
|
|
else
|
|
{
|
|
snprintf(schaninfo, sizeof(schaninfo) - 1, " - %s", channame);
|
|
}
|
|
|
|
#ifdef CS_CACHEEX
|
|
int cx = 0;
|
|
if(er->msglog[0])
|
|
{
|
|
cx = snprintf(sreason, sizeof(sreason) - 1, " (%s)", er->msglog);
|
|
}
|
|
#else
|
|
if(er->msglog[0])
|
|
{
|
|
snprintf(sreason, sizeof(sreason) - 1, " (%s)", er->msglog);
|
|
}
|
|
#endif
|
|
#ifdef CW_CYCLE_CHECK
|
|
if(er->cwc_msg_log[0])
|
|
{ snprintf(scwcinfo, sizeof(scwcinfo) - 1, " (%.26s)", er->cwc_msg_log); }
|
|
#endif
|
|
|
|
cs_ftime(&tpe);
|
|
|
|
#ifdef CS_CACHEEX
|
|
int cx2 = 0;
|
|
if(er->rc >= E_CACHEEX && er->cacheex_wait_time && er->cacheex_wait_time_expired)
|
|
{
|
|
cx2 = snprintf(sreason+cx, (sizeof sreason)-cx, " (wait_time over)");
|
|
}
|
|
else
|
|
{
|
|
cx2 = cx;
|
|
}
|
|
|
|
if(er->cw_count>1)
|
|
{
|
|
#ifdef CS_CACHEEX_AIO
|
|
if(er->cw_count > 0x0F000000 || er->localgenerated)
|
|
{
|
|
uint32_t cw_count_cleaned = er->cw_count ^ 0x0F000000;
|
|
if(cw_count_cleaned > 1)
|
|
snprintf(sreason+cx2, (sizeof sreason)-cx2, " (cw count %d) (lg)", cw_count_cleaned);
|
|
else
|
|
snprintf(sreason+cx2, (sizeof sreason)-cx2, " (lg)");
|
|
}
|
|
else
|
|
{
|
|
#endif
|
|
|
|
snprintf (sreason+cx2, (sizeof sreason)-cx2, " (cw count %d)", er->cw_count);
|
|
|
|
#ifdef CS_CACHEEX_AIO
|
|
}
|
|
|
|
}
|
|
else
|
|
{
|
|
if(er->localgenerated)
|
|
snprintf(sreason+cx2, (sizeof sreason)-cx2, " (lg)");
|
|
#endif
|
|
}
|
|
|
|
#endif
|
|
|
|
client->cwlastresptime = comp_timeb(&tpe, &er->tps);
|
|
|
|
time_t now = time(NULL);
|
|
webif_client_add_lastresponsetime(client, client->cwlastresptime, now, er->rc); // add to ringbuffer
|
|
|
|
if(er_reader)
|
|
{
|
|
struct s_client *er_cl = er_reader->client;
|
|
if(check_client(er_cl))
|
|
{
|
|
er_cl->cwlastresptime = client->cwlastresptime;
|
|
webif_client_add_lastresponsetime(er_cl, client->cwlastresptime, now, er->rc);
|
|
er_cl->last_providptr = client->last_providptr;
|
|
er_cl->last_srvidptr = client->last_srvidptr;
|
|
}
|
|
}
|
|
|
|
webif_client_init_lastreader(client, er, er_reader, stxt);
|
|
|
|
client->last = now;
|
|
|
|
//cs_log_dbg(D_TRACE, "CHECK rc=%d er->cacheex_src=%s", er->rc, username(er->cacheex_src));
|
|
switch(er->rc)
|
|
{
|
|
case E_FOUND:
|
|
{
|
|
client->cwfound++;
|
|
client->account->cwfound++;
|
|
first_client->cwfound++;
|
|
break;
|
|
}
|
|
case E_CACHE1:
|
|
case E_CACHE2:
|
|
case E_CACHEEX:
|
|
{
|
|
client->cwcache++;
|
|
client->account->cwcache++;
|
|
first_client->cwcache++;
|
|
#ifdef CS_CACHEEX
|
|
if(check_client(er->cacheex_src))
|
|
{
|
|
first_client->cwcacheexhit++;
|
|
er->cacheex_src->cwcacheexhit++;
|
|
if(er->cacheex_src->account)
|
|
{ er->cacheex_src->account->cwcacheexhit++; }
|
|
}
|
|
#endif
|
|
break;
|
|
}
|
|
case E_NOTFOUND:
|
|
case E_CORRUPT:
|
|
case E_NOCARD:
|
|
{
|
|
if(er->rcEx)
|
|
{
|
|
client->cwignored++;
|
|
client->account->cwignored++;
|
|
first_client->cwignored++;
|
|
}
|
|
else
|
|
{
|
|
client->cwnot++;
|
|
client->account->cwnot++;
|
|
first_client->cwnot++;
|
|
}
|
|
break;
|
|
}
|
|
case E_TIMEOUT:
|
|
{
|
|
client->cwtout++;
|
|
client->account->cwtout++;
|
|
first_client->cwtout++;
|
|
break;
|
|
}
|
|
default:
|
|
{
|
|
client->cwignored++;
|
|
client->account->cwignored++;
|
|
first_client->cwignored++;
|
|
}
|
|
}
|
|
|
|
#ifdef CS_ANTICASC
|
|
// [zaplist] ACoSC anticascading
|
|
if(cfg.acosc_enabled)
|
|
{
|
|
int8_t max_active_sids = 0;
|
|
int8_t zap_limit = 0;
|
|
int8_t penalty = 0;
|
|
int32_t penalty_duration = 0;
|
|
int32_t delay = 0;
|
|
int8_t max_ecms_per_minute = 0;
|
|
char *info1 = NULL;
|
|
char *info2 = NULL;
|
|
char *info3 = NULL;
|
|
char *info4 = NULL;
|
|
char *info5 = NULL;
|
|
char *info6 = NULL;
|
|
|
|
// **global or user value?
|
|
cs_writelock(__func__, &clientlist_lock);
|
|
|
|
max_active_sids = client->account->acosc_max_active_sids == -1 ? cfg.acosc_max_active_sids : client->account->acosc_max_active_sids;
|
|
info1 = client->account->acosc_max_active_sids == -1 ? "Globalvalue" : "Uservalue";
|
|
|
|
zap_limit = client->account->acosc_zap_limit == -1 ? cfg.acosc_zap_limit : client->account->acosc_zap_limit;
|
|
info5 = client->account->acosc_zap_limit == -1 ? "Globalvalue" : "Uservalue";
|
|
|
|
penalty = client->account->acosc_penalty == -1 ? cfg.acosc_penalty : client->account->acosc_penalty;
|
|
info2 = client->account->acosc_penalty == -1 ? "Globalvalue" : "Uservalue";
|
|
|
|
penalty_duration = client->account->acosc_penalty_duration == -1 ? cfg.acosc_penalty_duration : client->account->acosc_penalty_duration;
|
|
info3 = client->account->acosc_penalty_duration == -1 ? "Globalvalue" : "Uservalue";
|
|
|
|
delay = client->account->acosc_delay == -1 ? cfg.acosc_delay : client->account->acosc_delay;
|
|
info4 = client->account->acosc_delay == -1 ? "Globalvalue" : "Uservalue";
|
|
|
|
max_ecms_per_minute = client->account->acosc_max_ecms_per_minute == -1 ? cfg.acosc_max_ecms_per_minute : client->account->acosc_max_ecms_per_minute;
|
|
info6 = client->account->acosc_max_ecms_per_minute == -1 ? "Globalvalue" : "Uservalue";
|
|
|
|
//**
|
|
|
|
if((er->rc < E_NOTFOUND && max_active_sids > 0) || zap_limit > 0 || max_ecms_per_minute > 0)
|
|
{
|
|
int8_t k = 0;
|
|
int8_t active_sid_count = 0;
|
|
time_t zaptime = time(NULL);
|
|
|
|
if(client->account->acosc_penalty_active == 4 && client->account->acosc_penalty_until <= zaptime) // reset penalty_active
|
|
{
|
|
client->account->acosc_penalty_active = 0;
|
|
client->account->acosc_penalty_until = 0;
|
|
}
|
|
|
|
if(client->account->acosc_penalty_active == 0 && max_active_sids > 0)
|
|
{
|
|
for(k=0; k<15 ; k++)
|
|
{
|
|
if(zaptime-30 < client->client_zap_list[k].lasttime && client->client_zap_list[k].request_stage == 10)
|
|
{
|
|
cs_log_dbg(D_TRACE, "[zaplist] ACoSC for Client: %s more then 10 ECM's for %04X@%06X/%04X/%04X", username(client), client->client_zap_list[k].caid, client->client_zap_list[k].provid, client->client_zap_list[k].chid, client->client_zap_list[k].sid);
|
|
active_sid_count ++;
|
|
}
|
|
}
|
|
cs_log_dbg(D_TRACE, "[zaplist] ACoSC for Client: %s active_sid_count= %i with more than 10 followed ECM's (mas:%i (%s))", username(client), active_sid_count, max_active_sids, info1);
|
|
}
|
|
|
|
if(client->account->acosc_penalty_active == 0 && max_active_sids > 0 && active_sid_count > max_active_sids) //max_active_sids reached
|
|
{
|
|
client->account->acosc_penalty_active = 1;
|
|
client->account->acosc_penalty_until = zaptime + penalty_duration;
|
|
}
|
|
|
|
if(client->account->acosc_penalty_active == 0 && zap_limit > 0 && client->account->acosc_user_zap_count > zap_limit) // zap_limit reached
|
|
{
|
|
client->account->acosc_penalty_active = 2;
|
|
client->account->acosc_penalty_until = zaptime + penalty_duration;
|
|
}
|
|
|
|
if(client->account->acosc_penalty_active == 0 && max_ecms_per_minute > 0 && client->n_request[1] >= max_ecms_per_minute && penalty != 4) // max ecms per minute reached
|
|
{
|
|
client->account->acosc_penalty_active = 3;
|
|
client->account->acosc_penalty_until = zaptime + penalty_duration;
|
|
}
|
|
|
|
if(client->account->acosc_penalty_active == 0 && max_ecms_per_minute > 0 && client->n_request[1] > 0 && penalty == 4) // max ecms per minute with hidecards penalty
|
|
{
|
|
client->account->acosc_penalty_active = 3;
|
|
client->account->acosc_penalty_until = zaptime + penalty_duration;
|
|
}
|
|
|
|
if(client->account->acosc_penalty_active > 0)
|
|
{
|
|
if(client->account->acosc_penalty_active == 4)
|
|
{ cs_log_dbg(D_TRACE, "[zaplist] ACoSC for Client: %s penalty_duration: %" PRId64 " seconds left(%s)", username(client), (int64_t)(client->account->acosc_penalty_until - zaptime), info3); }
|
|
|
|
int16_t lt = get_module(client)->listenertype;
|
|
switch(penalty)
|
|
{
|
|
case 1: // NULL CW
|
|
er->rc = E_FAKE; // E_FAKE give only a status fake not a NULL cw
|
|
er->rcEx = E2_WRONG_CHKSUM;
|
|
if(client->account->acosc_penalty_active == 1)
|
|
{ cs_log("[zaplist] ACoSC for Client: %s max_activ_sids reached: %i:%i(%s) penalty: 1(%s) send null CW", username(client), active_sid_count, max_active_sids, info1, info2); }
|
|
if(client->account->acosc_penalty_active == 2)
|
|
{ cs_log("[zaplist] ACoSC for Client: %s zap_limit reached: %i:%i(%s) penalty: 1(%s) send null CW", username(client), client->account->acosc_user_zap_count, zap_limit, info5, info2); }
|
|
if(client->account->acosc_penalty_active == 3)
|
|
{ cs_log("[maxecms] ACoSC for Client: %s max_ecms_per_minute reached: ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 1(%s) send null CW", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2); }
|
|
break;
|
|
|
|
case 2: // ban
|
|
if(lt != LIS_DVBAPI)
|
|
{
|
|
if(client->account->acosc_penalty_active == 1)
|
|
{ cs_log("[zaplist] ACoSC for Client: %s max_activ_sids reached: %i:%i(%s) penalty: 2(%s) BAN Client - Kill and set Client to failban list for %i sec.", username(client), active_sid_count, max_active_sids, info1, info2, penalty_duration); }
|
|
if(client->account->acosc_penalty_active == 2)
|
|
{ cs_log("[zaplist] ACoSC for Client: %s zap_limit reached: %i:%i(%s) penalty: 2(%s) BAN Client - Kill and set Client to failban list for %i sec.", username(client), client->account->acosc_user_zap_count, zap_limit, info5, info2, penalty_duration); }
|
|
if(client->account->acosc_penalty_active == 3)
|
|
{ cs_log("[maxecms] ACoSC for Client: %s max_ecms_per_minute reached: ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 2(%s) BAN Client - Kill and set Client to failban list for %i sec.", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2, penalty_duration); }
|
|
cs_add_violation_acosc(client, client->account->usr, penalty_duration);
|
|
add_job(client, ACTION_CLIENT_KILL, NULL, 0);
|
|
}
|
|
else
|
|
{
|
|
cs_log("[zaplist] ACoSC for Client: %s %i:%i(%s) penalty: 2(%s) BAN Client - don't Ban dvbapi user only stop decoding", username(client), active_sid_count, max_active_sids, info1, info2);
|
|
}
|
|
er->rc = E_DISABLED;
|
|
break;
|
|
|
|
case 3: // delay
|
|
if(client->account->acosc_penalty_active == 1)
|
|
{ cs_log("[zaplist] ACoSC for Client: %s max_activ_sids reached: %i:%i(%s) penalty: 3(%s) delay CW: %ims(%s)", username(client), active_sid_count, max_active_sids, info1, info2, delay, info4); }
|
|
if(client->account->acosc_penalty_active == 2)
|
|
{ cs_log("[zaplist] ACoSC for Client: %s zap_limit reached: %i:%i(%s) penalty: 3(%s) delay CW: %ims(%s)", username(client), client->account->acosc_user_zap_count, zap_limit, info5, info2, delay, info4); }
|
|
if(client->account->acosc_penalty_active == 3)
|
|
{ cs_log("[maxecms] ACoSC for Client: %s max_ecms_per_minute reached: ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 3(%s) delay CW: %ims(%s)", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2, delay, info4); }
|
|
cs_writeunlock(__func__, &clientlist_lock);
|
|
cs_sleepms(delay);
|
|
cs_writelock(__func__, &clientlist_lock);
|
|
client->cwlastresptime += delay;
|
|
snprintf(sreason, sizeof(sreason)-1, " (%d ms penalty delay)", delay);
|
|
break;
|
|
case 4: // hidecards
|
|
if(client->account->acosc_penalty_active == 3)
|
|
{
|
|
cs_log("[maxecms] ACoSC for Client: %s ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 4(%s) hidecards - hidecards to the client for %i sec", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2, penalty_duration);
|
|
client->start_hidecards = 1;
|
|
}
|
|
break;
|
|
default: // logging
|
|
if(client->account->acosc_penalty_active == 1)
|
|
{ cs_log("[zaplist] ACoSC for Client: %s max_activ_sids reached: %i:%i(%s) penalty: 0(%s) only logging", username(client), active_sid_count, max_active_sids, info1, info2); }
|
|
if(client->account->acosc_penalty_active == 2)
|
|
{ cs_log("[zaplist] ACoSC for Client: %s zap_limit reached: %i:%i(%s) penalty: 0(%s) only logging", username(client), client->account->acosc_user_zap_count, zap_limit, info5, info2); }
|
|
if(client->account->acosc_penalty_active == 3)
|
|
{ cs_log("[maxecms] ACoSC for Client: %s max_ecms_per_minute reached: ecms_last_minute=%i ecms_now=%i max=%i(%s) penalty: 0(%s) only logging", username(client), client->n_request[0], client->n_request[1], max_ecms_per_minute, info6, info2); }
|
|
break;
|
|
}
|
|
client->account->acosc_user_zap_count = 0; // we got already a penalty
|
|
client->account->acosc_penalty_active = 3;
|
|
client->account->acosc_penalty_active = 4;
|
|
}
|
|
}
|
|
cs_writeunlock(__func__, &clientlist_lock);
|
|
}
|
|
#endif
|
|
|
|
if(cfg.double_check && er->rc <= E_CACHE2 && er->selected_reader && is_double_check_caid(er, &cfg.double_check_caid))
|
|
{
|
|
if(er->checked == 0) // First CW, save it and wait for next one
|
|
{
|
|
er->checked = 1;
|
|
er->origin_reader = er->selected_reader;
|
|
memcpy(er->cw_checked, er->cw, sizeof(er->cw));
|
|
cs_log("DOUBLE CHECK FIRST CW by %s idx %d cpti %d", er->origin_reader->label, er->idx, er->msgid);
|
|
}
|
|
else if(er->origin_reader != er->selected_reader) // Second (or third and so on) cw. We have to compare
|
|
{
|
|
if(memcmp(er->cw_checked, er->cw, sizeof(er->cw)) == 0)
|
|
{
|
|
er->checked++;
|
|
cs_log("DOUBLE CHECKED! %d. CW by %s idx %d cpti %d", er->checked, er->selected_reader->label, er->idx, er->msgid);
|
|
}
|
|
else
|
|
{
|
|
cs_log("DOUBLE CHECKED NONMATCHING! %d. CW by %s idx %d cpti %d", er->checked, er->selected_reader->label, er->idx, er->msgid);
|
|
}
|
|
}
|
|
if(er->checked < 2) // less as two same cw? mark as pending!
|
|
{
|
|
er->rc = E_UNHANDLED;
|
|
goto ESC;
|
|
}
|
|
}
|
|
|
|
ac_chk(client, er, 1);
|
|
int32_t is_fake = 0;
|
|
if(er->rc == E_FAKE)
|
|
{
|
|
is_fake = 1;
|
|
er->rc = E_FOUND;
|
|
}
|
|
|
|
get_module(client)->send_dcw(client, er);
|
|
|
|
add_cascade_data(client, er);
|
|
|
|
if(is_fake)
|
|
{ er->rc = E_FAKE; }
|
|
|
|
#ifdef CS_ANTICASC
|
|
cs_writelock(__func__, &clientlist_lock);
|
|
if(client->start_hidecards)
|
|
{
|
|
client->start_hidecards = 0;
|
|
add_job(client, ACTION_CLIENT_HIDECARDS, NULL, 0);
|
|
}
|
|
cs_writeunlock(__func__, &clientlist_lock);
|
|
#endif
|
|
|
|
if(!(er->rc == E_SLEEPING && client->cwlastresptime == 0))
|
|
{
|
|
char buf[ECM_FMT_LEN];
|
|
format_ecm(er, buf, ECM_FMT_LEN);
|
|
if(er->reader_avail == 1 || er->stage == 0)
|
|
{
|
|
cs_log("%s (%s): %s (%d ms)%s%s%s%s", usrname, buf, er->rcEx ? erEx : stxt[er->rc],
|
|
client->cwlastresptime, sby, schaninfo, sreason, scwcinfo);
|
|
}
|
|
else
|
|
{
|
|
cs_log("%s (%s): %s (%d ms)%s (%c/%d/%d/%d)%s%s%s%s", usrname, buf, er->rcEx ? erEx : stxt[er->rc],
|
|
client->cwlastresptime, sby, stageTxt[er->stage], er->reader_requested,
|
|
(er->reader_count + er->fallback_reader_count), er->reader_avail, schaninfo,
|
|
srealecmtime, sreason, scwcinfo);
|
|
}
|
|
}
|
|
|
|
cs_log_dump_dbg(D_ATR, er->cw, 16, "cw:");
|
|
led_status_cw_not_found(er);
|
|
|
|
ESC:
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* write_ecm_request():
|
|
*/
|
|
static int32_t write_ecm_request(struct s_reader *rdr, ECM_REQUEST *er)
{
	// Queue the ECM on the reader's client thread; the actual card/proxy
	// request is performed asynchronously by the ACTION_READER_ECM_REQUEST
	// job handler.
	add_job(rdr->client, ACTION_READER_ECM_REQUEST, (void *)er, 0);
	// Always reports success; the return value of add_job is ignored here.
	return 1;
}
|
|
|
|
/**
 * sends the ecm request to the readers
 * ECM_REQUEST er : the ecm
 * er->stage: 0 = no reader asked yet
 *            1 = ask cache-exchange readers (CS_CACHEEX builds only)
 *            2 = ask only local readers (skipped without preferlocalcards)
 *            3 = ask any non-fallback reader
 *            4 = ask fallback readers
 **/
|
|
// Advance the ECM through the reader stages (cacheex -> local -> any -> fallback)
// and dispatch the request to every matching reader of the current stage.
// stop_stage: if non-zero, do not advance past this stage.
// Loops stage-by-stage until at least one request reached a connected reader
// or the final stage (4, fallbacks) has been processed.
void request_cw_from_readers(ECM_REQUEST *er, uint8_t stop_stage)
{
	struct s_ecm_answer *ea;
	int8_t sent = 0; // set once a request reached a connected/inserted reader

	if(er->stage >= 4) { return; } // all stages already exhausted

	while(1)
	{
		if(stop_stage && er->stage >= stop_stage) { return; }

		er->stage++;

#ifdef CS_CACHEEX
		// stage 1 (cacheex) is skipped when preferlocalcards==2 requests local cards first
		if(er->stage == 1 && er->preferlocalcards==2)
			{ er->stage++; }
#else
		// without cacheex support there is no stage 1 at all
		if(er->stage == 1)
			{ er->stage++; }
#endif

		// stage 2 (local-only) applies only with preferlocalcards
		if(er->stage == 2 && !er->preferlocalcards)
			{ er->stage++; }

		for(ea = er->matching_rdr; ea; ea = ea->next)
		{
			// Select readers eligible for the current stage; each case skips
			// readers already asked (REQUEST_SENT) or not matching the stage's
			// status-bit pattern.
			switch(er->stage)
			{
#ifdef CS_CACHEEX
				case 1:
				{
					// Cache-Exchange readers only
					if((ea->status & REQUEST_SENT) ||
							(ea->status & (READER_CACHEEX | READER_ACTIVE)) != (READER_CACHEEX | READER_ACTIVE))
						{ continue; }
					break;
				}
#endif
				case 2:
				{
					// only local reader (non-fallback)
					if((ea->status & REQUEST_SENT) ||
							(ea->status & (READER_ACTIVE | READER_FALLBACK | READER_LOCAL)) != (READER_ACTIVE | READER_LOCAL))
						{ continue; }
					break;
				}
				case 3:
				{
					// any non fallback reader not asked yet
					if((ea->status & REQUEST_SENT) ||
							(ea->status & (READER_ACTIVE | READER_FALLBACK)) != READER_ACTIVE)
						{ continue; }
					break;
				}
				default:
				{
					// only fallbacks
					if((ea->status & REQUEST_SENT) ||
							(ea->status & (READER_ACTIVE | READER_FALLBACK)) != (READER_ACTIVE | READER_FALLBACK))
						{ continue; }
					break;
				}
			}

			struct s_reader *rdr = ea->reader;
#ifdef WITH_DEBUG
			if (cs_dblevel & (D_TRACE | D_CSP))
			{
				char ecmd5[17 * 3];
				cs_hexdump(0, er->ecmd5, 16, ecmd5, sizeof(ecmd5));
				cs_log_dbg(D_TRACE | D_CSP, "request_cw stage=%d to reader %s ecm hash=%s", er->stage, rdr ? rdr->label : "", ecmd5);
			}
#endif
			// maxparallel: check if reader has capacity (slot is reserved later when CW found)
			if(rdr->maxparallel > 0 && !reader_has_capacity(rdr, er->srvid, er->client))
			{
				cs_log_dbg(D_LB, "reader %s skipped for %s srvid %04X (maxparallel %d reached)",
						rdr->label, check_client(er->client) ? er->client->account->usr : "-",
						er->srvid, rdr->maxparallel);
				continue;
			}

			ea->status |= REQUEST_SENT;
			cs_ftime(&ea->time_request_sent);

			er->reader_requested++;

			write_ecm_request(ea->reader, er);

			// set sent=1 only if reader is active/connected. If not, switch to next stage!
			if(!sent && rdr)
			{
				struct s_client *rcl = rdr->client;
				if(check_client(rcl))
				{
					// 'r' = physical reader needs an inserted card;
					// 'p' = proxy counts as usable when card inserted or TCP connected
					if(rcl->typ == 'r' && rdr->card_status == CARD_INSERTED)
						{ sent = 1; }
					else if(rcl->typ == 'p' && (rdr->card_status == CARD_INSERTED || rdr->tcp_connected))
						{ sent = 1; }
				}
			}

			cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_request] reader %s --> SENT %d", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, rdr ? ea->reader->label : "-", sent);
		}

		// stop once a usable reader was reached or the fallback stage is done
		if(sent || er->stage >= 4)
			{ break; }
	}
}
|
|
|
|
// Build a standalone copy of a reader's answer and push it into the CW cache.
// The copy is needed because the original ECM_REQUEST stays owned by the
// request pipeline. With CS_CACHEEX the copy is kept on the
// ecm_pushed_deleted list (freed later by the garbage collector); otherwise
// it is freed immediately after caching.
// NOTE(review): caid/srvid are declared int16_t although CAIDs/SIDs are
// unsigned 16-bit values elsewhere — confirm no sign-extension issues.
void add_cache_from_reader(ECM_REQUEST *er, struct s_reader *rdr, uint32_t csp_hash, uint8_t *ecmd5, uint8_t *cw, int16_t caid, int32_t prid, int16_t srvid
#ifdef CS_CACHEEX_AIO
	, int32_t ecm_time
#endif
	)
{
	ECM_REQUEST *ecm;
	if (cs_malloc(&ecm, sizeof(ECM_REQUEST)))
	{
		cs_ftime(&ecm->tps); // cache entry timestamped "now", not with er->tps

		ecm->cwc_cycletime = er->cwc_cycletime;
		ecm->cwc_next_cw_cycle = er->cwc_next_cw_cycle;
		memcpy(ecm->ecm, er->ecm, sizeof(ecm->ecm)); // ecm[0] is pushed to cacheexclients so we need a copy from it
		ecm->caid = caid;
		ecm->prid = prid;
		ecm->srvid = srvid;
		memcpy(ecm->ecmd5, ecmd5, CS_ECMSTORESIZE);
		ecm->csp_hash = csp_hash;
		ecm->rc = E_FOUND;
		memcpy(ecm->cw, cw, sizeof(ecm->cw));
		ecm->grp = rdr->grp;
		ecm->selected_reader = rdr;
#ifdef CS_CACHEEX_AIO
		ecm->ecm_time = ecm_time;
		ecm->localgenerated = er->localgenerated;
#endif
#ifdef CS_CACHEEX
		if(rdr && cacheex_reader(rdr))
			{ ecm->cacheex_src = rdr->client; } // so adds hits to reader
#endif

		add_cache(ecm); // add cw to cache

#ifdef CS_CACHEEX
		// keep the copy on the pushed/deleted list; freed later by GC
		cs_writelock(__func__, &ecm_pushed_deleted_lock);
		ecm->next = ecm_pushed_deleted;
		ecm_pushed_deleted = ecm;
		cs_writeunlock(__func__, &ecm_pushed_deleted_lock);
#else
		NULLFREE(ecm); // add_cache made its own copy -- TODO confirm ownership
#endif
	}
}
|
|
|
|
// Process one reader answer (s_ecm_answer) for its ECM request:
// - ignore late answers for already-answered requests (optionally warning
//   about mismatching CWs in cacheex builds),
// - for cacheex-1 readers only trigger the next stage (the check_cache
//   thread answers the client),
// - on E_FOUND copy the CW into the request and send it to the client,
// - on E_NOTFOUND/E_INVALID count the readers still pending and either
//   advance to the next stage or finalize with NOT_FOUND.
void chk_dcw(struct s_ecm_answer *ea)
{
	if(!ea || !ea->er)
		{ return; }

	ECM_REQUEST *ert = ea->er;
	struct s_ecm_answer *ea_list;
	struct s_reader *eardr = ea->reader;
	if(!ert || !eardr)
		{ return; }

	// ecm request already answered!
	if(ert->rc < E_99)
	{
#ifdef CS_CACHEEX
		// late answer disagrees with the CW already delivered -> log a warning
		if(ea && ert->rc < E_NOTFOUND && ea->rc < E_NOTFOUND && memcmp(ea->cw, ert->cw, sizeof(ert->cw)) != 0)
		{
			char cw1[16 * 3 + 2], cw2[16 * 3 + 2];
#ifdef WITH_DEBUG
			// NOTE(review): cw1/cw2 are only filled when D_TRACE is active;
			// debug_ecm below presumably no-ops otherwise — confirm.
			if(cs_dblevel & D_TRACE)
			{
				cs_hexdump(0, ea->cw, 16, cw1, sizeof(cw1));
				cs_hexdump(0, ert->cw, 16, cw2, sizeof(cw2));
			}
#endif
			char ip1[20] = "", ip2[20] = "";
			if(ea->reader && check_client(ea->reader->client)) { cs_strncpy(ip1, cs_inet_ntoa(ea->reader->client->ip), sizeof(ip1)); }
			if(ert->cacheex_src) { cs_strncpy(ip2, cs_inet_ntoa(ert->cacheex_src->ip), sizeof(ip2)); }
			else if(ert->selected_reader && check_client(ert->selected_reader->client)) { cs_strncpy(ip2, cs_inet_ntoa(ert->selected_reader->client->ip), sizeof(ip2)); }

			ECM_REQUEST *er = ert; // alias used by the debug_ecm macro
			debug_ecm(D_TRACE, "WARNING2: Different CWs %s from %s(%s)<>%s(%s): %s<>%s", buf,
					username(ea->reader ? ea->reader->client : ert->client), ip1,
					er->cacheex_src ? username(er->cacheex_src) : (ert->selected_reader ? ert->selected_reader->label : "unknown/csp"), ip2,
					cw1, cw2);
		}
#endif

		return;
	}

#ifdef CS_CACHEEX
	/* if answer from cacheex-1 reader, not send answer to client!
	 * thread check_cache will check counter and send answer to client!
	 * Anyway, we should check if we have to go to other stage (>1)
	 */

	if(eardr && cacheex_reader(eardr))
	{
		// if wait_time, and not wait_time expired and wait_time due to hitcache(or awtime>0),
		// we have to wait cacheex timeout before call other readers (stage>1)
		if(cacheex_reader(eardr) && !ert->cacheex_wait_time_expired && ert->cacheex_hitcache)
			{ return; }

		int8_t cacheex_left = 0;
		uint8_t has_cacheex = 0;
		if(ert->stage == 1)
		{
			// count cacheex readers that were asked but have not answered yet
			for(ea_list = ert->matching_rdr; ea_list; ea_list = ea_list->next)
			{
				cs_readlock(__func__, &ea_list->ecmanswer_lock);
				if(((ea_list->status & (READER_CACHEEX | READER_FALLBACK | READER_ACTIVE))) == (READER_CACHEEX | READER_ACTIVE))
					{ has_cacheex = 1; }
				if((!(ea_list->status & READER_FALLBACK) && ((ea_list->status & (REQUEST_SENT | REQUEST_ANSWERED | READER_CACHEEX | READER_ACTIVE)) == (REQUEST_SENT | READER_CACHEEX | READER_ACTIVE))) || ea_list->rc < E_NOTFOUND)
					{ cacheex_left++; }
				cs_readunlock(__func__, &ea_list->ecmanswer_lock);
			}

			// all cacheex readers done without a hit -> move on to the next stage
			if(has_cacheex && !cacheex_left) { request_cw_from_readers(ert, 0); }
		}

		return;
	}
#endif

	int32_t reader_left = 0, local_left = 0, reader_not_flb_left = 0, has_not_fallback = 0, has_local = 0;
	ert->selected_reader = eardr;

	switch(ea->rc)
	{
		case E_FOUND:
			// maxparallel: register this service on the winning reader
			if(eardr->maxparallel > 0)
				{ reader_register_service(eardr, ert->srvid, ert->client); }

			// adopt the winning answer into the request
			memcpy(ert->cw, ea->cw, 16);
			ert->cw_ex = ea->cw_ex;
			ert->rcEx = 0;
			ert->rc = ea->rc;
			ert->grp |= eardr->grp;
			cs_strncpy(ert->msglog, ea->msglog, sizeof(ert->msglog));
#ifdef HAVE_DVBAPI
			ert->adapter_index = ea->er->adapter_index;
#endif
			break;

		case E_INVALID:
		case E_NOTFOUND:
		{
			// check if there are other readers to ask, and if not send NOT_FOUND to client
			ert->rcEx = ea->rcEx;
			cs_strncpy(ert->msglog, ea->msglog, sizeof(ert->msglog));

			// Tally what is still outstanding: local readers, non-fallback
			// readers, and readers of any kind. An entry counts as "left"
			// while it was asked but has not answered (or already found a CW).
			for(ea_list = ert->matching_rdr; ea_list; ea_list = ea_list->next)
			{
				cs_readlock(__func__, &ea_list->ecmanswer_lock);

				if((!(ea_list->status & READER_FALLBACK) && ((ea_list->status & (REQUEST_SENT | REQUEST_ANSWERED | READER_LOCAL | READER_ACTIVE)) == (REQUEST_SENT | READER_LOCAL | READER_ACTIVE))) || ea_list->rc < E_NOTFOUND)
					{ local_left++; }

				if((!(ea_list->status & READER_FALLBACK) && ((ea_list->status & (REQUEST_SENT | REQUEST_ANSWERED | READER_ACTIVE)) == (REQUEST_SENT | READER_ACTIVE))) || ea_list->rc < E_NOTFOUND)
					{ reader_not_flb_left++; }

				if(((ea_list->status & (REQUEST_ANSWERED | READER_ACTIVE)) == (READER_ACTIVE)) || ea_list->rc < E_NOTFOUND)
					{ reader_left++; }

				if(((ea_list->status & (READER_FALLBACK | READER_ACTIVE))) == (READER_ACTIVE))
					{ has_not_fallback = 1; }
				if(((ea_list->status & (READER_LOCAL | READER_FALLBACK | READER_ACTIVE))) == (READER_LOCAL | READER_ACTIVE))
					{ has_local = 1; }

				cs_readunlock(__func__, &ea_list->ecmanswer_lock);
			}

			switch(ert->stage)
			{
				case 2: // only local reader (used only if preferlocalcards=1)
				{
					if(has_local && !local_left) { request_cw_from_readers(ert, 0); }
					break;
				}
				case 3:
				{
					// any fallback reader not asked yet
					if(has_not_fallback && !reader_not_flb_left) { request_cw_from_readers(ert, 0); }
					break;
				}
			}

			if(!reader_left // no more matching reader
#ifdef CS_CACHEEX
					&& !cfg.wait_until_ctimeout
#endif
				)
				{ ert->rc = E_NOTFOUND; } // so we set the return code

			break;
		}

		case E_TIMEOUT: // if timeout, we have to send timeout to client: this is done by ecm_timeout callback
			return;
			break;

		case E_UNHANDLED:
			return;
			break;

		default:
			cs_log("unexpected ecm answer rc=%d.", ea->rc);
			return;
			break;
	}

	// request is now final (found or not-found) -> deliver to the client
	if(ert->rc < E_99)
		send_dcw(ert->client, ert);
}
|
|
|
|
// Extract the provider id from a raw ECM, keyed by CA system (high byte of
// the CAID). Returns 0 when the CA system is unknown or no provid is found.
uint32_t chk_provid(uint8_t *ecm, uint16_t caid)
{
	uint32_t provid = 0;

	switch(caid >> 8)
	{
		case 0x01: // seca: provid sits directly at offset 3
			return b2i(2, ecm + 3);

		case 0x05: // viaccess
		{
			// Try the short-ECM layout first; a D2 nano (if present) is skipped.
			int32_t skip = (ecm[4] == 0xD2) ? ecm[5] + 2 : 0;
			if(ecm[5 + skip] == 3 && (ecm[4 + skip] == 0x90 || ecm[4 + skip] == 0x40))
				provid = b2i(3, ecm + 6 + skip) & 0xFFFFF0;

			// Then the long-ECM layout; when it matches it overrides the short one.
			skip = (ecm[6] == 0xD2) ? ecm[7] + 2 : 0;
			if(ecm[7 + skip] == 7 && (ecm[6 + skip] == 0x90 || ecm[6 + skip] == 0x40))
				provid = b2i(3, ecm + 8 + skip) & 0xFFFFF0;
			break;
		}

		case 0x0D: // cryptoworks: walk the descriptor list for tag 0x83
		{
			int32_t pos;
			int32_t len = (((ecm[1] & 0xf) << 8) | ecm[2]) + 3; // section length + header
			for(pos = 8; pos < len; pos += ecm[pos + 1] + 2)
			{
				if(ecm[pos] == 0x83)
					return (uint32_t)ecm[pos + 2] & 0xFE;
			}
			break;
		}

		case 0x18: // nagra2
			if(caid == 0x1801) // more safety
				return b2i(2, ecm + 5);
			break;
	}

	return provid;
}
|
|
|
|
void update_chid(ECM_REQUEST *er)
{
	// Refresh the request's channel id (chid) from the raw ECM payload.
	er->chid = get_subid(er);
}
|
|
|
|
/*
|
|
* This function writes the current CW from ECM struct to a cwl file.
|
|
* The filename is re-calculated and file re-opened every time.
|
|
* This will consume a bit cpu time, but nothing has to be stored between
|
|
* each call. If not file exists, a header is prepended
|
|
*/
|
|
/*
 * This function writes the current CW from ECM struct to a cwl file.
 * The filename is re-calculated and file re-opened every time.
 * This will consume a bit cpu time, but nothing has to be stored between
 * each call. If the file is empty (or newly created), a header is prepended.
 *
 * Fix: the original probed for existence with a separate fopen("r") before
 * the fopen("a+"), which is both racy (TOCTOU between the two opens) and an
 * extra syscall per CW. We now open once in append mode and decide on the
 * header by checking the file size — an empty file gets the header, which
 * also repairs the corner case of a pre-existing zero-byte file.
 */
static void logCWtoFile(ECM_REQUEST *er, uint8_t *cw)
{
	FILE *pfCWL;
	char srvname[CS_SERVICENAME_SIZE];
	/* %s / %s _I %04X _ %s .cwl */
	char buf[256 + sizeof(srvname)];
	char date[9];
	uint8_t i, parity, writeheader = 0;
	struct tm timeinfo;

	/*
	 * search service name for that id and change characters
	 * causing problems in file name
	 */
	get_servicename(cur_client(), er->srvid, er->prid, er->caid, srvname, sizeof(srvname));

	for(i = 0; srvname[i]; i++)
		if(srvname[i] == ' ') { srvname[i] = '_'; }

	/* calc log file name */
	time_t walltime = cs_time();
	localtime_r(&walltime, &timeinfo);
	strftime(date, sizeof(date), "%Y%m%d", &timeinfo);
	snprintf(buf, sizeof(buf), "%s/%s_I%04X_%s.cwl", cfg.cwlogdir, date, er->srvid, srvname);

	if((pfCWL = fopen(buf, "a+")) == NULL)
	{
		/* maybe this fails because the subdir does not exist. Is there a common function to create it?
		for the moment do not print32_t to log on every ecm
		cs_log(""error opening cw logfile for writing: %s (errno=%d %s)", buf, errno, strerror(errno)); */
		return;
	}

	/* single open: an empty (or freshly created) file needs the header */
	if(fseek(pfCWL, 0, SEEK_END) == 0 && ftell(pfCWL) == 0)
		{ writeheader = 1; }

	if(writeheader)
	{
		/* no global macro for cardserver name :( */
		fprintf(pfCWL, "# OSCam cardserver v%s - %s\n", CS_VERSION, SCM_URL);
		fprintf(pfCWL, "# control word log file for use with tsdec offline decrypter\n");
		strftime(buf, sizeof(buf), "DATE %Y-%m-%d, TIME %H:%M:%S, TZ %Z\n", &timeinfo);
		fprintf(pfCWL, "# %s", buf);
		fprintf(pfCWL, "# CAID 0x%04X, SID 0x%04X, SERVICE \"%s\"\n", er->caid, er->srvid, srvname);
	}

	/* one CW half per line, selected by ECM table parity (even/odd) */
	parity = er->ecm[0] & 1;
	fprintf(pfCWL, "%d ", parity);
	for(i = parity * 8; i < 8 + parity * 8; i++)
		{ fprintf(pfCWL, "%02X ", cw[i]); }

	/* better use incoming time er->tps rather than current time? */
	strftime(buf, sizeof(buf), "%H:%M:%S\n", &timeinfo);
	fprintf(pfCWL, "# %s", buf);
	fflush(pfCWL);
	fclose(pfCWL);
}
|
|
|
|
// Record a reader's answer for an ECM request.
// Validates/sanitizes the CW (null CW, NDS half-CW swap, per-DWORD checksum,
// videoguard bad-CW heuristic, CW-cycle check, entropy/autodetect trust
// checks), stores the result in the reader's s_ecm_answer, queues chk_dcw on
// the requesting client, updates the cache and the reader statistics.
// Returns 1 when the answer was queued to a valid client, 0 otherwise.
// rc/rcEx may be downgraded to E_NOTFOUND/E2_WRONG_CHKSUM by the checks below.
int32_t write_ecm_answer(struct s_reader *reader, ECM_REQUEST *er, int8_t rc, uint8_t rcEx, uint8_t *cw, char *msglog, uint16_t used_cardtier, EXTENDED_CW* cw_ex)
{
	if(!reader || !er || !er->tps.time) { return 0; }

	// drop too late answers, to avoid seg fault --> only answer until tps.time+((cfg.ctimeout+500)/1000+1) is accepted
	time_t timeout = time(NULL) - ((cfg.ctimeout + 500) / 1000 + 1);
	if(er->tps.time < timeout) // < and NOT <=
		{ return 0; }

	struct timeb now;
	cs_ftime(&now);

#ifdef CS_CACHEEX_AIO
	uint8_t dontsetAnswered = 0; // keep the ea unanswered (lg-only gate below)
#endif
	uint8_t dontwriteStats = 0; // suppress LB stats (bad checksum / lg-only gate)

	if(er && er->parent)
	{
		// parent is only set on reader->client->ecmtask[], but we want original er
		ECM_REQUEST *er_reader_cp = er;
		er = er->parent; // Now er is "original" ecm, before it was the reader-copy
		er_reader_cp->rc = rc;
		er_reader_cp->idx = 0;
#ifdef CS_CACHEEX_AIO
		er->localgenerated = er_reader_cp->localgenerated;
#endif

		// re-check lateness against the original request's timestamp
		timeout = time(NULL) - ((cfg.ctimeout + 500) / 1000 + 1);
		if(er->tps.time < timeout)
			{ return 0; }
	}

#ifdef CS_CACHEEX_AIO
	// "localgenerated only" gate: reject non-lg CWs for readers configured that way
	if(rc < E_NOTFOUND && !er->localgenerated && (reader->cacheex.localgenerated_only_in || chk_lg_only(er, &reader->cacheex.lg_only_in_tab)) && !chk_srvid_localgenerated_only_exception(er))
	{
		cs_log_dbg(D_CACHEEX, "reader: %s !er->localgenerated - rc: E_NOTFOUND set, no stats written for reader", reader ? reader->label : "-");
		rc = E_NOTFOUND;
		dontsetAnswered = 1;
		dontwriteStats = 1;
	}
#endif

	struct s_ecm_answer *ea = get_ecm_answer(reader, er);
	if(!ea) { return 0; }

	cs_writelock(__func__, &ea->ecmanswer_lock);

	if((ea->status & REQUEST_ANSWERED))
	{
		cs_log_dbg(D_READER, "Reader %s already answer, skip this ecm answer!", reader ? reader->label : "-");
		cs_writeunlock(__func__, &ea->ecmanswer_lock);
		return 0;
	}

	// bad/wrong chksum/ecm
	if(rc == E_NOTFOUND && rcEx == E2_WRONG_CHKSUM)
	{
		cs_log_dbg(D_READER, "ECM for reader %s was bad/has a wrong chksum!", reader ? reader->label : "-");
		rc = E_INVALID;
		rcEx = E2_WRONG_CHKSUM;
		er->stage = 5; // past the last stage: no further readers will be asked

		// dont write stats for bad/wrong chksum/ecm
		dontwriteStats = 1;

		// set all other matching_readers => inactive to skip them and dont spread the bad ecm
		struct s_ecm_answer *ea_list;
		for(ea_list = er->matching_rdr; ea_list; ea_list = ea_list->next)
		{
			ea_list->status &= ~(READER_ACTIVE | READER_FALLBACK);
		}
	}

	// all-zero CW is never valid
	if(rc < E_NOTFOUND && cw && chk_is_null_CW(cw))
	{
		rc = E_NOTFOUND;
		cs_log_dbg(D_TRACE | D_LB, "WARNING: reader %s send fake cw, set rc=E_NOTFOUND!", reader ? reader->label : "-");
	}

	// NDS: reject CWs whose halves are swapped for the expected parity
	if(rc < E_NOTFOUND && cw && !chk_halfCW(er,cw))
	{
		rc = E_NOTFOUND;
		cs_log_dbg(D_TRACE | D_LB, "WARNING: reader %s send wrong swapped NDS cw, set rc=E_NOTFOUND!", reader ? reader->label : "-");
	}

	// ---- per-DWORD checksum validation/repair of the CW ----
	if(reader && cw && rc < E_NOTFOUND)
	{
		if(!cfg.disablecrccws && !reader->disablecrccws)
		{
			if(!chk_if_ignore_checksum(er, &cfg.disablecrccws_only_for) && !chk_if_ignore_checksum(er, &reader->disablecrccws_only_for))
			{
				// each 4-byte group: byte 3 = sum of bytes 0..2 (mod 256)
				uint8_t i, c;
				for(i = 0; i < 16; i += 4)
				{
					c = ((cw[i] + cw[i + 1] + cw[i + 2]) & 0xff);

					if(cw[i + 3] != c)
					{
						uint8_t nano = 0x00;
						if(er->caid == 0x100 && er->ecm[5] > 0x00)
						{
							nano = er->ecm[5]; // seca nano protection
						}

						if(reader->dropbadcws && !nano) // only drop controlword if no cw encryption is applied
						{
							rc = E_NOTFOUND;
							rcEx = E2_WRONG_CHKSUM;
							break;
						}
						else
						{
							if(!nano) // only fix checksum if no cw encryption is applied (nano = 0)
							{
								cs_log_dbg(D_TRACE, "notice: changed dcw checksum byte cw[%i] from %02x to %02x", i + 3, cw[i + 3], c);
								cw[i + 3] = c;
							}
							else
							{
								if(i == 12) // there are servers delivering correct controlwords but with failing last cw checksum (on purpose?!)
								{
									cs_log_dbg(D_TRACE,"NANO%02d: BAD PEER DETECTED, oscam has fixed the last cw crc that wasn't matching!", nano);
									cw[i + 3] = c; // fix the last controlword
								}
								else
								{
									cs_log_dbg(D_TRACE,"NANO%02d: not fixing the crc of this cw since its still encrypted!", nano);
									break; // crc failed so stop!
								}
							}
						}
					}
				}
			}
			else
			{
				cs_log_dbg(D_TRACE, "notice: CW checksum check disabled for %04X:%06X", er->caid, er->prid);
			}
		}
		else
		{
			cs_log_dbg(D_TRACE, "notice: CW checksum check disabled");
		}

		// videoguard heuristic: if checksum checking is disabled for this caid
		// but both DWORD checksums of the relevant half DO match, the CW is
		// probably bogus (real videoguard CWs should not checksum)
		if(chk_if_ignore_checksum(er, &reader->disablecrccws_only_for) && caid_is_videoguard(er->caid)
#ifdef CS_CACHEEX_AIO
			&& !chk_srvid_disablecrccws_only_for_exception(er)
#endif
		)
		{
			uint8_t k, csum;
			uint8_t hit = 0;
			uint8_t oe = checkCWpart(cw, 0) ? 0 : 8; // pick the non-empty CW half
			for(k = 0; k < 8; k += 4)
			{
				csum = ((cw[k + oe] + cw[k + oe + 1] + cw[k + oe + 2]) & 0xff);
				if(cw[k + oe + 3] == csum)
				{
					hit++;
				}
			}
			if(hit > 1)
			{
				char ecmd5s[17 * 3];
				cs_hexdump(0, er->ecmd5, 16, ecmd5s, sizeof(ecmd5s));
				if(reader->dropbadcws)
				{
					rc = E_NOTFOUND;
					rcEx = E2_WRONG_CHKSUM;
					cs_log("Probably got bad CW from reader: %s, caid %04X, srvid %04X (%s) - dropping CW, lg: %i", reader->label, er->caid, er->srvid, ecmd5s
#ifdef CS_CACHEEX_AIO
						, er->localgenerated);
#else
						, 0);
#endif
				}
				else
				{
					cs_log("Probably got bad CW from reader: %s, caid %04X, srvid %04X (%s), lg: %i", reader->label, er->caid, er->srvid, ecmd5s
#ifdef CS_CACHEEX_AIO
						, er->localgenerated);
#else
						, 0);
#endif
				}
			}
		}

	}

#ifdef CW_CYCLE_CHECK
	// CW-cycle plausibility check (see module-cw-cycle-check)
	uint8_t cwc_ct = er->cwc_cycletime > 0 ? er->cwc_cycletime : 0;
	uint8_t cwc_ncwc = er->cwc_next_cw_cycle < 2 ? er->cwc_next_cw_cycle : 2;
	if(!checkcwcycle(er->client, er, reader, cw, rc, cwc_ct, cwc_ncwc))
	{
#ifdef CS_CACHEEX_AIO
		if(!er->localgenerated)
		{
#endif
			rc = E_NOTFOUND;
			rcEx = E2_WRONG_CHKSUM;
			cs_log_dbg(D_CACHEEX | D_CWC | D_LB, "{client %s, caid %04X, srvid %04X} [write_ecm_answer] cyclecheck failed! Reader: %s set rc: %i", (er->client ? er->client->account->usr : "-"), er->caid, er->srvid, reader ? reader->label : "-", rc);

#ifdef CS_CACHEEX_AIO
		}
		else
		{
			// lg-flagged CWs are trusted despite a failed cycle check
			cs_log_dbg(D_CACHEEX | D_CWC | D_LB, "{client %s, caid %04X, srvid %04X} [write_ecm_answer] cyclecheck failed! Reader: %s set rc: %i -> lg-flagged CW -> do nothing", (er->client ? er->client->account->usr : "-"), er->caid, er->srvid, reader ? reader->label : "-", rc);
		}
#endif
	}
	else { cs_log_dbg(D_CACHEEX | D_CWC | D_LB, "{client %s, caid %04X, srvid %04X} [write_ecm_answer] cyclecheck passed! Reader: %s rc: %i", (er->client ? er->client->account->usr : "-"), er->caid, er->srvid, reader ? reader->label : "-", rc); }
#endif
	//END -- SPECIAL CHECKs for rc


	//////////////// TRUST / ENTROPY / AUTODETECT BLOCK ////////////////////////////////////////////////////////////////
	// Reject CWs whose byte entropy is suspiciously low, then run the
	// project-specific autodetect check. Blocked CWs become E_NOTFOUND.
	if(rc == E_FOUND && cw && !chk_is_null_CW(cw))
	{
		/* SKIP CACHEEX readers */
		if(reader && reader->cacheex.mode)
			goto TRUST_DONE;

		/* use a local copy of the CW for the tests */
		uint8_t testcw[16];
		memcpy(testcw, cw, 16);

		////////////////////////////////////////////////////////////
		// ENTROPY CALC
		////////////////////////////////////////////////////////////

		double full = entropy_calc(testcw, 16);
		double half1 = entropy_calc(testcw, 8);
		double half2 = entropy_calc(testcw + 8, 8);

		double threshold;
		double entropy_value;

		// NDS HALF entropy (always 64bit CAID)
		// NOTE(review): thresholds are hard-coded (1.35 / 2.5778) although
		// ENTROPY_THRESHOLD_NDS (2.3) / ENTROPY_THRESHOLD_DEFAULT (3.0) are
		// defined at the top of the file — confirm which values are intended.
		if(er->caid == 0x098D || er->caid == 0x098C)
		{
			entropy_value = (half1 > half2) ? half1 : half2;
			threshold = 1.35;
		}
		else
		{
			entropy_value = full;
			threshold = 2.5778;
		}

		////////////////////////////////////////////////////////////
		// ENTROPY BLOCK (LEVEL 1)
		////////////////////////////////////////////////////////////

		if(entropy_value < threshold)
		{
			cs_log("TRUST BLOCK ENTROPY reader=%s (%04X@%06X/%04X) entropy=%.4f threshold=%.4f",
				reader ? reader->label : "?",
				er->caid,
				er->prid,
				er->srvid,
				entropy_value,
				threshold);

			rc = E_NOTFOUND;
			rcEx = E2_WRONG_CHKSUM;

			cw = NULL;

			goto TRUST_DONE;
		}

		////////////////////////////////////////////////////////////
		// AUTODETECT BLOCK (LEVEL 2)
		////////////////////////////////////////////////////////////

		/* temporarily place testcw in er->cw for the autodetect check */
		// NOTE(review): original_cw is saved but never restored (the failure
		// path zeroes er->cw instead) — confirm this is intentional.
		uint8_t original_cw[16];
		memcpy(original_cw, er->cw, 16);

		memcpy(er->cw, testcw, 16);

		if(!cw_autodetect_and_block(reader, er))
		{
			cs_log("TRUST BLOCK AUTODETECT reader=%s (%04X@%06X/%04X) entropy=%.4f threshold=%.4f",
				reader ? reader->label : "?",
				er->caid,
				er->prid,
				er->srvid,
				entropy_value,
				threshold);

			rc = E_NOTFOUND;
			rcEx = E2_WRONG_CHKSUM;

			memset(er->cw, 0, 16);
			cw = NULL;

			goto TRUST_DONE;
		}

		////////////////////////////////////////////////////////////
		// TRUST OK -> FINAL COPY
		////////////////////////////////////////////////////////////

		memcpy(er->cw, testcw, 16);

		cs_log("TRUST OK reader=%s (%04X@%06X/%04X) entropy=%.4f threshold=%.4f",
			reader ? reader->label : "?",
			er->caid,
			er->prid,
			er->srvid,
			entropy_value,
			threshold);

TRUST_DONE:
		;
	}

	// global protection: never keep a CW pointer for a non-found answer
	if(rc != E_FOUND)
	{
		cw = NULL;
	}
	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef CS_CACHEEX_AIO
	if(!dontsetAnswered)
	{
#endif
		ea->status |= REQUEST_ANSWERED;
#ifdef CS_CACHEEX_AIO
	}
#endif
	ea->rc = rc;
	ea->ecm_time = comp_timeb(&now, &ea->time_request_sent);
	if(ea->ecm_time < 1) { ea->ecm_time = 1; } // set ecm_time 1 if answer immediately
	ea->rcEx = rcEx;
	if(cw) { memcpy(ea->cw, cw, 16); }
	if(msglog) { memcpy(ea->msglog, msglog, MSGLOGSIZE); }
	ea->tier = used_cardtier;
	if(cw_ex)
	{
		ea->cw_ex = *cw_ex;
	}

	cs_writeunlock(__func__, &ea->ecmanswer_lock);

	struct timeb tpe;
	cs_ftime(&tpe);
	int32_t ntime = comp_timeb(&tpe, &er->tps); // total time since the request arrived
	if(ntime < 1) { ntime = 1; }
	cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_answer] reader %s rc %d, ecm time %d ms (%d ms)", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, reader ? reader->label : "-", rc, ea->ecm_time, ntime);

	// send ea for ecm request
	int32_t res = 0;
	struct s_client *cl = er->client;
	if(check_client(cl))
	{
		res = 1;
		add_job(er->client, ACTION_ECM_ANSWER_READER, ea, 0); // chk_dcw
	}

	// distribute ea for pendings
	if(ea->pending) // has pending ea
		{ distribute_ea(ea); }



	if(!ea->is_pending) // not for pending ea - only once for ea
	{
		// cache update
		if(ea && (ea->rc < E_NOTFOUND) && !chk_is_null_CW(ea->cw))
		{
#ifdef CS_CACHEEX_AIO
			int32_t ecmtime = ea->ecm_time;

			// account the cacheex wait time into the cached ecm time
			if(er->cacheex_wait_time_expired && er->cacheex_wait_time)
				ecmtime = ea->ecm_time + er->cacheex_wait_time;
#endif
			add_cache_from_reader(er, reader, er->csp_hash, er->ecmd5, ea->cw, er->caid, er->prid, er->srvid
#ifdef CS_CACHEEX_AIO
				, ecmtime);
#else
				);
#endif
		}

		if(!dontwriteStats)
		{
			// readers stats for LB
			send_reader_stat(reader, er, ea, ea->rc);
		}

		// reader checks
#ifdef WITH_DEBUG
		if(cs_dblevel & D_TRACE)
		{
			char ecmd5[17 * 3];
			cs_hexdump(0, er->ecmd5, 16, ecmd5, sizeof(ecmd5));
			rdr_log_dbg(reader, D_TRACE, "ecm answer for ecm hash %s rc=%d", ecmd5, ea->rc);
		}
#endif
		// Update reader stats:
		if(ea->rc == E_FOUND)
		{
			if(cfg.cwlogdir != NULL)
				{ logCWtoFile(er, ea->cw); } // CWL logging only if cwlogdir is set in config

			reader->ecmsok++;
#ifdef CS_CACHEEX_AIO
			if(er->localgenerated)
				reader->ecmsoklg++;
#endif
			reader->webif_ecmsok++;
#ifdef CS_CACHEEX
			struct s_client *eacl = reader->client;
			if(cacheex_reader(reader) && check_client(eacl))
			{
				eacl->cwcacheexgot++;
				cacheex_add_stats(eacl, ea->er->caid, ea->er->srvid, ea->er->prid, 1
#ifdef CS_CACHEEX_AIO
					, er->localgenerated);
#else
					);
#endif
				first_client->cwcacheexgot++;
#ifdef CS_CACHEEX_AIO
				if(er->localgenerated)
				{
					eacl->cwcacheexgotlg++;
					first_client->cwcacheexgotlg++;
				}
#endif
			}
#endif
		}
		else if(ea->rc == E_NOTFOUND)
		{
			reader->ecmsnok++;
			reader->webif_ecmsnok++;
			if(reader->ecmnotfoundlimit && reader->ecmsnok >= reader->ecmnotfoundlimit)
			{
				rdr_log(reader, "ECM not found limit reached %u. Restarting the reader.",
						reader->ecmsnok);
				reader->ecmsnok = 0; // Reset the variable
				reader->ecmshealthnok = 0; // Reset the variable
				add_job(reader->client, ACTION_READER_RESTART, NULL, 0);
			}
		}

		// this fixes big oscam mistake
		// wrong reader status on web info aka not counted timeouts which displayed
		// reader info 100 percent OK but reader had a ton of unhandled timeouts!
		else if(ea->rc == E_TIMEOUT)
		{
#ifdef WITH_LB
			STAT_QUERY q;
			readerinfofix_get_stat_query(er, &q);
			READER_STAT *s;
			s = readerinfofix_get_add_stat(reader, &q);
			if (s)
			{
				cs_log_dbg(D_LB, "inc fail {client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_answer] reader %s rc %d, ecm time %d ms (%d ms)", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, reader ? reader->label : "-", rc, ea->ecm_time, ntime);
				readerinfofix_inc_fail(s); // now increase fail factor for unhandled timeouts
			}
#endif
			reader->ecmstout++; // now append timeouts to the readerinfo timeout count
			reader->webif_ecmstout++;
		}

		// Reader ECMs Health Try (by Pickser)
		// recompute success/fail/timeout percentages for the web interface
		if(reader->ecmsok != 0 || reader->ecmsnok != 0 || reader->ecmstout != 0)
		{
			reader->ecmshealthok = ((double) reader->ecmsok / (reader->ecmsok + reader->ecmsnok + reader->ecmstout)) * 100;
#ifdef CS_CACHEEX_AIO
			reader->ecmshealthoklg = ((double) reader->ecmsoklg / (reader->ecmsok + reader->ecmsnok + reader->ecmstout)) * 100;
#endif
			reader->ecmshealthnok = ((double) reader->ecmsnok / (reader->ecmsok + reader->ecmsnok + reader->ecmstout)) * 100;
			reader->ecmshealthtout = ((double) reader->ecmstout / (reader->ecmsok + reader->ecmsnok + reader->ecmstout)) * 100;
		}

		if(rc == E_FOUND && reader->resetcycle > 0)
		{
			reader->resetcounter++;
			if(reader->resetcounter > reader->resetcycle)
			{
				reader->resetcounter = 0;
				rdr_log(reader, "Resetting reader, resetcyle of %d ecms reached", reader->resetcycle);
				reader->card_status = CARD_NEED_INIT;
				// NOTE(review): cl is er->client (the requesting client), not
				// reader->client — confirm cardreader_reset expects this.
				cardreader_reset(cl);
			}
		}
	}

	return res;
}
|
|
|
|
// chid calculation from module stat to here
|
|
// to improve the quickfix concerning ecm chid info and extend it
|
|
// to all client requests whereby the chid is known in module stat
|
|
|
|
uint32_t get_subid(ECM_REQUEST *er)
{
	// Extract the CAS-specific sub id (chid/sub-channel) from the raw ECM
	// payload. The byte offset of the id depends on the conditional access
	// system, selected by the high byte of the CAID. Returns 0 when there is
	// no ECM data or the CA system carries no sub id at a known position.
	if(!er->ecmlen)
	{
		return 0;
	}

	int32_t offset = -1; // -1 means: no sub id for this CA system

	switch(er->caid >> 8)
	{
		case 0x01: // seca
			offset = 7;
			break;

		case 0x05: // viaccess
			offset = 8;
			break;

		case 0x06: // irdeto
			offset = 6;
			break;

		case 0x09: // videoguard
			offset = 11;
			break;

		case 0x4A: // DRE-Crypt, Bulcrypt, Tongfang and others?
			// Bulcrypt and DRE-Crypt carry no sub id at this position
			if(!caid_is_bulcrypt(er->caid) && !caid_is_dre(er->caid))
			{
				offset = 6;
			}
			break;

		default:
			break;
	}

	return (offset < 0) ? 0 : b2i(2, er->ecm + offset);
}
|
|
|
|
static void set_readers_counter(ECM_REQUEST *er)
{
	// Recount the readers attached to this request by category:
	// regular vs. fallback, and cacheex vs. local. Entries in the
	// matching reader list without READER_ACTIVE are ignored.
	struct s_ecm_answer *entry;

	er->reader_count = 0;
	er->fallback_reader_count = 0;
	er->localreader_count = 0;
	er->cacheex_reader_count = 0;

	for(entry = er->matching_rdr; entry; entry = entry->next)
	{
		if(!(entry->status & READER_ACTIVE))
		{
			continue;
		}

		// regular / fallback split
		if(entry->status & READER_FALLBACK)
		{
			er->fallback_reader_count++;
		}
		else
		{
			er->reader_count++;
		}

		// cacheex / local split (cacheex takes precedence)
		if(cacheex_reader(entry->reader))
		{
			er->cacheex_reader_count++;
		}
		else if(is_localreader(entry->reader, er))
		{
			er->localreader_count++;
		}
	}
}
|
|
|
|
/* Apply a CW found in the internal cache (wfc->er_cache) to a pending
 * request (wfc->er_new): copies the CW, updates rc/group/cacheex bookkeeping,
 * pushes cacheex statistics, applies the optional configured delay, and —
 * when the request was still unanswered (E_UNHANDLED) — sends the DCW to the
 * client. Does nothing to the CW if the request was already answered
 * (er->rc < E_NOTFOUND). Caller keeps ownership of both ECM_REQUESTs. */
void write_ecm_answer_fromcache(struct s_write_from_cache *wfc)
{
	ECM_REQUEST *er = NULL;
	ECM_REQUEST *ecm = NULL;

	er = wfc->er_new;   // the live request being answered
	ecm = wfc->er_cache; // the cached answer

#ifdef CS_CACHEEX_AIO
	// cw_count values above 0x0F000000 flag locally-generated CWs (AIO encoding)
	if(ecm->localgenerated || (ecm->cw_count > 0x0F000000))
		er->localgenerated = 1;
#endif

	// remember original state so we only send_dcw() for still-unanswered requests
	int8_t rc_orig = er->rc;

	er->grp |= ecm->grp; // update group
#ifdef CS_CACHEEX
	if(ecm->from_csp) { er->csp_answered = 1; } // update er as answered by csp (csp have no group)
#endif

	// only fill in the CW when the request has not been answered yet
	if(er->rc >= E_NOTFOUND)
	{
#ifdef CS_CACHEEX
		if(ecm->cacheex_src) // from cacheex or csp
		{
			er->rc = E_CACHEEX;
		}
		else
#endif
			{ er->rc=E_CACHE1; } // from normal readers

		memcpy(er->cw, ecm->cw, 16);

		er->selected_reader = ecm->selected_reader;

		// log CW entropy for diagnostics (see entropy_log_ecm at top of file)
		entropy_log_ecm(er, er->selected_reader);

		//////////////// AUTODETECT INVALID CW BLOCK ////////////////////////////////////////////////
		// NOTE(review): on rejection we return early — no stats push, no delay,
		// and no send_dcw(); presumably the request then times out. Confirm intended.
		if(!cw_autodetect_and_block(er->selected_reader, er))
		{
			return;
		}
		////////////////////////////////////////////////////////////////////////////////////////////

		er->cw_count = ecm->cw_count;

#ifdef CS_CACHEEX
		// here we should be sure cex client has not been freed!
		if(ecm->cacheex_src && is_valid_client(ecm->cacheex_src) && !ecm->cacheex_src->kill)
		{
			er->cacheex_src = ecm->cacheex_src;
			er->cwc_cycletime = ecm->cwc_cycletime;
			er->cwc_next_cw_cycle = ecm->cwc_next_cw_cycle;
		}
		else
		{
			er->cacheex_src = NULL;
		}

		// push statistics when the receiving client is itself a cacheex mode-1 client
		int8_t cacheex = check_client(er->client) && er->client->account ? er->client->account->cacheex.mode : 0;
		if(cacheex == 1 && check_client(er->client))
		{
			cacheex_add_stats(er->client, er->caid, er->srvid, er->prid, 0
#ifdef CS_CACHEEX_AIO
				, er->localgenerated);
#else
				);
#endif
			er->client->cwcacheexpush++;
			if(er->client->account)
				{ er->client->account->cwcacheexpush++; }
			first_client->cwcacheexpush++;

#ifdef CS_CACHEEX_AIO
			// separate counters for locally-generated CWs
			if(er->localgenerated)
			{
				er->client->cwcacheexpushlg++;
				first_client->cwcacheexpushlg++;
			}
#endif

		}
#endif

#ifdef CS_CACHEEX
		if(cfg.delay && cacheex!=1) // No delay on cacheexchange mode 1 client!
			{ cs_sleepms(cfg.delay); }
#else
		if(cfg.delay)
			{ cs_sleepms(cfg.delay); }
#endif

		// answer the client only if nothing else answered it before us
		if(rc_orig == E_UNHANDLED)
		{
			cs_log_dbg(D_LB,"{client %s, caid %04X, prid %06X, srvid %04X} [write_ecm_answer_fromcache] found cw in CACHE (count %d)!", (check_client(er->client)?er->client->account->usr:"-"),er->caid, er->prid, er->srvid,
#ifdef CS_CACHEEX_AIO
				// NOTE(review): this log argument strips the AIO "localgenerated" flag
				// from cw_count as a side effect (^=) — confirm that is intended.
				(er->cw_count > 0x0F000000) ? er->cw_count ^= 0x0F000000 : er->cw_count);
#else
				er->cw_count);
#endif
			send_dcw(er->client, er);
		}
	}
}
|
|
|
|
#ifdef CS_CACHEEX_AIO
|
|
/* Decide whether an incoming ECM should be processed or dropped, based on the
 * AIO ecm-cache (keyed by csp_hash). Returns true = process the ECM,
 * false = drop it (too old, cache full, or allocation failure).
 * First sighting of a hash inserts a new cache entry; a repeat sighting is
 * dropped once it has been seen for longer than cfg.ecm_cache_droptime
 * seconds. All cache access is under the ecm_cache_lock write lock. */
static bool ecm_cache_check(ECM_REQUEST *er)
{
	// feature disabled (no init or droptime 0) -> always process
	if(ecm_cache_init_done && cfg.ecm_cache_droptime > 0)
	{
		ECM_CACHE *ecm_cache = NULL;
		SAFE_RWLOCK_WRLOCK(&ecm_cache_lock);
		ecm_cache = find_hash_table(&ht_ecm_cache, &er->csp_hash, sizeof(uint32_t), &compare_csp_hash_ecmcache);
		if(!ecm_cache)
		{
			// ecm_cache-size(count/memory) pre-check
			if(
				(cfg.ecm_cache_size && (cfg.ecm_cache_size > tommy_hashlin_count(&ht_ecm_cache)))
				|| (cfg.ecm_cache_memory && (cfg.ecm_cache_memory*1024*1024 > tommy_hashlin_memory_usage(&ht_ecm_cache)))
			)
			{
				if(cs_malloc(&ecm_cache, sizeof(ECM_CACHE)))
				{
					// first time we see this hash: record it and let the ECM through
					ecm_cache->csp_hash = er->csp_hash;
					cs_ftime(&ecm_cache->first_recv_time);
					cs_ftime(&ecm_cache->upd_time);

					tommy_hashlin_insert(&ht_ecm_cache, &ecm_cache->ht_node, ecm_cache, tommy_hash_u32(0, &er->csp_hash, sizeof(er->csp_hash)));
					tommy_list_insert_tail(&ll_ecm_cache, &ecm_cache->ll_node, ecm_cache);

					SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
					return true;
				}
				else{
					// allocation failure: drop the ECM rather than process untracked
					SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
					cs_log("[ecm_cache] ERROR: NO added HASH to ecm_cache!!");
					return false;
				}
			}
			else{
				// cache is at its count/memory limit: trigger cleanup and drop
				// clean cache call;
				SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
				ecm_cache_cleanup(true);
				return false;
			}
		}
		// ecm found
		else{
			int64_t gone_diff = 0;
			// ms elapsed since this hash was first seen
			gone_diff = comp_timeb(&er->tps, &ecm_cache->first_recv_time);
			cs_ftime(&ecm_cache->upd_time);

			if(gone_diff >= cfg.ecm_cache_droptime * 1000)
			{
				// hash older than droptime: drop this ECM
				cs_log_dbg(D_CW_CACHE, "[ecm_cache] ECM drop, current ecm_cache_size: %i - ecm_cache-mem-size: %i MiB", count_hash_table(&ht_ecm_cache), (int)(tommy_hashlin_memory_usage(&ht_ecm_cache)/1024/1024));
				SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
				return false;
			}
		}

		SAFE_RWLOCK_UNLOCK(&ecm_cache_lock);
		return true;
	}
	else{
		cs_log_dbg(D_CW_CACHE,"[ecm_cache] ecm_cache_init_done %i cfg.ecm_cache_size: %"PRIu32" cfg.ecm_cache_memory %"PRIu32" MiB", ecm_cache_init_done, cfg.ecm_cache_size, cfg.ecm_cache_memory);
		return true;
	}
}
|
|
#endif
|
|
|
|
/* Main entry point for a client CW request. Validates the request (account,
 * ECM size, group, filters, timeframe, ...), applies CAID/provider quickfixes
 * and betatunnel remapping, checks the internal CW cache, builds the list of
 * matching readers and finally dispatches the request to them (or answers the
 * client immediately on error / cache hit). Takes ownership of er: every exit
 * path either frees it or inserts it into ecmcwcache.
 *
 * Fix: the nagra->beta remap debug log printed er->ecm[44] although the
 * matching condition tests er->ecm[13]/er->ecm[14] (the mirror beta->nagra
 * branch correctly logs ecm[3]/ecm[4]) — corrected to er->ecm[14]. */
void get_cw(struct s_client *client, ECM_REQUEST *er)
{
	// clienttimeout per CAID override
	uint32_t client_timeout = cfg.ctimeout;
	int32_t percaid_timeout = caidvaluetab_get_value(&cfg.ctimeouttab, er->caid, 0);

	if(percaid_timeout > 0)
	{
		client_timeout = percaid_timeout;
	}

	er->client_timeout = client_timeout;

#ifdef CS_CACHEEX_AIO

	// hash first so ecm_cache_check() can key on csp_hash; a drop decision
	// answers the client with E_INVALID and releases the request
	cacheex_update_hash(er);
	if(!ecm_cache_check(er))
	{
		er->rc = E_INVALID;
		send_dcw(client, er);
		free_ecm(er);
		return;
	}
#endif
	cs_log_dump_dbg(D_ATR, er->ecm, er->ecmlen, "get cw for ecm:");
	cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] NEW REQUEST!", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
	increment_n_request(client);

	int32_t i, j, m;
	time_t now = time((time_t *)0);
	uint32_t line = 0;
	uint16_t sct_len;

	er->client = client;
	er->rc = E_UNHANDLED; // set default rc status to unhandled
	er->cwc_next_cw_cycle = 2; // set it to: we dont know

	// user was on freetv or didn't request for some time
	// so we reset lastswitch to get correct stats/webif display
	if(now - client->lastecm > cfg.hideclient_to) { client->lastswitch = 0; }
	client->lastecm = now;

	if(client == first_client || !client->account || client->account == first_client->account)
	{
		// DVBApi+serial is allowed to request anonymous accounts:
		int16_t listenertype = get_module(client)->listenertype;
		if(listenertype != LIS_DVBAPI && listenertype != LIS_SERIAL)
		{
			er->rc = E_INVALID;
			er->rcEx = E2_GLOBAL;
			snprintf(er->msglog, sizeof(er->msglog), "invalid user account %s", username(client));
		}
	}

	// ecmlen must be 0 (no ecm) or >2 (because SCT_LEN() needs at least 3 bytes)
	if(er->ecmlen < 0 || er->ecmlen == 1 || er->ecmlen == 2)
	{
		er->rc = E_INVALID;
		er->rcEx = E2_GLOBAL;
		snprintf(er->msglog, sizeof(er->msglog), "ECM size %d invalid, ignored! client %s", er->ecmlen, username(client));
	}

	if(er->ecmlen > MAX_ECM_SIZE)
	{
		er->rc = E_INVALID;
		er->rcEx = E2_GLOBAL;
		snprintf(er->msglog, sizeof(er->msglog), "ECM size %d > Max ECM size %d, ignored! client %s", er->ecmlen, MAX_ECM_SIZE, username(client));
	}

	if(er->ecmlen > 2)
	{
		// trim to the real section length carried in the ECM header
		sct_len = SCT_LEN(er->ecm);
		if(sct_len > er->ecmlen || sct_len < 4)
		{
			er->rc = E_INVALID;
			er->rcEx = E2_GLOBAL;
			snprintf(er->msglog, sizeof(er->msglog), "Real ECM size %d > ECM size %d, ignored! client %s", sct_len, er->ecmlen, username(client));
		}
		er->ecmlen = sct_len;
	}

	if(!client->grp)
	{
		er->rc = E_INVALID;
		er->rcEx = E2_GROUP;
		snprintf(er->msglog, sizeof(er->msglog), "invalid user group %s", username(client));
	}

	// add chid for all client requests as in module stat
	update_chid(er);

	// betacrypt ecm with nagra header
	if(chk_is_betatunnel_caid(er->caid) == 1 && (er->ecmlen == 0x89 || er->ecmlen == 0x4A) && er->ecm[3] == 0x07 && (er->ecm[4] == 0x84 || er->ecm[4] == 0x45))
	{
		if(er->caid == 0x1702)
		{
			er->caid = 0x1833;
		}
		else
		{
			check_lb_auto_betatunnel_mode(er);
		}
		cs_log_dbg(D_TRACE, "Quickfix remap beta->nagra: 0x%X, 0x%X, 0x%X, 0x%X", er->caid, er->ecmlen, er->ecm[3], er->ecm[4]);
	}

	// nagra ecm with betacrypt header 1801, 1833, 1834, 1835
	if(chk_is_betatunnel_caid(er->caid) == 2 && (er->ecmlen == 0x93 || er->ecmlen == 0x54) && er->ecm[13] == 0x07 && (er->ecm[14] == 0x84 || er->ecm[14] == 0x45))
	{
		if(er->caid == 0x1833)
		{
			er->caid = 0x1702;
		}
		else
		{
			er->caid = 0x1722;
		}
		// fixed: log the byte actually tested above (ecm[14], was ecm[44])
		cs_log_dbg(D_TRACE, "Quickfix remap nagra->beta: 0x%X, 0x%X, 0x%X, 0x%X", er->caid, er->ecmlen, er->ecm[13], er->ecm[14]);
	}

	// Ariva quickfix (invalid nagra provider)
	if(((er->caid & 0xFF00) == 0x1800) && er->prid > 0x00FFFF)
		{ er->prid = 0; }

	// Check for invalid provider, extract provider out of ecm:
	uint32_t prid = chk_provid(er->ecm, er->caid);
	if(!er->prid)
	{
		er->prid = prid;
	}
	else
	{
		if(prid && prid != er->prid)
		{
			cs_log_dbg(D_TRACE, "provider fixed: %04X@%06X to %04X@%06X", er->caid, er->prid, er->caid, prid);
			er->prid = prid;
		}
	}

#ifdef MODULE_NEWCAMD
	// Set providerid for newcamd clients if none is given
	if(!er->prid && client->ncd_server)
	{
		int32_t pi = client->port_idx;
		// NOTE(review): "nports >= pi" allows pi == nports, one past the last
		// configured port — looks like it should be "nports > pi"; confirm
		// against the ports[] array semantics before changing.
		if(pi >= 0 && cfg.ncd_ptab.nports && cfg.ncd_ptab.nports >= pi && cfg.ncd_ptab.ports[pi].ncd)
			{ er->prid = cfg.ncd_ptab.ports[pi].ncd->ncd_ftab.filts[0].prids[0]; }
	}
#endif

	// CAID not supported or found
	if(!er->caid)
	{
		er->rc = E_INVALID;
		er->rcEx = E2_CAID;
		snprintf(er->msglog, MSGLOGSIZE, "CAID not supported or found");
	}

	// user expired
	if(client->expirationdate && client->expirationdate < client->lastecm)
		{ er->rc = E_EXPDATE; }

	// out of timeframe
	if(client->allowedtimeframe_set)
	{
		struct tm acttm;
		localtime_r(&now, &acttm);
		int32_t curday = acttm.tm_wday;
		char *dest = strstr(weekdstr,"ALL");
		int32_t all_idx = (dest - weekdstr) / 3;
		uint8_t allowed = 0;

		// checkout if current time is allowed in the current day
		allowed = CHECK_BIT(client->allowedtimeframe[curday][acttm.tm_hour][acttm.tm_min / 30], (acttm.tm_min % 30));

		// or checkout if current time is allowed for all days
		allowed |= CHECK_BIT(client->allowedtimeframe[all_idx][acttm.tm_hour][acttm.tm_min / 30], (acttm.tm_min % 30));

		if(!(allowed))
		{
			er->rc = E_EXPDATE;
		}
		cs_log_dbg(D_TRACE, "Check Timeframe - result: %d, day:%s time: %02dH%02d, allowed: %s\n", er->rc, shortDay[curday], acttm.tm_hour, acttm.tm_min, allowed ? "true" : "false");
	}

	// user disabled
	if(client->disabled != 0)
	{
		if(client->failban & BAN_DISABLED)
		{
			cs_add_violation(client, client->account->usr);
			cs_disconnect_client(client);
		}
		er->rc = E_DISABLED;
	}

	if(!chk_global_whitelist(er, &line))
	{
		debug_ecm(D_TRACE, "whitelist filtered: %s (%s) line %d", username(client), buf, line);
		er->rc = E_INVALID;
	}

#ifdef CS_CACHEEX
	// cacheex mode-2 clients may only push, not request, unless explicitly allowed
	if(client->account && client->account->cacheex.mode == 2 && !client->account->cacheex.allow_request)
	{
		er->rc = E_INVALID;
		snprintf(er->msglog, MSGLOGSIZE, "invalid request from cacheex-2 client");
	}
#endif

	// rc < 100 -> ecm error
	if(er->rc >= E_UNHANDLED)
	{
		m = er->caid;
		i = er->srvid;

		if(i != client->last_srvid || !client->lastswitch)
		{
			if(cfg.usrfileflag)
				{ cs_statistics(client); }
			client->lastswitch = now;
		}

		// user sleeping
		if(client->tosleep && (now - client->lastswitch > client->tosleep))
		{
			if(client->failban & BAN_SLEEPING)
			{
				cs_add_violation(client, client->account->usr);
				cs_disconnect_client(client);
			}
			if(client->c35_sleepsend != 0)
			{
				er->rc = E_STOPPED; // send sleep command CMD08 {00 255}
			}
			else
			{
				er->rc = E_SLEEPING;
			}
		}

		client->last_srvid = i;
		client->last_caid = m;
		client->last_provid = er->prid;

		// section length from the ECM header (12-bit length field + 3 header bytes)
		int32_t ecm_len = (((er->ecm[1] & 0x0F) << 8) | er->ecm[2]) + 3;

		// run the remaining validity checks in order; stop as soon as one fails
		for(j = 0; (j < 6) && (er->rc >= E_UNHANDLED); j++)
		{
			switch(j)
			{
				case 0:
					// fake (uniq)
					if(client->dup)
						{ er->rc = E_FAKE; }
					break;

				case 1:
					// invalid (caid)
					if(!chk_bcaid(er, &client->ctab))
					{
						er->rc = E_INVALID;
						er->rcEx = E2_CAID;
						snprintf(er->msglog, MSGLOGSIZE, "invalid caid 0x%04X", er->caid);
					}
					break;

				case 2:
					// invalid (srvid)
					// matching srvids (or 0000) specified in betatunnel will bypass this filter
					if(!chk_srvid(client, er))
					{
						if(!chk_on_btun(SRVID_ZERO, client, er))
						{
							er->rc = E_INVALID;
							snprintf(er->msglog, MSGLOGSIZE, "invalid SID");
						}
					}
					break;

				case 3:
					// invalid (ufilters)
					if(!chk_ufilters(er))
						{ er->rc = E_INVALID; }
					break;

				case 4:
					// invalid (sfilter)
					if(!chk_sfilter(er, &get_module(client)->ptab))
						{ er->rc = E_INVALID; }
					break;

				case 5:
					// corrupt
					if((i = er->ecmlen - ecm_len))
					{
						if(i > 0)
						{
							// payload longer than header says: trim it
							cs_log_dbg(D_TRACE, "warning: ecm size adjusted from %d to %d", er->ecmlen, ecm_len);
							er->ecmlen = ecm_len;
						}
						else
							{ er->rc = E_CORRUPT; } // payload shorter than header: corrupt
					}
					break;
			}
		}
	}

	// Check for odd/even byte
	if(get_odd_even(er) == 0)
	{
		cs_log_dbg(D_TRACE, "warning: ecm with null odd/even byte from %s", (check_client(er->client) ? er->client->account->usr : "-"));
		er->rc = E_INVALID;
	}

	// not continue, send rc to client
	if(er->rc < E_UNHANDLED)
	{
		send_dcw(client, er);
		free_ecm(er);
		return;
	}

#ifdef CS_CACHEEX
	int8_t cacheex = client->account ? client->account->cacheex.mode : 0;
	er->from_cacheex1_client = 0;
	if(cacheex == 1) {er->from_cacheex1_client = 1;}
#endif

	// set preferlocalcards for this ecm request (actually, parameter
	// is per user based, maybe in future it will be caid based too)
	er->preferlocalcards = cfg.preferlocalcards;
	if(client->account && client->account->preferlocalcards > -1)
	{
		er->preferlocalcards = client->account->preferlocalcards;
	}
	if(er->preferlocalcards <0 || er->preferlocalcards >2) {er->preferlocalcards=0;}

	if(chk_is_betatunnel_caid(er->caid) && client->ttab.ttnum)
	{
		cs_log_dump_dbg(D_TRACE, er->ecm, 13, "betatunnel? ecmlen=%d", er->ecmlen);
		cs_betatunnel(er);
	}

	// ignore ecm...
	int32_t offset = 3;

	// ...and betacrypt header for cache md5 calculation
	if(caid_is_betacrypt(er->caid))
		{ offset = 13; }

	uint8_t md5tmp[MD5_DIGEST_LENGTH];

	// store ECM in cache
	memcpy(er->ecmd5, MD5(er->ecm + offset, er->ecmlen - offset, md5tmp), CS_ECMSTORESIZE);
	cacheex_update_hash(er);
	ac_chk(client, er, 0);

	//******** CHECK IF FOUND ECM IN CACHE
	struct ecm_request_t *ecm = NULL;
	ecm = check_cache(er, client);
	if(ecm) // found in cache
	{
		cs_log_dbg(D_LB,"{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] cw found immediately in cache! ", (check_client(er->client)?er->client->account->usr:"-"),er->caid, er->prid, er->srvid);

		struct s_write_from_cache *wfc = NULL;
		if(!cs_malloc(&wfc, sizeof(struct s_write_from_cache)))
		{
			NULLFREE(ecm);
			free_ecm(er);
			return;
		}

		wfc->er_new = er;
		wfc->er_cache = ecm;
		write_ecm_answer_fromcache(wfc);
		NULLFREE(wfc);
		NULLFREE(ecm);
		free_ecm(er);

		return;
	}

	// zaplist ACoSC
#ifdef CS_ANTICASC
	if(cfg.acosc_enabled)
	{
		cs_writelock(__func__, &clientlist_lock);
		insert_zaplist(er, client);
		cs_writeunlock(__func__, &clientlist_lock);
	}
#endif

	er->reader_avail = 0;
	er->readers = 0;

	struct s_ecm_answer *ea, *prv = NULL;
	struct s_reader *rdr;

	cs_readlock(__func__, &readerlist_lock);
	cs_readlock(__func__, &clientlist_lock);

	// build the matching_rdr list: one s_ecm_answer per reader this ECM can go to
	for(rdr = first_active_reader; rdr; rdr = rdr->next)
	{
		uint8_t is_fallback = chk_is_fixed_fallback(rdr, er);
		int8_t match = matching_reader(er, rdr);

		if(!match) // if this reader does not match, check betatunnel for it
			match = lb_check_auto_betatunnel(er, rdr);

		if(match)
		{
			er->reader_avail++;

#ifdef CS_CACHEEX
			if(cacheex == 1 && !cacheex_reader(rdr)) // ex1-cl only ask ex1-rdr
				{ continue; }
#endif

			if(!cs_malloc(&ea, sizeof(struct s_ecm_answer)))
				{ goto OUT; }

#ifdef WITH_EXTENDED_CW
			// Correct CSA mode is CBC - default to that instead
			ea->cw_ex.algo_mode = CW_ALGO_MODE_CBC;
#endif

			er->readers++;

			ea->reader = rdr;
			ea->er = er;
			ea->rc = E_UNHANDLED;
			if(prv)
				{ prv->next = ea; }
			else
				{ er->matching_rdr = ea; }
			prv = ea;

			ea->status = READER_ACTIVE;
			if(cacheex_reader(rdr))
				{ ea->status |= READER_CACHEEX; }
			else if(is_localreader(rdr, er))
				{ ea->status |= READER_LOCAL; }

			// fixed fallback readers stay fallback unless they are local and
			// local cards are preferred
			if(is_fallback && (!is_localreader(rdr, er) || (is_localreader(rdr, er) && !er->preferlocalcards)))
				{ ea->status |= READER_FALLBACK; }

			ea->pending = NULL;
			ea->is_pending = false;
			cs_lock_create(__func__, &ea->ecmanswer_lock, "ecmanswer_lock", 5000);
		}
	}

OUT:
	cs_readunlock(__func__, &clientlist_lock);
	cs_readunlock(__func__, &readerlist_lock);

	lb_set_best_reader(er);

	// set reader_count and fallback_reader_count
	set_readers_counter(er);

	// if preferlocalcards > 0, check if we have local readers selected:
	// if not, switch to preferlocalcards = 0 for this ecm
	if(er->preferlocalcards > 0)
	{
		if(er->localreader_count == 0)
		{
			er->preferlocalcards = 0;
			cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} NO local readers, set preferlocalcards = %d",
						(check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, er->preferlocalcards);
		}
	}

#ifdef CS_CACHEEX
	// WAIT_TIME
	uint32_t cacheex_wait_time = 0;
	uint32_t wait_time_no_hitcache = 0;
	uint32_t wait_time_hitcache = 0;

	if(client->account && !client->account->no_wait_time
#ifdef CS_CACHEEX_AIO
		&& !chk_srvid_no_wait_time(er)
#endif
		&& er->preferlocalcards<2)
	{
		wait_time_no_hitcache = get_cacheex_wait_time(er,NULL); // NO check hitcache. Wait_time is dwtime, or, if 0, awtime.
		wait_time_hitcache = get_cacheex_wait_time(er,client); // check hitcache for calculating wait_time! If hitcache wait_time is biggest value between dwtime and awtime, else it's awtime.

		if(
			// If "normal" client and ex1-rdr>0, we cannot use hitcache for calculating wait_time because we cannot know if cw is available or not on ex1 server!
			(cacheex != 1 && er->cacheex_reader_count)
			||
			/* Cw for ex1-cl comes from: INT. cache by "normal" readers (normal clients that ask normal readers), ex1-rdr and ex2-rdr and ex3-rdr.
			 * If readers, we have to wait cws generating by normal clients asking normal readers and answers by ex1-rdr (cannot use hitcache).
			 * If no readers, use hitcache for calculating wait_time.
			 */
			(cacheex == 1 && er->reader_avail)
		)
			{ cacheex_wait_time = wait_time_no_hitcache; }
		else
			{ cacheex_wait_time = wait_time_hitcache; }
	}

	cs_log_dbg(D_TRACE | D_CACHEEX, "[GET_CW] wait_time %d caid %04X prov %06X srvid %04X rc %d cacheex cl mode %d ex1rdr %d", cacheex_wait_time, er->caid, er->prid, er->srvid, er->rc, cacheex, er->cacheex_reader_count);
	cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] wait_time %d - client cacheex mode %d, reader avail for ecm %d, hitcache %d, preferlocalcards %d", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid, cacheex_wait_time, cacheex == 1 ? 1 : 0, er->reader_avail, wait_time_hitcache ? 1 : 0, er->preferlocalcards);
	// END WAIT_TIME calculation

	if(!cacheex_wait_time && (er->reader_count + er->fallback_reader_count) == 0)
#else
	if((er->reader_count + er->fallback_reader_count) == 0)
#endif
	{
		// no reader can serve this request (and no cacheex wait): answer not-found
		er->rc = E_NOTFOUND;
		if(!er->rcEx)
			{ er->rcEx = E2_GROUP; }
		snprintf(er->msglog, MSGLOGSIZE, "no matching reader");
		cs_log_dbg(D_LB, "{client %s, caid %04X, prid %06X, srvid %04X} [get_cw] NO Readers and NO wait_time... not_found! ", (check_client(er->client) ? er->client->account->usr : "-"), er->caid, er->prid, er->srvid);
		send_dcw(client, er);
		free_ecm(er);
		return;
	}

	//insert it in ecmcwcache!
	cs_writelock(__func__, &ecmcache_lock);
	er->next = ecmcwcache;
	ecmcwcache = er;
	ecmcwcache_size++;
	cs_writeunlock(__func__, &ecmcache_lock);

	er->rcEx = 0;
#ifdef CS_CACHEEX
	er->cacheex_wait_time = 0;
	er->cacheex_wait_time_expired = 1;
	er->cacheex_hitcache = 0;
	er->cacheex_mode1_delay = 0;

	if(cacheex_wait_time) // wait time for cacheex
	{
		er->cacheex_wait_time = cacheex_wait_time;
		er->cacheex_wait_time_expired = 0;
		er->cacheex_hitcache = wait_time_hitcache ? 1 : 0; // usefull only when cacheex mode 1 readers answers before wait_time and we have to decide if we have to wait until wait_time expires.
		er->cacheex_mode1_delay = get_cacheex_mode1_delay(er);

		if(!er->cacheex_mode1_delay && er->cacheex_reader_count > 0)
		{
			request_cw_from_readers(er, 1); // setting stop_stage=1, we request only cacheex mode 1 readers. Others are requested at cacheex timeout!
		}
	}
	else
#endif
		request_cw_from_readers(er, 0);

#ifdef WITH_DEBUG
	if(D_CLIENTECM & cs_dblevel)
	{
		char buf[ECM_FMT_LEN];
		format_ecm(er, buf, ECM_FMT_LEN);
		cs_log_dump_dbg(D_CLIENTECM, er->ecm, er->ecmlen, "Client %s ECM dump %s", username(client), buf);
	}
#endif

	// kick the CW-processing thread so timeouts/answers are handled promptly
	cw_process_thread_wakeup();
}
|
|
|
|
/* Render a one-line textual description of an ECM into result (size bytes).
 * When cfg.ecmfmt is unset a fixed "caid@prid/chid/srvid/len:hash[...]"
 * layout is used; otherwise cfg.ecmfmt is interpreted as a template where
 * each letter selects a field (c=caid, o=onid, p=prid, i=chid, d=pid,
 * s=srvid, l=len, h=ecmd5, e=csphash, w=cw, j=distance, g=origin peer,
 * t=tier, y/Y=payload), '0' before a letter hides the field when it is
 * zero/NULL, '\' escapes the next character, and any other character is
 * copied literally. Returns the number of characters written. */
int32_t ecmfmt(char *result, size_t size, uint16_t caid, uint16_t onid, uint32_t prid, uint16_t chid, uint16_t pid,
				uint16_t srvid, uint16_t l, char *ecmd5hex, char *csphash, char *cw, uint16_t origin_peer, uint8_t distance, char *payload, char *tier)
{
	// no user-defined template: use the default fixed layout
	if(!cfg.ecmfmt)
	{
		if(tier && payload)
		{
			return snprintf(result, size, "%04X@%06X/%04X/%04X/%02X:%s:0F06%s:%s", caid, prid, chid, srvid, l, ecmd5hex, payload, tier);
		}
		else if(tier)
		{
			return snprintf(result, size, "%04X@%06X/%04X/%04X/%02X:%s:%s", caid, prid, chid, srvid, l, ecmd5hex, tier);
		}
		else if(payload)
		{
			return snprintf(result, size, "%04X@%06X/%04X/%04X/%02X:%s:0F06%s", caid, prid, chid, srvid, l, ecmd5hex, payload);
		}
		else
		{
			return snprintf(result, size, "%04X@%06X/%04X/%04X/%02X:%s", caid, prid, chid, srvid, l, ecmd5hex);
		}
	}

	// value categories a template character can resolve to
#define ECMFMT_NUMBER 0
#define ECMFMT_STRING 1
#define ECMFMT_CHAR 2

	uint8_t type = 0;          // category of the current field
	uint32_t ivalue = 0;       // numeric value when type == ECMFMT_NUMBER
	char *ifmt = NULL, *sfmt = NULL; // printf formats for number/string fields
	char *svalue = NULL, cvalue = '\0';
	uint8_t hide_if_zero = 0;  // set by a preceding '0' in the template
	char *c;
	uint32_t s = 0;            // chars written so far

	for(c = cfg.ecmfmt; *c; c++)
	{
		// '0' is a modifier for the NEXT template char, not output itself
		if(*c == '0')
		{
			hide_if_zero = 1;
			continue;
		}

		sfmt = NULL;

		switch(*c)
		{
			case 't':
				type = ECMFMT_STRING;
				svalue = tier;
				// no tier known: print 0000 unless the field is hidden-if-zero
				if(tier == NULL && !hide_if_zero)
				{
					type = ECMFMT_NUMBER;
					ifmt = "%04X";
					ivalue = 0;
				}
				break;

			case 'c':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = caid;
				break;

			case 'o':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = onid;
				break;

			case 'p':
				type = ECMFMT_NUMBER;
				ifmt = "%06X";
				ivalue = prid;
				break;

			case 'i':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = chid;
				break;

			case 'd':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = pid;
				break;

			case 's':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = srvid;
				break;

			case 'l':
				type = ECMFMT_NUMBER;
				ifmt = "%02X";
				ivalue = l;
				break;

			case 'h':
				type = ECMFMT_STRING;
				svalue = ecmd5hex;
				break;

			case 'e':
				type = ECMFMT_STRING;
				svalue = csphash;
				break;

			case 'w':
				type = ECMFMT_STRING;
				svalue = cw;
				break;

			case 'j':
				type = ECMFMT_NUMBER;
				ifmt = "%02X";
				ivalue = distance;
				break;

			case 'g':
				type = ECMFMT_NUMBER;
				ifmt = "%04X";
				ivalue = origin_peer;
				break;

			case '\\':
				// escape: emit the next character literally
				c++;
				type = ECMFMT_CHAR;
				cvalue = *c;

				// trailing backslash at end of template: stop here
				if(cvalue == '\0')
					{ return s; }
				break;

			case 'y':
				// payload, truncated to 6 chars, with 0F06 prefix
				type = ECMFMT_STRING;
				svalue = payload;
				sfmt = "0F06%.06s";
				if(payload == NULL && !hide_if_zero)
				{
					type = ECMFMT_NUMBER;
					ifmt = "0F06%06X";
					ivalue = 0;
				}
				break;

			case 'Y':
				// full payload with 0F06 prefix
				type = ECMFMT_STRING;
				svalue = payload;
				sfmt = "0F06%s";
				if(payload == NULL && !hide_if_zero)
				{
					type = ECMFMT_NUMBER;
					// NOTE(review): "%12X" pads with spaces, unlike the
					// zero-padded "%06X" used for 'y' — possibly meant "%012X";
					// confirm before changing.
					ifmt = "0F06%12X";
					ivalue = 0;
				}
				break;

			default:
				// any other template character is copied through literally
				type = ECMFMT_CHAR;
				cvalue = *c;
				break;
		}

		// consume the hide-if-zero modifier: skip empty fields entirely
		if(hide_if_zero)
		{
			if(type == ECMFMT_NUMBER && ivalue == 0)
			{
				hide_if_zero = 0;
				continue;
			}
			else if(type == ECMFMT_STRING && svalue == NULL)
			{
				hide_if_zero = 0;
				continue;
			}
		}

		// append the resolved field to the output buffer
		switch(type)
		{
			case ECMFMT_NUMBER:
				s += snprintf(result + s, size - s, ifmt, ivalue);
				break;

			case ECMFMT_STRING:
				s += snprintf(result + s, size - s , sfmt != NULL ? sfmt : "%s", svalue);
				break;

			case ECMFMT_CHAR:
				if(size - s > 1)
				{
					result[s] = cvalue;
					result[s+1] = '\0';
					s++;
				}
				break;

			default:
				break;
		}
	}

	return s;
}
|
|
|
|
/* Format an ECM_REQUEST into a human-readable string via ecmfmt().
 * Gathers the hex representations of the ECM md5, csp hash and CW, plus —
 * for local Videoguard readers — the answering reader's tier name and last
 * payload. For gbox traffic the peer id and hop distance are taken from the
 * gbox-specific fields. Returns the number of characters written to result. */
int32_t format_ecm(ECM_REQUEST *ecm, char *result, size_t size)
{
	char ecmd5hex[(16*2)+1];
	char csphash[(4*2)+1] = { 0 };
	char cwhex[(16*2)+1];
	char *payload = NULL;
	char *tier = NULL;
#ifdef READER_VIDEOGUARD
	char payload_string[(6*2)+1];
	char tier_string[83];
	struct s_ecm_answer *ea;

	// only local Videoguard readers expose tier info and VgLastPayload
	if(ecm->selected_reader && caid_is_videoguard(ecm->selected_reader->caid) && !is_network_reader(ecm->selected_reader))
	{
		// take the tier from the first answered local reader in the list
		for(ea = ecm->matching_rdr; ea; ea = ea->next)
		{
			if(ea->tier && (ea->status & REQUEST_ANSWERED) && !is_network_reader(ea->reader))
			{
				get_tiername_defaultid(ea->tier, ecm->selected_reader->caid, tier_string);
				tier = tier_string;
				break;
			}
		}

		cs_hexdump(0, ecm->selected_reader->VgLastPayload, 6, payload_string, sizeof(payload_string));
		payload = payload_string;
	}
#endif
	cs_hexdump(0, ecm->ecmd5, 16, ecmd5hex, sizeof(ecmd5hex));
#ifdef CS_CACHEEX
	cs_hexdump(0, (void *)&ecm->csp_hash, 4, csphash, sizeof(csphash));
#endif
	cs_hexdump(0, ecm->cw, 16, cwhex, sizeof(cwhex));
#ifdef MODULE_GBOX
	// request coming from a gbox client: use its source peer and distance
	if(check_client(ecm->client) && get_module(ecm->client)->num == R_GBOX && ecm->gbox_ecm_dist)
		{ return ecmfmt(result, size, ecm->caid, ecm->onid, ecm->prid, ecm->chid, ecm->pid, ecm->srvid, ecm->ecmlen, ecmd5hex, csphash, cwhex, ecm->gbox_ecm_src_peer, ecm->gbox_ecm_dist, payload, tier); }
	// answered by a gbox reader: use the reader's CW source peer and hops
	else if (ecm->selected_reader && ecm->selected_reader->typ == R_GBOX && !ecm->gbox_ecm_dist)
		{ return ecmfmt(result, size, ecm->caid, ecm->onid, ecm->prid, ecm->chid, ecm->pid, ecm->srvid, ecm->ecmlen, ecmd5hex, csphash, cwhex, ecm->selected_reader->gbox_cw_src_peer, ecm->selected_reader->currenthops, payload, tier); }
	else
#endif
		return ecmfmt(result, size, ecm->caid, ecm->onid, ecm->prid, ecm->chid, ecm->pid, ecm->srvid, ecm->ecmlen, ecmd5hex, csphash, cwhex, 0,
					((ecm->selected_reader && ecm->selected_reader->currenthops) ? ecm->selected_reader->currenthops : 0), payload, tier);
}
|