/* module_repo.c — Lektor repository module: download / rescan the kara base
 * and import favorite lists from a remote repository. */
#define _POSIX_C_SOURCE 200809L
#define __LKT_MODULE_MAIN_SOURCE__
#include <lektor/lktmodule.h>
#include "worker.h"
#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <limits.h>
#include <stdlib.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <stdarg.h>
/* Different kinds of updates. These are bit flags combined in the `updating`
 * field of struct module_repo_internal.
 * NOTE(review): REPO_UPDATE_FAV is (1 << 2) == 4, which needs 3 bits of
 * storage — a bitfield declared with width REPO_UPDATE_TYPE_COUNT (2 bits)
 * cannot represent it; verify the field holding these flags is wide enough. */
#define REPO_UPDATE_KARA (1 << 1) /* Downloading or rescanning the bakabase */
#define REPO_UPDATE_FAV (1 << 2) /* Downloading the favorites */
#define REPO_UPDATE_TYPE_COUNT 2 /* Different kinds of download, for some sizes... */
/***********
 * Globals *
 ***********/
/* Reference count for libcurl global state: set to 1 by the first
 * module_repo_new (which calls curl_global_init), incremented by later ones,
 * decremented in mod_free; curl_global_cleanup runs when it reaches 0. */
static volatile unsigned int __curl_init = false;
/*********************
* Private structure *
*********************/
/* Forward declaration, full definition below. */
struct module_repo_internal;

/* Per-update working state threaded through the json parsing callbacks
 * while synchronizing the local base with the remote repository. */
struct kara {
    int ignored_count;                 /* Karas skipped because the on-disk file is newer */
    int update_count;                  /* Karas actually (re)downloaded */
    volatile sqlite3 *db;              /* Shared database handle */
    struct module_repo_internal *repo; /* Owning repo module */
    long id;                           /* Remote id of the current kara */
    long unix_timestamp;               /* Remote modification time of the current kara */
    struct kara_metadata mdt;          /* Metadata fields parsed from the json */
    char mkvpropedit[LKT_LINE_MAX];    /* Path of the mkvpropedit executable (config "externals") */
    char url[LKT_LINE_MAX];            /* Download url crafted from repo->get_id_file */
    char database_filepath[PATH_MAX];  /* Path from the db; also holds the kara_dir prefix */
    char filename[PATH_MAX];           /* Actual on-disk path used for the download */
};
/* Internal state of the repo module, allocated once per module instance. */
struct module_repo_internal {
    /* Just the repo: urls and names read from the configuration */
    char *name;         /* Display name (config "repo/name") */
    char *base_url;     /* Base url (config "repo/url") */
    char *kara_dir;     /* Local folder storing the kara files */
    char *get_all_json; /* Url returning the whole kara list as json */
    char *get_id_json;  /* Url format returning one kara's json by id */
    char *get_id_file;  /* Url format returning one kara's file by id */
    char *get_fav_json; /* Url returning the favorite lists as json */
    const uint64_t version;
    /* Worker threads */
    struct worker_pool workers;
    pthread_mutex_t mtx; /* Protect the updating field */
    /* Bit mask of REPO_UPDATE_* flags, protected by mtx.
     * This used to be a bitfield of REPO_UPDATE_TYPE_COUNT (2) bits, which
     * cannot represent REPO_UPDATE_FAV == (1 << 2) == 4: any write of that
     * flag was silently truncated. A full unsigned int fixes that. */
    volatile unsigned int updating;
    /* The database and the queue */
    struct queue *queue;
    volatile sqlite3 *db;
    /* Craft a filename for newly downloaded karas. Arguments:
     * - str: the destination string, the prefix, i.e. the kara_dir, is already here
     * - len: the length of the string, not its size (which is PATH_MAX)
     * - kara: the kara structure, which contains all the data to create the filename */
    void (*craft_filename)(char str[PATH_MAX], size_t, struct kara *);
};
/* Growable heap buffer used as a curl write target (see __write_mem). */
struct __memory {
    void *mem;   /* malloc'd bytes, owned here; released by __clean_memory */
    size_t size; /* Number of bytes currently stored in mem */
};
/* Open file used as a curl write target (see __write_disk). */
struct __file {
    const char *path; /* Path of the file, kept for error messages */
    int fd;           /* File descriptor; closed by __clean_file */
};
/*********************
* Private functions *
*********************/
/* Recursive mkdir: create every folder along `dir`, final component
 * included (the whole string is treated as a folder path). mkdir(2) failures
 * are deliberately ignored — folders may already exist; callers check
 * accessibility afterwards with access(2). */
static inline void
__mkdir(const char *dir, unsigned int mode)
{
    /* TODO pour le Kubat du futur: include le umask dans la conf. */
    /* Note: the parameter used to be named `umask`, shadowing umask(2). */
    if (mode == 0)
        mode = 00700;
    char tmp[PATH_MAX];
    char *p = NULL;
    safe_snprintf(tmp, sizeof(tmp) / sizeof(char), "%s", dir);
    size_t len = strlen(tmp);
    if (len == 0)
        return; /* Empty path: tmp[len - 1] below would read out of bounds */
    if (tmp[len - 1] == '/')
        tmp[len - 1] = 0;
    /* Create each intermediate folder by temporarily cutting at each '/' */
    for (p = tmp + 1; *p; p++) {
        if (*p == '/') {
            *p = 0;
            mkdir(tmp, mode);
            *p = '/';
        }
    }
    /* Do the final mkdir, because it's a folder. */
    mkdir(tmp, mode);
}
/* Craft an obfuscated filename: only the numeric kara id, so no metadata
 * leaks into the filesystem. `len` is the length of the prefix already
 * present in `str` (the kara_dir). */
static inline void
__craft_filename_obfuscate(char str[PATH_MAX], size_t len, struct kara *kara)
{
    safe_snprintf(str + len, PATH_MAX - len, "%ld.mkv", kara->id);
}
/* Craft a human readable path "<category>/<language>/<author>/<source> -
 * <type><number> - <song>.mkv" after the prefix already present in `str`
 * (`len` is the prefix length), creating the intermediate folders.
 * NOTE(review): assumes safe_snprintf returns the number of characters
 * written (snprintf-like) — verify against its definition, `len` arithmetic
 * depends on it. */
static inline void
__craft_filename_non_obfuscate(char str[PATH_MAX], size_t len, struct kara *kara)
{
    /* Not obfuscate filename, need to create directories, won't fail if not
     * possible. The program will fail later, when write will be attempted. */
    len += safe_snprintf(str + len, PATH_MAX - len, "%s/%s/%s/", kara->mdt.category,
                         kara->mdt.language, kara->mdt.author_name);
    __mkdir(str, 0);
    if (access(str, R_OK | W_OK))
        LOG_ERROR("REPO", "No access in read / write for folder %s", str);
    safe_snprintf(str + len, PATH_MAX - len, "%s - %s%d - %s.mkv", kara->mdt.source_name,
                  kara->mdt.song_type, kara->mdt.song_number, kara->mdt.song_name);
}
/* Close the descriptor held by `f`, if any. Idempotent: the descriptor is
 * marked closed (-1) after the first call. */
static inline void
__clean_file(struct __file *f)
{
    /* The previous test `if (f->fd)` wrongly treated fd 0 (a valid
     * descriptor) as "nothing to close", and on a second call would pass
     * the -1 sentinel to close(2). Test for a valid descriptor instead. */
    if (f->fd >= 0) {
        close(f->fd);
        f->fd = -1;
    }
}
/* Release the buffer owned by `m` and reset it to the empty state.
 * Calling it on an already clean struct is a no-op. */
static inline void
__clean_memory(struct __memory *m)
{
    if (m->mem == NULL)
        return;
    free(m->mem);
    m->mem = NULL;
    m->size = 0;
}
/* curl write callback: append the received bytes to a struct __memory.
 * Returns the number of bytes consumed, or 0 on allocation failure, which
 * makes curl abort the transfer. */
static size_t
__write_mem(char *data, size_t size, size_t nmem, void *user)
{
    struct __memory *mem = (struct __memory *) user;
    const size_t chunk = size * nmem;
    void *grown = realloc(mem->mem, mem->size + chunk);
    RETURN_UNLESS(grown, "Out of memory", 0);
    mem->mem = grown;
    memcpy((uint8_t *) mem->mem + mem->size, data, chunk);
    mem->size += chunk;
    return chunk;
}
/* curl write callback: flush the received bytes to the descriptor of a
 * struct __file. Returns the number of bytes consumed, or 0 on error, which
 * makes curl abort the transfer. */
static size_t
__write_disk(char *data, size_t size, size_t nmem, void *user)
{
    size_t total = size * nmem;
    size_t done = 0;
    struct __file *file = (struct __file *) user;
    /* write(2) may legitimately write less than asked (signals, pipes,
     * quotas): loop until everything is on disk instead of failing the
     * whole transfer on a short write. */
    while (done < total) {
        ssize_t written = write(file->fd, data + done, total - done);
        if (written < 0) {
            if (errno == EINTR)
                continue; /* Interrupted before any byte: just retry */
            LOG_ERROR("REPO", "Failed to write to file '%s'", file->path);
            return 0;
        }
        done += (size_t) written;
    }
    return total;
}
static int
__json_dl(const char *url, char **json)
{
RETURN_UNLESS(json, "Invalid argument", 1);
CURL *curl_handle;
CURLcode res;
struct curl_slist *headers = NULL;
int ret = 1;
struct __memory file = {
.mem = NULL,
.size = 0.
};
/* Only accept json file */
headers = curl_slist_append(headers, "Accept: application/json");
headers = curl_slist_append(headers, "Content-Type: application/json");
curl_handle = curl_easy_init();
curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, headers);
curl_easy_setopt(curl_handle, CURLOPT_URL, url);
curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, __write_mem);
curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *) &file);
curl_easy_setopt(curl_handle, CURLOPT_USERAGENT, "libcurl-agent/1.0");
res = curl_easy_perform(curl_handle);
if (res != CURLE_OK) {
LOG_ERROR("CURL", "curl_easy_perform failed on url %s: %s", url, curl_easy_strerror(res));
goto err;
}
/* Check if recieved json is not empty */
if (file.size == 0) {
LOG_ERROR("CURL", "Gottent file is empty");
goto err;
}
*json = file.mem;
ret = 0;
err:
if (ret != 0)
__clean_memory(&file);
curl_easy_cleanup(curl_handle);
curl_slist_free_all(headers);
return ret;
}
static inline int
__download_kara(const char *url, const char *path, int override)
{
CURL *curl_handle;
char ret = 1;
errno = 0;
int fd = open(path, O_WRONLY | O_APPEND | O_CREAT | O_EXCL | O_NOFOLLOW, S_IRUSR | S_IWUSR);
retest:
if (fd < 0) {
if (errno == EEXIST && ! override) {
LOG_ERROR("REPO", "File '%s' already exists", path);
return 1;
}
else if (errno == EEXIST && override) {
if (unlink(path)) {
LOG_ERROR("REPO", "Failed to unlink file '%s'", path);
return 1;
}
override = false;
fd = open(path, O_WRONLY | O_CREAT | O_NOFOLLOW, S_IRUSR | S_IWUSR);
goto retest;
}
else {
LOG_ERROR("REPO", "Could not open file '%s'", path);
return 1;
}
}
/* TODO: Buffered writes */
struct __file file = {
.path = path,
.fd = fd,
};
curl_handle = curl_easy_init();
curl_easy_setopt(curl_handle, CURLOPT_URL, url);
curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, __write_disk);
curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *) &file);
curl_easy_setopt(curl_handle, CURLOPT_USERAGENT, "libcurl-agent/1.0");
if (CURLE_OK != (ret = curl_easy_perform(curl_handle))) {
LOG_ERROR("CURL", "curl_easy_perform failed: %s", curl_easy_strerror(ret));
goto err;
}
ret = 0;
err:
__clean_file(&file);
curl_easy_cleanup(curl_handle);
return ret;
}
/* Download one kara, write its metadata into the file and mark it available
 * in the base. `current_id` is scratch storage receiving the id of the kara
 * currently playing, so an update of that kara can trigger a skip first. */
static void
__handle_got_json_dl(struct kara *kara, int current_id)
{
    /* Was `¤t_id`: html-entity mangling of `&current_id` */
    database_queue_current_kara(kara->db, NULL, &current_id);
    if (current_id == (int) kara->id) {
        LOG_WARN("REPO", "Update currently playing kara %d, skip it", current_id);
        lkt_queue_send(kara->repo->queue, lkt_event_skip_current, NULL);
    }
    /* Register the kara as present-but-unavailable until fully downloaded */
    if (!database_update_add(kara->db, kara->filename, &kara->mdt, kara->id, false)) {
        LOG_ERROR("REPO", "Could not add unavailable kara %ld to db", kara->id);
        return;
    }
    safe_snprintf(kara->url, LKT_LINE_MAX, kara->repo->get_id_file, kara->id);
    if (__download_kara(kara->url, kara->filename, true)) {
        LOG_WARN("REPO", "Could not download kara %ld at path '%s'",
                 kara->id, kara->filename);
        return;
    }
    if (kara_metadata_write(&kara->mdt, kara->filename, kara->mkvpropedit)) {
        LOG_WARN("REPO", "Could not write metadata to kara '%ld' with path '%s'",
                 kara->id, kara->filename);
        return;
    }
    if (!database_update_set_available(kara->db, kara->id)) {
        LOG_WARN("REPO", "Could not set kara %ld available", kara->id);
        return;
    }
    database_stamp(kara->db);
    ++(kara->update_count);
    LOG_INFO("REPO", "Added kara %ld from repo %s, filepath is %s",
             kara->id, kara->repo->name, kara->filename);
}
/* json_parse callback: called once per (key, val) pair of a kara object,
 * then once more with `comp` true when the object is complete. Field values
 * accumulate in the struct kara passed as `user`; on completion the kara is
 * either ignored (on-disk file newer than the remote timestamp) or
 * downloaded through __handle_got_json_dl. */
static void
__handle_got_json_internal_callback(const char *key, const char *val, int comp, void *user)
{
    struct kara *kara = (struct kara *) user;
    /* Get the fields */
    if (!comp && key && val) {
        /* Copy a metadata string field when `key` matches its name */
#define __get_field_string(field) { \
    if (STR_MATCH(#field, key)) { \
        safe_strncpy(kara->mdt.field, val, LEKTOR_TAG_MAX); \
    }}
        /* Parse an integer field when `key` matches the given json name */
#define __get_field_long_ex(field, json) { \
    if (STR_MATCH(#json, key)) { \
        kara->field = strtol(val, NULL, 0); \
    }}
#define __get_field_long_mdt(field) __get_field_long_ex(mdt.field, field)
#define __get_field_long_kara(field) __get_field_long_ex(field, field)
        __get_field_long_kara(id);
        __get_field_long_kara(unix_timestamp);
        __get_field_long_mdt(song_number);
        __get_field_string(song_name);
        __get_field_string(source_name);
        __get_field_string(category);
        __get_field_string(language);
        __get_field_string(author_name);
        __get_field_string(song_type);
#undef __get_field_long_mdt
#undef __get_field_long_kara
#undef __get_field_long_ex
#undef __get_field_string
    }
    /* The `void *user` is complete */
    else if (comp) {
        struct timespec time_sleep = {
            .tv_sec = 0,
            .tv_nsec = 100000000L,
        }; /* Sleep for 0.1s */
        nanosleep(&time_sleep, NULL); /* Sleep a bit, better for Hard drive */
        long filestamp = 0;
        int current_id = 0;
        /* Timestamp and presence verification */
        if (!database_get_kara_path(kara->db, kara->id, kara->database_filepath))
            goto do_it; /* Unknown to the base: download it */
        /* Override calculated filename if it exists */
        size_t db_fp_size = (strlen(kara->database_filepath) + 1) * sizeof(char);
        memcpy(kara->filename, kara->database_filepath, db_fp_size);
        filestamp = get_mtime(kara->filename);
        /* Only skip the kara when the local file is strictly newer */
        if (!(filestamp > kara->unix_timestamp))
            goto do_it;
        else {
            ++(kara->ignored_count);
            database_update_touch(kara->db, kara->id);
            database_update_set_available(kara->db, kara->id);
            LOG_DEBUG("REPO", "Ignore kara '%ld' with path '%s'", kara->id, kara->filename);
            return;
        }
do_it:
        __handle_got_json_dl(kara, current_id);
    }
    else
        LOG_ERROR("REPO", "Invalid call to this function, 'comp', 'key' and 'val' are null...");
}
static inline void
__handle_got_json(volatile sqlite3 *db, struct module_repo_internal *repo, const char *json)
{
size_t len = json_parse_get_count(json, 1);
struct kara kara = {
.repo = repo,
.db = db,
.ignored_count = 0,
.update_count = 0,
};
RETURN_UNLESS(len > 0, "Json invalid or array empty", NOTHING);
RETURN_UNLESS(database_config_get_text(db, "externals", "mkvpropedit",
kara.mkvpropedit, PATH_MAX),
"Can't get the mkvpropedit executable path", NOTHING);
/* Craft a folder path here, it will be used later */
size_t kara_dir_len = strlen(repo->kara_dir);
kara_dir_len = sizeof(char) * (kara_dir_len + 1);
memcpy(kara.database_filepath, repo->kara_dir, kara_dir_len);
if (kara.database_filepath[kara_dir_len - 1] != '/') {
strncat(kara.database_filepath, "/", PATH_MAX - 1);
kara.database_filepath[++kara_dir_len] = 0;
}
/* Handle the json */
LOG_INFO("REPO", "Starting to process json for repo %s", repo->name);
json_parse(json, 1, __handle_got_json_internal_callback, (void *) &kara);
LOG_INFO("REPO", "Updated %ld karas and ignored %ld karas, total is %ld",
kara.update_count, kara.ignored_count, len);
}
/* Drop from the base every kara reported as deleted by the repository.
 * NOTE(review): the resolved `path` is never used afterwards — the on-disk
 * file is not unlinked, only the db row is removed; confirm intent. */
static inline void
__handle_deleted_kara(volatile sqlite3 *db)
{
    int *ids;
    size_t count, idx;
    char path[PATH_MAX];
    database_deleted_kara(db, &ids, &count);
    for (idx = 0; idx < count; ++idx) {
        /* Karas whose path can't be resolved are left untouched */
        if (!database_get_kara_path(db, ids[idx], path))
            continue;
        database_update_del(db, ids[idx]);
    }
    free(ids);
}
/* Worker job: download the whole kara list from the repo and synchronize
 * the local base. Emits PROGRESS then FINISHED events on the module queue
 * and maintains the REPO_UPDATE_KARA flag under the mutex. */
static void *
__worker_update(void *__repo)
{
    struct module_repo_internal *repo = __repo;
    char *json = NULL;
    lkt_queue_send(repo->queue, lkt_event_db_updating, LKT_DB_UPDATING_PROGRESS);
    GOTO_IF(pthread_mutex_lock(&(repo->mtx)), "Failed to lock", end_no_lock);
    repo->updating |= REPO_UPDATE_KARA; /* Was `&=`, which never set the flag */
    GOTO_IF(pthread_mutex_unlock(&(repo->mtx)), "Failed to unlock", end_no_lock);
    LOG_INFO("REPO", "Download kara list from %s (%s), directory is %s",
             repo->name, repo->get_all_json, repo->kara_dir);
    if (__json_dl(repo->get_all_json, &json)) {
        LOG_ERROR("REPO", "Failed to get json, possibly no internet connexion or repo is down");
        /* Previously exited here with the flag set and no FINISHED event,
         * leaving the module stuck in the "updating" state */
        goto end_clear_flag;
    }
    __handle_got_json(repo->db, repo, json);
    LOG_INFO("REPO", "Finished to download and insert kara list");
    free(json);
    __handle_deleted_kara(repo->db);
    LOG_INFO("REPO", "Finished to deal with deleted kara");
    database_updated(repo->db);
end_clear_flag:
    GOTO_IF(pthread_mutex_lock(&(repo->mtx)), "Failed to lock", end_no_lock);
    repo->updating &= (~ REPO_UPDATE_KARA);
    GOTO_IF(pthread_mutex_unlock(&(repo->mtx)), "Failed to unlock", end_no_lock);
end_no_lock:
    lkt_queue_send(repo->queue, lkt_event_db_updating, LKT_DB_UPDATING_FINISHED);
    pthread_exit(NULL);
}
/* Worker job: rescan the kara directory on disk and update the base.
 * Emits PROGRESS then FINISHED events and maintains REPO_UPDATE_KARA. */
static void *
__worker_rescan(void *__repo)
{
    struct module_repo_internal *repo = __repo;
    char kara_prefix[LKT_LINE_MAX];
    lkt_queue_send(repo->queue, lkt_event_db_updating, LKT_DB_UPDATING_PROGRESS);
    if (!database_config_get_text(repo->db, "database", "kara_dir", kara_prefix, LKT_LINE_MAX)) {
        LOG_ERROR("REPO", "Failed to get kara prefix from config");
        /* PROGRESS was already sent: still emit FINISHED before exiting */
        goto end_no_lock;
    }
    GOTO_IF(pthread_mutex_lock(&(repo->mtx)), "Failed to lock", end_no_lock);
    repo->updating |= REPO_UPDATE_KARA; /* Was `&=`, which never set the flag */
    GOTO_IF(pthread_mutex_unlock(&(repo->mtx)), "Failed to unlock", end_no_lock);
    database_update(repo->db, kara_prefix, 0);
    /* Don't check timestamp. TODO: Sometimes we want to check them */
    GOTO_IF(pthread_mutex_lock(&(repo->mtx)), "Failed to lock", end_no_lock);
    repo->updating &= (~ REPO_UPDATE_KARA);
    GOTO_IF(pthread_mutex_unlock(&(repo->mtx)), "Failed to unlock", end_no_lock);
end_no_lock:
    lkt_queue_send(repo->queue, lkt_event_db_updating, LKT_DB_UPDATING_FINISHED);
    pthread_exit(NULL);
}
/* Import one favorite list named `fav` from the repo as a local playlist.
 * The name is prefixed in place with '@' (hence `fav_size`, the capacity of
 * the `fav` buffer) to differentiate it from regular playlists. */
static inline void
__handle_fav_list(struct module_repo_internal *repo, char *fav, size_t fav_size)
{
    struct json_object *json, *item_json = NULL;
    char fav_url[LKT_LINE_MAX];
    char *fav_origin = NULL;
    /* Was two strncat calls whose second bound ignored the first append,
     * allowing an overflow; a single bounded format cannot overflow */
    safe_snprintf(fav_url, LKT_LINE_MAX, "%s%s", repo->get_fav_json, fav);
    /* NOTE(review): __json_dl fills a char* with raw text while `json` is a
     * struct json_object* — verify where the parsing is supposed to happen */
    RETURN_IF(__json_dl(fav_url, &json), "Failed to download fav list", NOTHING);
    /* Prepend by `@`, to diferentiate with playlists */
    size_t fav_len = strlen(fav);
    if (fav_len + 1 >= fav_size) {
        fav_origin = fav;
        LOG_WARN("REPO", "Fav list has a name to big to prepend it by '@'. "
                 "Possible collision with other playlists");
    } else {
        /* Move fav_len + 1 bytes so the NUL terminator moves too: the
         * previous call left the shifted string unterminated */
        memmove(fav + 1, fav, (fav_len + 1) * sizeof(char));
        fav_origin = fav + 1;
        fav[0] = '@';
        LOG_INFO("REPO", "Importing fav list '%s' as '%s'", fav_origin, fav);
    }
    database_plt_touch(repo->db, fav);
    struct lkt_uri uri = {
        .type = uri_id,
        .is_int = true,
    };
    size_t len = json_object_array_length(json), i;
    long id;
    for (i = 0; i < len; ++i) {
        item_json = json_object_array_get_idx(json, i);
        if (item_json == NULL) {
            LOG_ERROR("REPO", "There is no kara at index %zu in fav list %s", i, fav);
            continue;
        }
        if (__safe_json_get_long(item_json, "id", &id)) {
            LOG_ERROR("REPO", "Failed to get the id of the kara in fav list %s", fav_origin);
            continue;
        }
        uri.id = id;
        if (!database_plt_add_uri(repo->db, fav, &uri)) {
            LOG_ERROR("REPO", "Failed to add kara %ld to playlist %s", id, fav);
            continue;
        }
    }
    LOG_INFO("REPO", "Finished importing fav list '%s' as '%s'", fav_origin, fav);
    json_object_put(json);
}
/* Worker job: download the list of favorite lists, then import each one as
 * a local '@'-prefixed playlist. Maintains REPO_UPDATE_FAV under the mutex. */
static void *
__worker_import_favorites(void *__repo)
{
    struct module_repo_internal *repo = __repo;
    struct json_object *json = NULL, *item_json = NULL;
    size_t len = 0, i;
    char fav_name[LKT_LINE_MAX];
    GOTO_IF(pthread_mutex_lock(&(repo->mtx)), "Failed to lock", end_no_lock);
    repo->updating |= REPO_UPDATE_FAV; /* Was `&=`, which never set the flag */
    GOTO_IF(pthread_mutex_unlock(&(repo->mtx)), "Failed to unlock", end_no_lock);
    LOG_INFO("REPO", "Download favorite lists from %s (%s)", repo->name, repo->get_fav_json);
    if (__json_dl(repo->get_fav_json, &json)) {
        LOG_ERROR("REPO", "Failed to get json, possibly no internet connexion or repo is down");
        /* Previously exited with the flag set, refusing all later imports */
        goto end_clear_flag;
    }
    LOG_INFO("REPO", "Finished to dl favorite lists");
    len = json_object_array_length(json);
    for (i = 0; i < len; ++i) {
        item_json = json_object_array_get_idx(json, i);
        if (item_json == NULL) {
            LOG_ERROR("REPO", "There is no item with index %zu in fav list", i);
            continue;
        }
        GOTO_IF(__safe_json_get_string(item_json, "pseudo", fav_name, LKT_LINE_MAX),
                "Field 'pseudo' not found in json item", error);
        /* TODO: Add a way to use the workers to do this for each fav list */
        __handle_fav_list(repo, fav_name, LKT_LINE_MAX);
    }
error:
    json_object_put(json);
    LOG_INFO("REPO", "Finished to deal with %zu favorite lists", len);
end_clear_flag:
    GOTO_IF(pthread_mutex_lock(&(repo->mtx)), "Failed to lock", end_no_lock);
    repo->updating &= (~ REPO_UPDATE_FAV);
    GOTO_IF(pthread_mutex_unlock(&(repo->mtx)), "Failed to unlock", end_no_lock);
end_no_lock:
    pthread_exit(NULL);
}
/***********************************************
* Functions that will be wrapped and exported *
***********************************************/
/* Stop the module: block until every queued worker job (update, rescan,
 * favorite import) has finished. Frees nothing — see mod_free. */
static inline void
module_repo_close(struct module_repo_internal *repo)
{
    LOG_INFO("REPO", "Waiting for workers to finish");
    worker_pool_waitall(&repo->workers);
    LOG_INFO("REPO", "All workers have finished");
}
static inline bool
module_repo_new(struct module_repo_internal *repo_, struct queue *queue, volatile sqlite3 *db)
{
RETURN_UNLESS(repo_ && queue && db, "Invalid argument", 1);
if (!__curl_init) {
curl_global_init(CURL_GLOBAL_ALL);
__curl_init = 1;
} else
++__curl_init;
int obfuscate;
if (!database_config_get_int(db, "repo", "obfuscate", &obfuscate)) {
LOG_ERROR("REPO", "Configuration invalid, but it should not be at this point");
exit(EXIT_FAILURE);
}
obfuscate = obfuscate ? 1 : 0; /* Safe values */
LOG_WARN("REPO", "Downloading base in %s mode, mode is number: %d",
obfuscate ? "obfuscation" : "clear", obfuscate);
if (!database_config_obfuscation_check(db, obfuscate))
LOG_WARN("REPO", "Database obfuscation mismatch. File names may be inconsistent");
struct module_repo_internal repo = {
.version = 1,
.queue = queue,
.db = db,
.updating = 0,
.name = safe_zero_malloc(LKT_LINE_MAX * sizeof(char)),
.kara_dir = safe_zero_malloc(PATH_MAX * sizeof(char)),
.get_id_json = safe_zero_malloc(LKT_LINE_MAX * sizeof(char)),
.get_id_file = safe_zero_malloc(LKT_LINE_MAX * sizeof(char)),
.get_fav_json = safe_zero_malloc(LKT_LINE_MAX * sizeof(char)),
.get_all_json = safe_zero_malloc(LKT_LINE_MAX * sizeof(char)),
.base_url = safe_zero_malloc(LKT_LINE_MAX * sizeof(char)),
.craft_filename = obfuscate ? __craft_filename_obfuscate : __craft_filename_non_obfuscate,
};
/* Copies */
if (!database_config_get_text(db, "database", "kara_dir", repo.kara_dir, PATH_MAX) ||
!database_config_get_text(db, "repo", "name", repo.name, LKT_LINE_MAX) ||
!database_config_get_text(db, "repo", "url", repo.base_url, LKT_LINE_MAX) ||
!database_config_get_text(db, "repo", "id_json", repo.get_id_json, LKT_LINE_MAX) ||
!database_config_get_text(db, "repo", "id_kara", repo.get_id_file, LKT_LINE_MAX) ||
!database_config_get_text(db, "repo", "fav_json", repo.get_fav_json, LKT_LINE_MAX) ||
!database_config_get_text(db, "repo", "json", repo.get_all_json, LKT_LINE_MAX) ) {
LOG_ERROR("REPO", "Configuration invalid, but it should not be at this point");
exit(EXIT_FAILURE);
}
memcpy(repo_, &repo, sizeof(struct module_repo_internal));
/* Init the worker only now ! */
int workers_count;
if (!database_config_get_int(db, "repo", "workers_count", &workers_count))
workers_count = 5;
if (worker_pool_new(&repo_->workers,
10 /* Initial number of elements in the call queue */,
workers_count /* Number of worker threads */)) {
LOG_ERROR("REPO", "Out of memory");
return false;
}
return true;
}
/********************
* Export functions *
********************/
/* Module entry point: allocate and initialize the repo module.
 * va layout: (struct module_repo_internal **, struct queue *, volatile sqlite3 *).
 * Returns 0 on success, 1 on failure. */
static int
mod_new(va_list *va)
{
    va_list copy;
    struct module_repo_internal **repo;
    va_copy(copy, *va);
    repo = (struct module_repo_internal **) va_arg(copy, void **);
    struct queue *queue = va_arg(copy, struct queue *);
    volatile sqlite3 *db = va_arg(copy, volatile sqlite3 *);
    if (NULL != *repo) {
        LOG_ERROR("REPO", "Can't init two times the module");
        va_end(copy); /* Was missing on this early return */
        return 1;
    }
    *repo = malloc(sizeof(struct module_repo_internal));
    if (NULL == *repo) {
        LOG_ERROR("REPO", "Out of memory");
        va_end(copy); /* Was missing on this early return */
        return 1;
    }
    bool ret = module_repo_new(*repo, queue, db);
    lkt_queue_make_available(queue, lkt_event_db_updating);
    va_end(copy);
    if (!ret) {
        LOG_ERROR("REPO", "Failed to create the module");
        /* Don't leave a half-initialized module behind: a later mod_new
         * would refuse to run because *repo is no longer NULL */
        free(*repo);
        *repo = NULL;
    }
    return !ret;
}
/* Module stop hook: wait for all pending worker jobs to finish.
 * va layout: (struct module_repo_internal **). Always returns 0. */
static int
mod_close(va_list *va)
{
    va_list args;
    struct module_repo_internal **repo_ptr;
    va_copy(args, *va);
    repo_ptr = (struct module_repo_internal **) va_arg(args, void **);
    module_repo_close(*repo_ptr);
    va_end(args);
    return 0;
}
/* Module teardown hook: stop the workers, release curl global state (when
 * the reference count drops to zero) and free every config string.
 * va layout: (struct module_repo_internal **). Always returns 0. */
static int
mod_free(va_list *va)
{
    va_list copy;
    struct module_repo_internal **repo;
    va_copy(copy, *va);
    repo = (struct module_repo_internal **) va_arg(copy, void **);
    module_repo_close(*repo);
    worker_pool_free(&(*repo)->workers);
    --__curl_init;
    if (!__curl_init)
        curl_global_cleanup();
    free((*repo)->name); /* Was leaked: every other config string was freed but not this one */
    free((*repo)->kara_dir);
    free((*repo)->get_id_json);
    free((*repo)->get_id_file);
    free((*repo)->base_url);
    free((*repo)->get_all_json);
    free((*repo)->get_fav_json);
    /* NOTE(review): *repo itself (malloc'd in mod_new) is still not freed
     * here — confirm which side owns that allocation before adding free() */
    LOG_INFO("REPO", "Repo module terminated");
    va_end(copy);
    return 0;
}
/* Schedule a full base update on the worker pool, unless one is already
 * running. va layout: (struct module_repo_internal **).
 * Returns 0 on success (or when already updating), 1 on scheduling failure. */
static int
mod_update(va_list *va)
{
    va_list copy;
    struct module_repo_internal **repo;
    va_copy(copy, *va);
    repo = (struct module_repo_internal **) va_arg(copy, void **);
    int ret = 0;
    GOTO_IF(pthread_mutex_lock(&(*repo)->mtx), "Failed to lock", end_no_lock);
    if ((*repo)->updating & REPO_UPDATE_KARA) {
        LOG_WARN("REPO", "Already updating");
        goto end;
    }
    (*repo)->updating |= REPO_UPDATE_KARA; /* Was `&=`, which cleared instead of setting */
    if (worker_pool_push(&(*repo)->workers, __worker_update, (void *) *repo)) {
        LOG_ERROR("REPO", "Out of memory");
        /* The worker will never run: undo the flag we just set */
        (*repo)->updating &= (~ REPO_UPDATE_KARA);
        ret = 1;
        goto end;
    }
    LOG_INFO("REPO", "Update started (update)");
end:
    GOTO_IF(pthread_mutex_unlock(&(*repo)->mtx), "Failed to unlock", end_no_lock);
end_no_lock:
    va_end(copy);
    return ret;
}
/* Schedule an on-disk rescan on the worker pool, unless an update is
 * already running. va layout: (struct module_repo_internal **).
 * Returns 0 on success (or when already updating), 1 on scheduling failure. */
static int
mod_rescan(va_list *va)
{
    va_list copy;
    struct module_repo_internal **repo;
    va_copy(copy, *va);
    repo = (struct module_repo_internal **) va_arg(copy, void **);
    int ret = 0;
    GOTO_IF(pthread_mutex_lock(&(*repo)->mtx), "Failed to lock", end_no_lock);
    if ((*repo)->updating & REPO_UPDATE_KARA) {
        LOG_WARN("REPO", "Already updating");
        goto end;
    }
    (*repo)->updating |= REPO_UPDATE_KARA; /* Was `&=`, which cleared instead of setting */
    if (worker_pool_push(&(*repo)->workers, __worker_rescan, (void *) *repo)) {
        LOG_ERROR("REPO", "Out of memory");
        /* The worker will never run: undo the flag we just set */
        (*repo)->updating &= (~ REPO_UPDATE_KARA);
        ret = 1;
        goto end;
    }
    LOG_INFO("REPO", "Update started (rescan)");
end:
    GOTO_IF(pthread_mutex_unlock(&(*repo)->mtx), "Failed to unlock", end_no_lock);
end_no_lock:
    va_end(copy);
    return ret;
}
/* Report whether any update (base or favorites) is running.
 * va layout: (struct module_repo_internal **, int *).
 * Returns 0 on success, 1 when the output pointer is NULL. */
static int
mod_is_updating(va_list *va)
{
    va_list copy;
    struct module_repo_internal **repo;
    int *ret;
    va_copy(copy, *va);
    repo = (struct module_repo_internal **) va_arg(copy, void **);
    ret = va_arg(copy, int *);
    if (ret == NULL) {
        va_end(copy); /* Was missing on this early return */
        return 1;
    }
    /* Don't differentiate kinds of downloads here. Read under the mutex:
     * every writer of `updating` holds it, an unlocked read is a data race. */
    pthread_mutex_lock(&(*repo)->mtx);
    *ret = (*repo)->updating;
    pthread_mutex_unlock(&(*repo)->mtx);
    va_end(copy);
    return 0;
}
/* Schedule a favorite-lists import on the worker pool, unless one is
 * already running. va layout: (struct module_repo_internal **).
 * Returns 0 on success (or when already importing), 1 on scheduling failure. */
static int
mod_import(va_list *va)
{
    va_list copy;
    struct module_repo_internal **repo;
    va_copy(copy, *va);
    repo = (struct module_repo_internal **) va_arg(copy, void **);
    int ret = 0;
    GOTO_IF(pthread_mutex_lock(&(*repo)->mtx), "Failed to lock", end_no_lock);
    if ((*repo)->updating & REPO_UPDATE_FAV) {
        LOG_WARN("REPO", "Already importing favorites");
        goto end;
    }
    (*repo)->updating |= REPO_UPDATE_FAV; /* Was `&=`, which cleared instead of setting */
    if (worker_pool_push(&(*repo)->workers, __worker_import_favorites, (void *) *repo)) {
        LOG_ERROR("REPO", "Out of memory");
        /* The worker will never run: undo the flag we just set */
        (*repo)->updating &= (~ REPO_UPDATE_FAV);
        ret = 1;
        goto end;
    }
end:
    GOTO_IF(pthread_mutex_unlock(&(*repo)->mtx), "Failed to unlock", end_no_lock);
end_no_lock:
    va_end(copy);
    return ret;
}
/* Registration table: the names through which the module loader invokes the
 * hooks above. */
REG_BEGIN(repo_reg)
REG_ADD_NAMED("new", mod_new)                 /* Create the module */
REG_ADD_NAMED("free", mod_free)               /* Close and free ressources */
REG_ADD_NAMED("close", mod_close)             /* Stop the module */
REG_ADD_NAMED("update", mod_update)           /* Update from kurisu */
REG_ADD_NAMED("rescan", mod_rescan)           /* Rescan from hard drive */
REG_ADD_NAMED("import", mod_import)           /* Import favorites from kurisu */
REG_ADD_NAMED("is_updating", mod_is_updating) /* Is the module updating the
                                               * base, either the favorites
                                               * or the bakabase */
REG_END()
/* Only export the registration symbol when built as a dynamic module */
#if ! defined (LKT_STATIC_MODULE)
REG_EXPORT(repo_reg)
#endif