static void hkeysCommand(redisClient *c);
static void hvalsCommand(redisClient *c);
static void hgetallCommand(redisClient *c);
+static void hexistsCommand(redisClient *c);
/*================================= Globals ================================= */
{"hkeys",hkeysCommand,2,REDIS_CMD_INLINE,1,1,1},
{"hvals",hvalsCommand,2,REDIS_CMD_INLINE,1,1,1},
{"hgetall",hgetallCommand,2,REDIS_CMD_INLINE,1,1,1},
+ {"hexists",hexistsCommand,3,REDIS_CMD_BULK,1,1,1},
{"incrby",incrbyCommand,3,REDIS_CMD_INLINE|REDIS_CMD_DENYOOM,1,1,1},
{"decrby",decrbyCommand,3,REDIS_CMD_INLINE|REDIS_CMD_DENYOOM,1,1,1},
{"getset",getsetCommand,3,REDIS_CMD_BULK|REDIS_CMD_DENYOOM,1,1,1},
return size1 - size2;
}
+#define REDIS_AGGR_SUM 1
+#define REDIS_AGGR_MIN 2
+#define REDIS_AGGR_MAX 3
+
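+/* Helper for ZUNION/ZINTER scoring: fold a new weighted value into the
+ * score accumulated so far in *target, according to the requested
+ * AGGREGATE mode (SUM, MIN or MAX). */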
+inline static void zunionInterAggregate(double *target, double val, int aggregate) {
+    if (aggregate == REDIS_AGGR_SUM) {
+        *target = *target + val;
+    } else if (aggregate == REDIS_AGGR_MIN) {
+        *target = val < *target ? val : *target;
+    } else if (aggregate == REDIS_AGGR_MAX) {
+        *target = val > *target ? val : *target;
+    } else {
+        /* safety net */
+        redisAssert(0 != 0);
+    }
+}
+
static void zunionInterGenericCommand(redisClient *c, robj *dstkey, int op) {
int i, j, zsetnum;
+ int aggregate = REDIS_AGGR_SUM;
zsetopsrc *src;
robj *dstobj;
zset *dstzset;
/* parse optional extra arguments */
if (j < c->argc) {
- int remaining = c->argc-j;
+ int remaining = c->argc - j;
while (remaining) {
- if (!strcasecmp(c->argv[j]->ptr,"weights")) {
+ if (remaining >= (zsetnum + 1) && !strcasecmp(c->argv[j]->ptr,"weights")) {
j++; remaining--;
- if (remaining < zsetnum) {
- zfree(src);
- addReplySds(c,sdsnew("-ERR not enough weights for ZUNION/ZINTER\r\n"));
- return;
- }
for (i = 0; i < zsetnum; i++, j++, remaining--) {
src[i].weight = strtod(c->argv[j]->ptr, NULL);
}
+ } else if (remaining >= 2 && !strcasecmp(c->argv[j]->ptr,"aggregate")) {
+ j++; remaining--;
+ if (!strcasecmp(c->argv[j]->ptr,"sum")) {
+ aggregate = REDIS_AGGR_SUM;
+ } else if (!strcasecmp(c->argv[j]->ptr,"min")) {
+ aggregate = REDIS_AGGR_MIN;
+ } else if (!strcasecmp(c->argv[j]->ptr,"max")) {
+ aggregate = REDIS_AGGR_MAX;
+ } else {
+ zfree(src);
+ addReply(c,shared.syntaxerr);
+ return;
+ }
+ j++; remaining--;
            } else {
                zfree(src);
                addReply(c,shared.syntaxerr);
                return;
            }
}
+    /* sort sets from the smallest to the largest; this improves the
+     * algorithm's performance */
+    qsort(src,zsetnum,sizeof(zsetopsrc),qsortCompareZsetopsrcByCardinality);
+
dstobj = createZsetObject();
dstzset = dstobj->ptr;
if (op == REDIS_OP_INTER) {
- /* sort sets from the smallest to largest, this will improve our
- * algorithm's performance */
- qsort(src,zsetnum,sizeof(zsetopsrc), qsortCompareZsetopsrcByCardinality);
-
/* skip going over all entries if the smallest zset is NULL or empty */
if (src[0].dict && dictSize(src[0].dict) > 0) {
/* precondition: as src[0].dict is non-empty and the zsets are ordered
* from small to large, all src[i > 0].dict are non-empty too */
di = dictGetIterator(src[0].dict);
while((de = dictNext(di)) != NULL) {
- double *score = zmalloc(sizeof(double));
- *score = 0.0;
+ double *score = zmalloc(sizeof(double)), value;
+ *score = src[0].weight * (*(double*)dictGetEntryVal(de));
- for (j = 0; j < zsetnum; j++) {
- dictEntry *other = (j == 0) ? de : dictFind(src[j].dict,dictGetEntryKey(de));
+ for (j = 1; j < zsetnum; j++) {
+ dictEntry *other = dictFind(src[j].dict,dictGetEntryKey(de));
if (other) {
- *score = *score + src[j].weight * (*(double*)dictGetEntryVal(other));
+ value = src[j].weight * (*(double*)dictGetEntryVal(other));
+ zunionInterAggregate(score, value, aggregate);
} else {
break;
}
/* skip key when already processed */
if (dictFind(dstzset->dict,dictGetEntryKey(de)) != NULL) continue;
-            double *score = zmalloc(sizeof(double));
-            *score = 0.0;
-            for (j = 0; j < zsetnum; j++) {
-                if (!src[j].dict) continue;
-                dictEntry *other = (i == j) ? de : dictFind(src[j].dict,dictGetEntryKey(de));
+            double *score = zmalloc(sizeof(double)), value;
+            *score = src[i].weight * (*(double*)dictGetEntryVal(de));
+            /* because the zsets are sorted by size, it's only possible
+             * for sets at larger indices to hold this entry */
+            for (j = (i+1); j < zsetnum; j++) {
+                dictEntry *other = dictFind(src[j].dict,dictGetEntryKey(de));
if (other) {
- *score = *score + src[j].weight * (*(double*)dictGetEntryVal(other));
+ value = src[j].weight * (*(double*)dictGetEntryVal(other));
+ zunionInterAggregate(score, value, aggregate);
}
}
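
/* Not part of the patch: a minimal standalone sketch of the AGGREGATE
 * arithmetic above, using element "b" from the zseta/zsetb fixtures in the
 * tests below (score 2 in zseta, score 1 in zsetb, default weight 1). */
#include <stdio.h>

#define REDIS_AGGR_SUM 1
#define REDIS_AGGR_MIN 2
#define REDIS_AGGR_MAX 3

static void aggregate(double *target, double val, int mode) {
    if (mode == REDIS_AGGR_SUM)      *target += val;
    else if (mode == REDIS_AGGR_MIN) *target = val < *target ? val : *target;
    else                             *target = val > *target ? val : *target;
}

int main(void) {
    const char *names[] = {"SUM", "MIN", "MAX"};
    int modes[] = {REDIS_AGGR_SUM, REDIS_AGGR_MIN, REDIS_AGGR_MAX};
    int i;

    for (i = 0; i < 3; i++) {
        double score = 1.0 * 2;               /* weight * score of "b" in zseta */
        aggregate(&score, 1.0 * 1, modes[i]); /* weight * score of "b" in zsetb */
        printf("AGGREGATE %s -> b %g\n", names[i], score);
    }
    return 0; /* output: b scores 3 (SUM), 1 (MIN), 2 (MAX) */
}
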
tryObjectEncoding(c->argv[2]);
    /* note that c->argv[3] is already encoded, as the last arg
     * of a bulk command is always integer encoded if possible. */
- if (dictAdd(o->ptr,c->argv[2],c->argv[3]) == DICT_OK) {
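+    /* dictReplace() returns 1 when the field was added from scratch and 0
+     * when it overwrote an existing value, so take a new reference on the
+     * field object only for fresh inserts and flag an update otherwise. */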
+ if (dictReplace(o->ptr,c->argv[2],c->argv[3])) {
incrRefCount(c->argv[2]);
} else {
update = 1;
genericHgetallCommand(c,REDIS_GETALL_KEYS|REDIS_GETALL_VALS);
}
+static void hexistsCommand(redisClient *c) {
+ robj *o;
+ int exists = 0;
+
+ if ((o = lookupKeyReadOrReply(c,c->argv[1],shared.czero)) == NULL ||
+ checkType(c,o,REDIS_HASH)) return;
+
+ if (o->encoding == REDIS_ENCODING_ZIPMAP) {
+ robj *field;
+ unsigned char *zm = o->ptr;
+
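+        /* make sure the field is in its decoded (raw string) form, since
+         * zipmaps store fields and values as plain strings */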
+ field = getDecodedObject(c->argv[2]);
+ exists = zipmapExists(zm,field->ptr,sdslen(field->ptr));
+ decrRefCount(field);
+ } else {
+ exists = dictFind(o->ptr,c->argv[2]) != NULL;
+ }
+ addReply(c,exists ? shared.cone : shared.czero);
+}
+
static void convertToRealHash(robj *o) {
unsigned char *key, *val, *p, *zm = o->ptr;
unsigned int klen, vlen;
list [$r zunion zsetc 2 zseta zsetb weights 2 3] [$r zrange zsetc 0 -1 withscores]
} {4 {a 2 b 7 d 9 c 12}}
+ test {ZUNION with AGGREGATE MIN} {
+ list [$r zunion zsetc 2 zseta zsetb aggregate min] [$r zrange zsetc 0 -1 withscores]
+ } {4 {a 1 b 1 c 2 d 3}}
+
+ test {ZUNION with AGGREGATE MAX} {
+ list [$r zunion zsetc 2 zseta zsetb aggregate max] [$r zrange zsetc 0 -1 withscores]
+ } {4 {a 1 b 2 c 3 d 3}}
+
test {ZINTER basics} {
list [$r zinter zsetc 2 zseta zsetb] [$r zrange zsetc 0 -1 withscores]
} {2 {b 3 c 5}}
list [$r zinter zsetc 2 zseta zsetb weights 2 3] [$r zrange zsetc 0 -1 withscores]
} {2 {b 7 c 12}}
+ test {ZINTER with AGGREGATE MIN} {
+ list [$r zinter zsetc 2 zseta zsetb aggregate min] [$r zrange zsetc 0 -1 withscores]
+ } {2 {b 1 c 2}}
+
+ test {ZINTER with AGGREGATE MAX} {
+ list [$r zinter zsetc 2 zseta zsetb aggregate max] [$r zrange zsetc 0 -1 withscores]
+ } {2 {b 2 c 3}}
+
test {SORT against sorted sets} {
$r del zset
$r zadd zset 1 a
set _ $err
} {}
+ test {HSET in update and insert mode} {
+ set rv {}
+ set k [lindex [array names smallhash *] 0]
+ lappend rv [$r hset smallhash $k newval1]
+ set smallhash($k) newval1
+ lappend rv [$r hget smallhash $k]
+ lappend rv [$r hset smallhash __foobar123__ newval]
+ set k [lindex [array names bighash *] 0]
+ lappend rv [$r hset bighash $k newval2]
+ set bighash($k) newval2
+ lappend rv [$r hget bighash $k]
+ lappend rv [$r hset bighash __foobar123__ newval]
+ lappend rv [$r hdel smallhash __foobar123__]
+ lappend rv [$r hdel bighash __foobar123__]
+ set _ $rv
+ } {0 newval1 1 0 newval2 1 1 1}
+
+ test {HGET against non existing key} {
+ set rv {}
+ lappend rv [$r hget smallhash __123123123__]
+ lappend rv [$r hget bighash __123123123__]
+ set _ $rv
+ } {{} {}}
+
+ test {HKEYS - small hash} {
+ lsort [$r hkeys smallhash]
+ } [lsort [array names smallhash *]]
+
+ test {HKEYS - big hash} {
+ lsort [$r hkeys bighash]
+ } [lsort [array names bighash *]]
+
+ test {HVALS - small hash} {
+ set vals {}
+ foreach {k v} [array get smallhash] {
+ lappend vals $v
+ }
+ set _ [lsort $vals]
+ } [lsort [$r hvals smallhash]]
+
+ test {HVALS - big hash} {
+ set vals {}
+ foreach {k v} [array get bighash] {
+ lappend vals $v
+ }
+ set _ [lsort $vals]
+ } [lsort [$r hvals bighash]]
+
+ test {HGETALL - small hash} {
+ lsort [$r hgetall smallhash]
+ } [lsort [array get smallhash]]
+
+ test {HGETALL - big hash} {
+ lsort [$r hgetall bighash]
+ } [lsort [array get bighash]]
+
+ test {HDEL and return value} {
+ set rv {}
+ lappend rv [$r hdel smallhash nokey]
+ lappend rv [$r hdel bighash nokey]
+ set k [lindex [array names smallhash *] 0]
+ lappend rv [$r hdel smallhash $k]
+ lappend rv [$r hdel smallhash $k]
+ lappend rv [$r hget smallhash $k]
+ unset smallhash($k)
+ set k [lindex [array names bighash *] 0]
+ lappend rv [$r hdel bighash $k]
+ lappend rv [$r hdel bighash $k]
+ lappend rv [$r hget bighash $k]
+ unset bighash($k)
+ set _ $rv
+ } {0 0 1 0 {} 1 0 {}}
+
+ test {HEXISTS} {
+ set rv {}
+ set k [lindex [array names smallhash *] 0]
+ lappend rv [$r hexists smallhash $k]
+ lappend rv [$r hexists smallhash nokey]
+ set k [lindex [array names bighash *] 0]
+ lappend rv [$r hexists bighash $k]
+ lappend rv [$r hexists bighash nokey]
+ } {1 0 1 0}
+
+ test {Is a zipmap encoded Hash promoted on big payload?} {
+ $r hset smallhash foo [string repeat a 1024]
+ $r debug object smallhash
+ } {*hashtable*}
+
+ # TODO:
+ # Randomized test, small and big
+ # .rdb / AOF consistency test should include hashes
+
test {EXPIRE - don't set timeouts multiple times} {
$r set x foobar
set v1 [$r expire x 5]