2019-07-17 22:25:42 +00:00
package encryption
2018-09-24 18:07:34 +00:00
import (
2024-02-29 09:51:38 +00:00
"context"
2018-09-24 18:07:34 +00:00
"crypto/ecdsa"
"database/sql"
2023-10-12 15:45:23 +00:00
"errors"
2018-11-06 08:05:32 +00:00
"strings"
2018-09-24 18:07:34 +00:00
dr "github.com/status-im/doubleratchet"
2020-01-02 09:10:19 +00:00
2019-11-23 17:57:05 +00:00
"github.com/status-im/status-go/eth-node/crypto"
2019-07-03 19:13:11 +00:00
2019-11-21 16:19:22 +00:00
"github.com/status-im/status-go/protocol/encryption/multidevice"
2018-09-24 18:07:34 +00:00
)
// RatchetInfo holds the current double ratchet state for one conversation:
// the symmetric key, the key material of the bundle in use, and the peer's
// installation identifier.
type RatchetInfo struct {
	ID             []byte
	Sk             []byte
	PrivateKey     []byte
	PublicKey      []byte
	Identity       []byte
	BundleID       []byte
	EphemeralKey   []byte
	InstallationID string
}
// maxNumberOfRows is a defensive upper bound on the number of rows
// processed from a single query.
const maxNumberOfRows = 100000000
2018-11-28 11:34:39 +00:00
2019-07-17 22:25:42 +00:00
type sqlitePersistence struct {
DB * sql . DB
keysStorage dr . KeysStorage
sessionStorage dr . SessionStorage
2018-09-24 18:07:34 +00:00
}
2019-07-17 22:25:42 +00:00
func newSQLitePersistence ( db * sql . DB ) * sqlitePersistence {
return & sqlitePersistence {
DB : db ,
keysStorage : newSQLiteKeysStorage ( db ) ,
sessionStorage : newSQLiteSessionStorage ( db ) ,
2018-09-24 18:07:34 +00:00
}
}
// GetKeysStorage returns the associated double ratchet KeysStorage object
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) KeysStorage ( ) dr . KeysStorage {
2018-09-24 18:07:34 +00:00
return s . keysStorage
}
// GetSessionStorage returns the associated double ratchet SessionStorage object
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) SessionStorage ( ) dr . SessionStorage {
2018-09-24 18:07:34 +00:00
return s . sessionStorage
}
// AddPrivateBundle adds the specified BundleContainer to the database
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) AddPrivateBundle ( bc * BundleContainer ) error {
2019-06-03 14:29:14 +00:00
tx , err := s . DB . Begin ( )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
2018-11-06 08:05:32 +00:00
for installationID , signedPreKey := range bc . GetBundle ( ) . GetSignedPreKeys ( ) {
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
var version uint32
2018-11-28 11:34:39 +00:00
stmt , err := tx . Prepare ( ` SELECT version
FROM bundles
WHERE installation_id = ? AND identity = ?
ORDER BY version DESC
LIMIT 1 ` )
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
2018-11-06 08:05:32 +00:00
err = stmt . QueryRow ( installationID , bc . GetBundle ( ) . GetIdentity ( ) ) . Scan ( & version )
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
if err != nil && err != sql . ErrNoRows {
return err
}
2018-11-28 11:34:39 +00:00
stmt , err = tx . Prepare ( ` INSERT INTO bundles ( identity , private_key , signed_pre_key , installation_id , version , timestamp )
VALUES ( ? , ? , ? , ? , ? , ? ) ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
_ , err = stmt . Exec (
2018-11-06 08:05:32 +00:00
bc . GetBundle ( ) . GetIdentity ( ) ,
bc . GetPrivateSignedPreKey ( ) ,
2018-09-24 18:07:34 +00:00
signedPreKey . GetSignedPreKey ( ) ,
installationID ,
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
version + 1 ,
2018-11-06 08:05:32 +00:00
bc . GetBundle ( ) . GetTimestamp ( ) ,
2018-09-24 18:07:34 +00:00
)
if err != nil {
_ = tx . Rollback ( )
return err
}
}
if err := tx . Commit ( ) ; err != nil {
_ = tx . Rollback ( )
return err
}
return nil
}
// AddPublicBundle adds the specified Bundle to the database
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) AddPublicBundle ( b * Bundle ) error {
2019-06-03 14:29:14 +00:00
tx , err := s . DB . Begin ( )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
for installationID , signedPreKeyContainer := range b . GetSignedPreKeys ( ) {
signedPreKey := signedPreKeyContainer . GetSignedPreKey ( )
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
version := signedPreKeyContainer . GetVersion ( )
2018-11-28 11:34:39 +00:00
insertStmt , err := tx . Prepare ( ` INSERT INTO bundles ( identity , signed_pre_key , installation_id , version , timestamp )
VALUES ( ? , ? , ? , ? , ? ) ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer insertStmt . Close ( )
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
2018-09-24 18:07:34 +00:00
_ , err = insertStmt . Exec (
b . GetIdentity ( ) ,
signedPreKey ,
installationID ,
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
version ,
2018-11-06 08:05:32 +00:00
b . GetTimestamp ( ) ,
2018-09-24 18:07:34 +00:00
)
if err != nil {
_ = tx . Rollback ( )
return err
}
// Mark old bundles as expired
2018-11-28 11:34:39 +00:00
updateStmt , err := tx . Prepare ( ` UPDATE bundles
SET expired = 1
WHERE identity = ? AND installation_id = ? AND version < ? ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer updateStmt . Close ( )
_ , err = updateStmt . Exec (
b . GetIdentity ( ) ,
installationID ,
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
version ,
2018-09-24 18:07:34 +00:00
)
if err != nil {
_ = tx . Rollback ( )
return err
}
}
return tx . Commit ( )
}
// GetAnyPrivateBundle retrieves any bundle from the database containing a private key
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) GetAnyPrivateBundle ( myIdentityKey [ ] byte , installations [ ] * multidevice . Installation ) ( * BundleContainer , error ) {
2018-11-06 08:05:32 +00:00
2019-05-23 07:54:28 +00:00
versions := make ( map [ string ] uint32 )
2018-11-06 08:05:32 +00:00
/* #nosec */
2018-11-28 11:34:39 +00:00
statement := ` SELECT identity , private_key , signed_pre_key , installation_id , timestamp , version
FROM bundles
2019-05-23 07:54:28 +00:00
WHERE expired = 0 AND identity = ? AND installation_id IN ( ? ` + strings . Repeat ( ",?" , len ( installations ) - 1 ) + ")"
2019-06-03 14:29:14 +00:00
stmt , err := s . DB . Prepare ( statement )
2018-09-24 18:07:34 +00:00
if err != nil {
return nil , err
}
defer stmt . Close ( )
var timestamp int64
var identity [ ] byte
2018-10-16 10:31:05 +00:00
var privateKey [ ] byte
2018-11-28 11:34:39 +00:00
var version uint32
2018-09-24 18:07:34 +00:00
2019-05-23 07:54:28 +00:00
args := make ( [ ] interface { } , len ( installations ) + 1 )
2018-11-06 08:05:32 +00:00
args [ 0 ] = myIdentityKey
2019-05-23 07:54:28 +00:00
for i , installation := range installations {
// Lookup up map for versions
versions [ installation . ID ] = installation . Version
args [ i + 1 ] = installation . ID
2018-11-06 08:05:32 +00:00
}
rows , err := stmt . Query ( args ... )
2018-09-24 18:07:34 +00:00
rowCount := 0
if err != nil {
return nil , err
}
defer rows . Close ( )
2019-07-17 22:25:42 +00:00
bundle := & Bundle {
SignedPreKeys : make ( map [ string ] * SignedPreKey ) ,
2018-09-24 18:07:34 +00:00
}
2019-07-17 22:25:42 +00:00
bundleContainer := & BundleContainer {
2018-10-16 10:31:05 +00:00
Bundle : bundle ,
}
2018-09-24 18:07:34 +00:00
for rows . Next ( ) {
var signedPreKey [ ] byte
var installationID string
rowCount ++
err = rows . Scan (
& identity ,
2018-10-16 10:31:05 +00:00
& privateKey ,
2018-09-24 18:07:34 +00:00
& signedPreKey ,
& installationID ,
& timestamp ,
2018-11-28 11:34:39 +00:00
& version ,
2018-09-24 18:07:34 +00:00
)
if err != nil {
return nil , err
}
2018-10-16 10:31:05 +00:00
// If there is a private key, we set the timestamp of the bundle container
if privateKey != nil {
2018-11-06 08:05:32 +00:00
bundle . Timestamp = timestamp
2018-10-16 10:31:05 +00:00
}
2018-09-24 18:07:34 +00:00
2019-07-17 22:25:42 +00:00
bundle . SignedPreKeys [ installationID ] = & SignedPreKey {
2019-05-23 07:54:28 +00:00
SignedPreKey : signedPreKey ,
Version : version ,
ProtocolVersion : versions [ installationID ] ,
}
2018-09-24 18:07:34 +00:00
bundle . Identity = identity
}
2018-10-16 10:31:05 +00:00
// If no records are found or no record with private key, return nil
2018-11-06 08:05:32 +00:00
if rowCount == 0 || bundleContainer . GetBundle ( ) . Timestamp == 0 {
2018-09-24 18:07:34 +00:00
return nil , nil
}
2018-10-16 10:31:05 +00:00
return bundleContainer , nil
2018-09-24 18:07:34 +00:00
}
// GetPrivateKeyBundle retrieves a private key for a bundle from the database
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) GetPrivateKeyBundle ( bundleID [ ] byte ) ( [ ] byte , error ) {
2019-06-03 14:29:14 +00:00
stmt , err := s . DB . Prepare ( ` SELECT private_key
2018-11-28 11:34:39 +00:00
FROM bundles
2019-02-28 12:09:43 +00:00
WHERE signed_pre_key = ? LIMIT 1 ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return nil , err
}
defer stmt . Close ( )
var privateKey [ ] byte
err = stmt . QueryRow ( bundleID ) . Scan ( & privateKey )
switch err {
case sql . ErrNoRows :
return nil , nil
case nil :
return privateKey , nil
default :
return nil , err
}
}
2018-11-06 08:05:32 +00:00
// MarkBundleExpired expires any private bundle for a given identity
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) MarkBundleExpired ( identity [ ] byte ) error {
2019-06-03 14:29:14 +00:00
stmt , err := s . DB . Prepare ( ` UPDATE bundles
2018-11-28 11:34:39 +00:00
SET expired = 1
WHERE identity = ? AND private_key IS NOT NULL ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
_ , err = stmt . Exec ( identity )
return err
}
// GetPublicBundle retrieves an existing Bundle for the specified public key from the database
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) GetPublicBundle ( publicKey * ecdsa . PublicKey , installations [ ] * multidevice . Installation ) ( * Bundle , error ) {
2018-11-06 08:05:32 +00:00
2019-05-23 07:54:28 +00:00
if len ( installations ) == 0 {
2018-11-06 08:05:32 +00:00
return nil , nil
}
2018-09-24 18:07:34 +00:00
2019-05-23 07:54:28 +00:00
versions := make ( map [ string ] uint32 )
2018-09-24 18:07:34 +00:00
identity := crypto . CompressPubkey ( publicKey )
2018-11-06 08:05:32 +00:00
/* #nosec */
2018-11-28 11:34:39 +00:00
statement := ` SELECT signed_pre_key , installation_id , version
FROM bundles
2019-05-23 07:54:28 +00:00
WHERE expired = 0 AND identity = ? AND installation_id IN ( ? ` + strings.Repeat(",?", len(installations)-1) + ` )
2018-11-28 11:34:39 +00:00
ORDER BY version DESC `
2019-06-03 14:29:14 +00:00
stmt , err := s . DB . Prepare ( statement )
2018-09-24 18:07:34 +00:00
if err != nil {
return nil , err
}
defer stmt . Close ( )
2019-05-23 07:54:28 +00:00
args := make ( [ ] interface { } , len ( installations ) + 1 )
2018-11-06 08:05:32 +00:00
args [ 0 ] = identity
2019-05-23 07:54:28 +00:00
for i , installation := range installations {
// Lookup up map for versions
versions [ installation . ID ] = installation . Version
args [ i + 1 ] = installation . ID
2018-11-06 08:05:32 +00:00
}
rows , err := stmt . Query ( args ... )
2018-09-24 18:07:34 +00:00
rowCount := 0
if err != nil {
return nil , err
}
defer rows . Close ( )
2019-07-17 22:25:42 +00:00
bundle := & Bundle {
2018-09-24 18:07:34 +00:00
Identity : identity ,
2019-07-17 22:25:42 +00:00
SignedPreKeys : make ( map [ string ] * SignedPreKey ) ,
2018-09-24 18:07:34 +00:00
}
for rows . Next ( ) {
var signedPreKey [ ] byte
var installationID string
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
var version uint32
2018-09-24 18:07:34 +00:00
rowCount ++
err = rows . Scan (
& signedPreKey ,
& installationID ,
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
& version ,
2018-09-24 18:07:34 +00:00
)
if err != nil {
return nil , err
}
2019-07-17 22:25:42 +00:00
bundle . SignedPreKeys [ installationID ] = & SignedPreKey {
2019-05-23 07:54:28 +00:00
SignedPreKey : signedPreKey ,
Version : version ,
ProtocolVersion : versions [ installationID ] ,
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
}
2018-09-24 18:07:34 +00:00
}
if rowCount == 0 {
return nil , nil
}
return bundle , nil
}
// AddRatchetInfo persists the specified ratchet info into the database
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) AddRatchetInfo ( key [ ] byte , identity [ ] byte , bundleID [ ] byte , ephemeralKey [ ] byte , installationID string ) error {
2019-06-03 14:29:14 +00:00
stmt , err := s . DB . Prepare ( ` INSERT INTO ratchet_info_v2 ( symmetric_key , identity , bundle_id , ephemeral_key , installation_id )
2018-11-28 11:34:39 +00:00
VALUES ( ? , ? , ? , ? , ? ) ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
_ , err = stmt . Exec (
key ,
identity ,
bundleID ,
ephemeralKey ,
installationID ,
)
return err
}
// GetRatchetInfo retrieves the existing RatchetInfo for a specified bundle ID and interlocutor public key from the database
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) GetRatchetInfo ( bundleID [ ] byte , theirIdentity [ ] byte , installationID string ) ( * RatchetInfo , error ) {
2019-06-03 14:29:14 +00:00
stmt , err := s . DB . Prepare ( ` SELECT ratchet_info_v2 . identity , ratchet_info_v2 . symmetric_key , bundles . private_key , bundles . signed_pre_key , ratchet_info_v2 . ephemeral_key , ratchet_info_v2 . installation_id
2018-11-28 11:34:39 +00:00
FROM ratchet_info_v2 JOIN bundles ON bundle_id = signed_pre_key
WHERE ratchet_info_v2 . identity = ? AND ratchet_info_v2 . installation_id = ? AND bundle_id = ?
LIMIT 1 ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return nil , err
}
defer stmt . Close ( )
ratchetInfo := & RatchetInfo {
BundleID : bundleID ,
}
2018-10-16 10:31:05 +00:00
err = stmt . QueryRow ( theirIdentity , installationID , bundleID ) . Scan (
2018-09-24 18:07:34 +00:00
& ratchetInfo . Identity ,
& ratchetInfo . Sk ,
& ratchetInfo . PrivateKey ,
& ratchetInfo . PublicKey ,
& ratchetInfo . EphemeralKey ,
& ratchetInfo . InstallationID ,
)
switch err {
case sql . ErrNoRows :
return nil , nil
case nil :
ratchetInfo . ID = append ( bundleID , [ ] byte ( ratchetInfo . InstallationID ) ... )
return ratchetInfo , nil
default :
return nil , err
}
}
// GetAnyRatchetInfo retrieves any existing RatchetInfo for a specified interlocutor public key from the database
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) GetAnyRatchetInfo ( identity [ ] byte , installationID string ) ( * RatchetInfo , error ) {
2019-06-03 14:29:14 +00:00
stmt , err := s . DB . Prepare ( ` SELECT symmetric_key , bundles . private_key , signed_pre_key , bundle_id , ephemeral_key
2018-11-28 11:34:39 +00:00
FROM ratchet_info_v2 JOIN bundles ON bundle_id = signed_pre_key
WHERE expired = 0 AND ratchet_info_v2 . identity = ? AND ratchet_info_v2 . installation_id = ?
LIMIT 1 ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return nil , err
}
defer stmt . Close ( )
ratchetInfo := & RatchetInfo {
Identity : identity ,
InstallationID : installationID ,
}
err = stmt . QueryRow ( identity , installationID ) . Scan (
& ratchetInfo . Sk ,
& ratchetInfo . PrivateKey ,
& ratchetInfo . PublicKey ,
& ratchetInfo . BundleID ,
& ratchetInfo . EphemeralKey ,
)
switch err {
case sql . ErrNoRows :
return nil , nil
case nil :
ratchetInfo . ID = append ( ratchetInfo . BundleID , [ ] byte ( installationID ) ... )
return ratchetInfo , nil
default :
return nil , err
}
}
// RatchetInfoConfirmed clears the ephemeral key in the RatchetInfo
// associated with the specified bundle ID and interlocutor identity public key
2019-07-17 22:25:42 +00:00
func ( s * sqlitePersistence ) RatchetInfoConfirmed ( bundleID [ ] byte , theirIdentity [ ] byte , installationID string ) error {
2019-06-03 14:29:14 +00:00
stmt , err := s . DB . Prepare ( ` UPDATE ratchet_info_v2
2018-11-28 11:34:39 +00:00
SET ephemeral_key = NULL
WHERE identity = ? AND bundle_id = ? AND installation_id = ? ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
_ , err = stmt . Exec (
theirIdentity ,
bundleID ,
2018-10-16 10:31:05 +00:00
installationID ,
2018-09-24 18:07:34 +00:00
)
return err
}
// sqliteKeysStorage implements the double ratchet KeysStorage interface on
// top of a SQLite database.
type sqliteKeysStorage struct {
	db *sql.DB
}

// newSQLiteKeysStorage returns a sqliteKeysStorage backed by the given
// database handle.
func newSQLiteKeysStorage(db *sql.DB) *sqliteKeysStorage {
	return &sqliteKeysStorage{db: db}
}
2018-09-24 18:07:34 +00:00
// Get retrieves the message key for a specified public key and message number
2019-07-17 22:25:42 +00:00
func ( s * sqliteKeysStorage ) Get ( pubKey dr . Key , msgNum uint ) ( dr . Key , bool , error ) {
2019-11-04 10:08:22 +00:00
var key [ ] byte
2018-11-28 11:34:39 +00:00
stmt , err := s . db . Prepare ( ` SELECT message_key
FROM keys
WHERE public_key = ? AND msg_num = ?
LIMIT 1 ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return key , false , err
}
defer stmt . Close ( )
2019-11-04 10:08:22 +00:00
err = stmt . QueryRow ( pubKey , msgNum ) . Scan ( & key )
2018-09-24 18:07:34 +00:00
switch err {
case sql . ErrNoRows :
return key , false , nil
case nil :
return key , true , nil
default :
return key , false , err
}
}
// Put stores a key with the specified public key, message number and message key
2019-07-17 22:25:42 +00:00
func ( s * sqliteKeysStorage ) Put ( sessionID [ ] byte , pubKey dr . Key , msgNum uint , mk dr . Key , seqNum uint ) error {
2018-11-28 11:34:39 +00:00
stmt , err := s . db . Prepare ( ` INSERT INTO keys ( session_id , public_key , msg_num , message_key , seq_num )
VALUES ( ? , ? , ? , ? , ? ) ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
_ , err = stmt . Exec (
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
sessionID ,
2019-11-04 10:08:22 +00:00
pubKey ,
2018-09-24 18:07:34 +00:00
msgNum ,
2019-11-04 10:08:22 +00:00
mk ,
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
seqNum ,
2018-09-24 18:07:34 +00:00
)
return err
}
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
// DeleteOldMks caps remove any key < seq_num, included
2019-07-17 22:25:42 +00:00
func ( s * sqliteKeysStorage ) DeleteOldMks ( sessionID [ ] byte , deleteUntil uint ) error {
2018-11-28 11:34:39 +00:00
stmt , err := s . db . Prepare ( ` DELETE FROM keys
WHERE session_id = ? AND seq_num <= ? ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
_ , err = stmt . Exec (
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
sessionID ,
deleteUntil ,
2018-09-24 18:07:34 +00:00
)
return err
}
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
// TruncateMks caps the number of keys to maxKeysPerSession deleting them in FIFO fashion
2019-07-17 22:25:42 +00:00
func ( s * sqliteKeysStorage ) TruncateMks ( sessionID [ ] byte , maxKeysPerSession int ) error {
2018-11-28 11:34:39 +00:00
stmt , err := s . db . Prepare ( ` DELETE FROM keys
WHERE rowid IN ( SELECT rowid FROM keys WHERE session_id = ? ORDER BY seq_num DESC LIMIT ? OFFSET ? ) ` )
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
_ , err = stmt . Exec (
sessionID ,
// We LIMIT to the max number of rows here, as OFFSET can't be used without a LIMIT
maxNumberOfRows ,
maxKeysPerSession ,
)
return err
}
// DeleteMk deletes the key with the specified public key and message key
2019-07-17 22:25:42 +00:00
func ( s * sqliteKeysStorage ) DeleteMk ( pubKey dr . Key , msgNum uint ) error {
2018-11-28 11:34:39 +00:00
stmt , err := s . db . Prepare ( ` DELETE FROM keys
WHERE public_key = ? AND msg_num = ? ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
_ , err = stmt . Exec (
2019-11-04 10:08:22 +00:00
pubKey ,
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
msgNum ,
2018-09-24 18:07:34 +00:00
)
return err
}
// Count returns the count of keys with the specified public key
2019-07-17 22:25:42 +00:00
func ( s * sqliteKeysStorage ) Count ( pubKey dr . Key ) ( uint , error ) {
2018-11-28 11:34:39 +00:00
stmt , err := s . db . Prepare ( ` SELECT COUNT ( 1 )
FROM keys
WHERE public_key = ? ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return 0 , err
}
defer stmt . Close ( )
var count uint
2019-11-04 10:08:22 +00:00
err = stmt . QueryRow ( pubKey ) . Scan ( & count )
2018-09-24 18:07:34 +00:00
if err != nil {
return 0 , err
}
return count , nil
}
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
// CountAll returns the count of keys with the specified public key
2019-07-17 22:25:42 +00:00
func ( s * sqliteKeysStorage ) CountAll ( ) ( uint , error ) {
2018-11-28 11:34:39 +00:00
stmt , err := s . db . Prepare ( ` SELECT COUNT ( 1 )
FROM keys ` )
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
if err != nil {
return 0 , err
}
defer stmt . Close ( )
var count uint
err = stmt . QueryRow ( ) . Scan ( & count )
if err != nil {
return 0 , err
}
return count , nil
}
2018-09-24 18:07:34 +00:00
// All returns nil
2019-11-04 10:08:22 +00:00
func ( s * sqliteKeysStorage ) All ( ) ( map [ string ] map [ uint ] dr . Key , error ) {
2018-09-24 18:07:34 +00:00
return nil , nil
}
// sqliteSessionStorage is a double ratchet session store backed by a
// SQLite database.
type sqliteSessionStorage struct {
	db *sql.DB
}

// newSQLiteSessionStorage returns a session storage that uses the given
// database handle.
func newSQLiteSessionStorage(db *sql.DB) *sqliteSessionStorage {
	return &sqliteSessionStorage{db: db}
}
2018-09-24 18:07:34 +00:00
// Save persists the specified double ratchet state
2019-07-17 22:25:42 +00:00
func ( s * sqliteSessionStorage ) Save ( id [ ] byte , state * dr . State ) error {
2019-11-04 10:08:22 +00:00
dhr := state . DHr
2018-09-24 18:07:34 +00:00
dhs := state . DHs
dhsPublic := dhs . PublicKey ( )
dhsPrivate := dhs . PrivateKey ( )
pn := state . PN
step := state . Step
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
keysCount := state . KeysCount
2018-09-24 18:07:34 +00:00
2019-11-04 10:08:22 +00:00
rootChainKey := state . RootCh . CK
2018-09-24 18:07:34 +00:00
2019-11-04 10:08:22 +00:00
sendChainKey := state . SendCh . CK
2018-09-24 18:07:34 +00:00
sendChainN := state . SendCh . N
2019-11-04 10:08:22 +00:00
recvChainKey := state . RecvCh . CK
2018-09-24 18:07:34 +00:00
recvChainN := state . RecvCh . N
2018-11-28 11:34:39 +00:00
stmt , err := s . db . Prepare ( ` INSERT INTO sessions ( id , dhr , dhs_public , dhs_private , root_chain_key , send_chain_key , send_chain_n , recv_chain_key , recv_chain_n , pn , step , keys_count )
VALUES ( ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? ) ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
_ , err = stmt . Exec (
id ,
dhr ,
2019-11-04 10:08:22 +00:00
dhsPublic ,
dhsPrivate ,
2018-09-24 18:07:34 +00:00
rootChainKey ,
sendChainKey ,
sendChainN ,
recvChainKey ,
recvChainN ,
pn ,
step ,
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
keysCount ,
2018-09-24 18:07:34 +00:00
)
return err
}
// Load retrieves the double ratchet state for a given ID
2019-07-17 22:25:42 +00:00
func ( s * sqliteSessionStorage ) Load ( id [ ] byte ) ( * dr . State , error ) {
2018-11-28 11:34:39 +00:00
stmt , err := s . db . Prepare ( ` SELECT dhr , dhs_public , dhs_private , root_chain_key , send_chain_key , send_chain_n , recv_chain_key , recv_chain_n , pn , step , keys_count
FROM sessions
WHERE id = ? ` )
2018-09-24 18:07:34 +00:00
if err != nil {
return nil , err
}
defer stmt . Close ( )
var (
dhr [ ] byte
dhsPublic [ ] byte
dhsPrivate [ ] byte
rootChainKey [ ] byte
sendChainKey [ ] byte
sendChainN uint
recvChainKey [ ] byte
recvChainN uint
pn uint
step uint
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
keysCount uint
2018-09-24 18:07:34 +00:00
)
err = stmt . QueryRow ( id ) . Scan (
& dhr ,
& dhsPublic ,
& dhsPrivate ,
& rootChainKey ,
& sendChainKey ,
& sendChainN ,
& recvChainKey ,
& recvChainN ,
& pn ,
& step ,
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
& keysCount ,
2018-09-24 18:07:34 +00:00
)
switch err {
case sql . ErrNoRows :
return nil , nil
case nil :
2019-11-04 10:08:22 +00:00
state := dr . DefaultState ( rootChainKey )
2018-09-24 18:07:34 +00:00
state . PN = uint32 ( pn )
state . Step = step
Change handling of skipped/deleted keys & add version (#1261)
- Skipped keys
The purpose of limiting the number of skipped keys generated is to avoid a dos
attack whereby an attacker would send a large N, forcing the device to
compute all the keys between currentN..N .
Previously the logic for handling skipped keys was:
- If in the current receiving chain there are more than maxSkip keys,
throw an error
This is problematic as in long-lived session dropped/unreceived messages starts
piling up, eventually reaching the threshold (1000 dropped/unreceived
messages).
This logic has been changed to be more inline with signals spec, and now
it is:
- If N is > currentN + maxSkip, throw an error
The purpose of limiting the number of skipped keys stored is to avoid a dos
attack whereby an attacker would force us to store a large number of
keys, filling up our storage.
Previously the logic for handling old keys was:
- Once you have maxKeep ratchet steps, delete any key from
currentRatchet - maxKeep.
This, in combination with the maxSkip implementation, capped the number of stored keys to
maxSkip * maxKeep.
The logic has been changed to:
- Keep a maximum of MaxMessageKeysPerSession
and additionally we delete any key that has a sequence number <
currentSeqNum - maxKeep
- Version
We check now the version of the bundle so that when we get a bundle from
the same installationID with a higher version, we mark the previous
bundle as expired and use the new bundle the next time a message is sent
2018-11-05 19:00:04 +00:00
state . KeysCount = keysCount
2018-09-24 18:07:34 +00:00
2019-11-23 17:57:05 +00:00
state . DHs = crypto . DHPair {
2019-11-04 10:08:22 +00:00
PrvKey : dhsPrivate ,
PubKey : dhsPublic ,
2018-09-24 18:07:34 +00:00
}
2019-11-04 10:08:22 +00:00
state . DHr = dhr
2018-09-24 18:07:34 +00:00
2019-11-04 10:08:22 +00:00
state . SendCh . CK = sendChainKey
2018-09-24 18:07:34 +00:00
state . SendCh . N = uint32 ( sendChainN )
2019-11-04 10:08:22 +00:00
state . RecvCh . CK = recvChainKey
2018-09-24 18:07:34 +00:00
state . RecvCh . N = uint32 ( recvChainN )
return & state , nil
default :
return nil , err
}
}
// HRCache holds a hash ratchet key for a group together with its cached
// hash and sequence number, carrying both the new-format KeyID and the
// old-format DeprecatedKeyID.
type HRCache struct {
	GroupID         []byte
	KeyID           []byte
	DeprecatedKeyID uint32
	Key             []byte
	Hash            []byte
	SeqNo           uint32
}
2023-11-29 17:21:21 +00:00
// GetHashRatchetCache retrieves a hash ratchet key by group ID and seqNo.
2021-09-21 15:47:04 +00:00
// If cache data with given seqNo (e.g. 0) is not found,
// then the query will return the cache data with the latest seqNo
2023-11-29 17:21:21 +00:00
func ( s * sqlitePersistence ) GetHashRatchetCache ( ratchet * HashRatchetKeyCompatibility , seqNo uint32 ) ( * HRCache , error ) {
2024-02-29 09:51:38 +00:00
tx , err := s . DB . BeginTx ( context . Background ( ) , & sql . TxOptions { } )
2021-09-21 15:47:04 +00:00
if err != nil {
return nil , err
}
2024-02-29 09:51:38 +00:00
defer func ( ) {
if err == nil {
err = tx . Commit ( )
return
}
// don't shadow original error
_ = tx . Rollback ( )
} ( )
2023-10-12 15:45:23 +00:00
2024-02-29 09:51:38 +00:00
var key , keyID [ ] byte
if ! ratchet . IsOldFormat ( ) {
keyID , err = ratchet . GetKeyID ( )
2023-10-12 15:45:23 +00:00
if err != nil {
return nil , err
}
}
2024-02-29 09:51:38 +00:00
err = tx . QueryRow ( "SELECT key FROM hash_ratchet_encryption WHERE key_id = ? OR deprecated_key_id = ?" , keyID , ratchet . DeprecatedKeyID ( ) ) . Scan ( & key )
if err == sql . ErrNoRows {
2023-10-29 08:04:01 +00:00
return nil , nil
2021-09-21 15:47:04 +00:00
}
2024-02-29 09:51:38 +00:00
if err != nil {
return nil , err
}
args := make ( [ ] interface { } , 0 )
args = append ( args , ratchet . GroupID )
args = append ( args , keyID )
args = append ( args , ratchet . DeprecatedKeyID ( ) )
var query string
if seqNo == 0 {
query = "SELECT seq_no, hash FROM hash_ratchet_encryption_cache WHERE group_id = ? AND (key_id = ? OR key_id = ?) ORDER BY seq_no DESC limit 1"
} else {
query = "SELECT seq_no, hash FROM hash_ratchet_encryption_cache WHERE group_id = ? AND (key_id = ? OR key_id = ?) AND seq_no == ? ORDER BY seq_no DESC limit 1"
args = append ( args , seqNo )
}
2021-09-21 15:47:04 +00:00
2024-02-29 09:51:38 +00:00
var hash [ ] byte
var seqNoPtr * uint32
err = tx . QueryRow ( query , args ... ) . Scan ( & seqNoPtr , & hash ) //nolint: ineffassign,staticcheck
2021-09-21 15:47:04 +00:00
switch err {
2024-02-29 09:51:38 +00:00
case sql . ErrNoRows , nil :
2023-10-29 08:04:01 +00:00
var seqNoResult uint32
if seqNoPtr == nil {
seqNoResult = 0
} else {
seqNoResult = * seqNoPtr
}
2023-10-12 15:45:23 +00:00
ratchet . Key = key
keyID , err := ratchet . GetKeyID ( )
if err != nil {
return nil , err
}
res := & HRCache {
KeyID : keyID ,
Key : key ,
Hash : hash ,
SeqNo : seqNoResult ,
}
2021-09-21 15:47:04 +00:00
return res , nil
default :
return nil , err
}
}
2023-10-12 15:45:23 +00:00
// HashRatchetKeyCompatibility carries a hash ratchet key for a group in both
// the new format (content-derived key ID) and the deprecated format
// (timestamp-based key ID).
type HashRatchetKeyCompatibility struct {
	GroupID   []byte // group this key belongs to
	keyID     []byte // cached key ID; derived lazily by GetKeyID when empty
	Timestamp uint64 // creation timestamp; doubles as the deprecated key ID
	Key       []byte // the hash ratchet key material
}

// DeprecatedKeyID returns the old-style key identifier, which was simply the
// key timestamp truncated to 32 bits.
func (h *HashRatchetKeyCompatibility) DeprecatedKeyID() uint32 {
	deprecatedID := h.Timestamp
	return uint32(deprecatedID)
}

// IsOldFormat reports whether this value only carries deprecated-format
// information: neither a key ID nor key material is present.
func (h *HashRatchetKeyCompatibility) IsOldFormat() bool {
	if len(h.keyID) != 0 {
		return false
	}
	return len(h.Key) == 0
}
// GetKeyID returns the new-format key ID. A cached/stored ID is returned
// verbatim; otherwise the ID is derived from the group ID, timestamp and key
// material. It errors when the fields required for derivation are missing.
func (h *HashRatchetKeyCompatibility) GetKeyID() ([]byte, error) {
	if len(h.keyID) > 0 {
		return h.keyID, nil
	}

	// All three inputs are required to derive an ID.
	canDerive := len(h.GroupID) > 0 && h.Timestamp != 0 && len(h.Key) > 0
	if !canDerive {
		return nil, errors.New("could not create key")
	}

	return generateHashRatchetKeyID(h.GroupID, h.Timestamp, h.Key), nil
}
// GenerateNext creates a fresh hash ratchet key for the same group, with a
// timestamp strictly greater than both the current time and this key's
// timestamp (so ordering by key_timestamp always yields the newest key
// first). It returns an error if key generation fails or the new key cannot
// produce a key ID.
func (h *HashRatchetKeyCompatibility) GenerateNext() (*HashRatchetKeyCompatibility, error) {
	ratchet := &HashRatchetKeyCompatibility{
		GroupID: h.GroupID,
	}

	// Randomly generate a hash ratchet key.
	hrKey, err := crypto.GenerateKey()
	if err != nil {
		return nil, err
	}
	// NOTE: crypto.FromECDSA does not return an error; the original code's
	// second err check after this call was dead and has been removed.
	ratchet.Key = crypto.FromECDSA(hrKey)

	currentTime := GetCurrentTime()
	if h.Timestamp < currentTime {
		ratchet.Timestamp = bumpKeyID(currentTime)
	} else {
		ratchet.Timestamp = h.Timestamp + 1
	}

	// Validate that a key ID can be derived from the new key before
	// handing it to callers.
	if _, err = ratchet.GetKeyID(); err != nil {
		return nil, err
	}

	return ratchet, nil
}
2023-05-04 22:17:54 +00:00
// GetCurrentKeyForGroup retrieves a key ID for given group ID
2021-09-21 15:47:04 +00:00
// (with an assumption that key ids are shared in the group, and
// at any given time there is a single key used)
2023-10-12 15:45:23 +00:00
func ( s * sqlitePersistence ) GetCurrentKeyForGroup ( groupID [ ] byte ) ( * HashRatchetKeyCompatibility , error ) {
ratchet := & HashRatchetKeyCompatibility {
GroupID : groupID ,
}
2021-09-21 15:47:04 +00:00
2023-10-12 15:45:23 +00:00
stmt , err := s . DB . Prepare ( ` SELECT key_id , key_timestamp , key
2021-09-21 15:47:04 +00:00
FROM hash_ratchet_encryption
2023-10-12 15:45:23 +00:00
WHERE group_id = ? order by key_timestamp desc limit 1 ` )
2021-09-21 15:47:04 +00:00
if err != nil {
2023-10-12 15:45:23 +00:00
return nil , err
2021-09-21 15:47:04 +00:00
}
defer stmt . Close ( )
2023-10-12 15:45:23 +00:00
var keyID , key [ ] byte
var timestamp uint64
err = stmt . QueryRow ( groupID ) . Scan ( & keyID , & timestamp , & key )
2021-09-21 15:47:04 +00:00
switch err {
case sql . ErrNoRows :
2023-10-12 15:45:23 +00:00
return ratchet , nil
2021-09-21 15:47:04 +00:00
case nil :
2023-10-12 15:45:23 +00:00
ratchet . Key = key
ratchet . Timestamp = timestamp
_ , err = ratchet . GetKeyID ( )
if err != nil {
return nil , err
}
return ratchet , nil
2021-09-21 15:47:04 +00:00
default :
2023-10-12 15:45:23 +00:00
return nil , err
2021-09-21 15:47:04 +00:00
}
}
2023-10-12 15:45:23 +00:00
// GetKeysForGroup retrieves all key IDs for given group ID
func ( s * sqlitePersistence ) GetKeysForGroup ( groupID [ ] byte ) ( [ ] * HashRatchetKeyCompatibility , error ) {
2022-09-21 16:05:29 +00:00
2023-10-12 15:45:23 +00:00
var ratchets [ ] * HashRatchetKeyCompatibility
stmt , err := s . DB . Prepare ( ` SELECT key_id , key_timestamp , key
2022-09-21 16:05:29 +00:00
FROM hash_ratchet_encryption
2023-10-12 15:45:23 +00:00
WHERE group_id = ? order by key_timestamp desc ` )
2022-09-21 16:05:29 +00:00
if err != nil {
return nil , err
}
defer stmt . Close ( )
rows , err := stmt . Query ( groupID )
if err != nil {
return nil , err
}
for rows . Next ( ) {
2023-10-12 15:45:23 +00:00
ratchet := & HashRatchetKeyCompatibility { GroupID : groupID }
err := rows . Scan ( & ratchet . keyID , & ratchet . Timestamp , & ratchet . Key )
2022-09-21 16:05:29 +00:00
if err != nil {
return nil , err
}
2023-10-12 15:45:23 +00:00
ratchets = append ( ratchets , ratchet )
2022-09-21 16:05:29 +00:00
}
2023-10-12 15:45:23 +00:00
return ratchets , nil
2022-09-21 16:05:29 +00:00
}
2023-05-04 22:17:54 +00:00
// SaveHashRatchetKeyHash saves a hash ratchet key cache data
2021-09-21 15:47:04 +00:00
func ( s * sqlitePersistence ) SaveHashRatchetKeyHash (
2023-10-12 15:45:23 +00:00
ratchet * HashRatchetKeyCompatibility ,
2021-09-21 15:47:04 +00:00
hash [ ] byte ,
seqNo uint32 ,
) error {
2023-10-12 15:45:23 +00:00
stmt , err := s . DB . Prepare ( ` INSERT INTO hash_ratchet_encryption_cache ( group_id , key_id , hash , seq_no )
2021-09-21 15:47:04 +00:00
VALUES ( ? , ? , ? , ? ) ` )
if err != nil {
return err
}
defer stmt . Close ( )
2023-10-12 15:45:23 +00:00
keyID , err := ratchet . GetKeyID ( )
2022-11-07 17:30:00 +00:00
if err != nil {
return err
}
2023-10-12 15:45:23 +00:00
_ , err = stmt . Exec ( ratchet . GroupID , keyID , hash , seqNo )
2022-11-07 17:30:00 +00:00
2023-10-12 15:45:23 +00:00
return err
}
// SaveHashRatchetKey saves a hash ratchet key
func ( s * sqlitePersistence ) SaveHashRatchetKey ( ratchet * HashRatchetKeyCompatibility ) error {
stmt , err := s . DB . Prepare ( ` INSERT INTO hash_ratchet_encryption ( group_id , key_id , key_timestamp , deprecated_key_id , key )
VALUES ( ? , ? , ? , ? , ? ) ` )
2022-11-07 17:30:00 +00:00
if err != nil {
return err
}
defer stmt . Close ( )
2023-10-12 15:45:23 +00:00
keyID , err := ratchet . GetKeyID ( )
2021-09-21 15:47:04 +00:00
if err != nil {
return err
}
2023-10-12 15:45:23 +00:00
_ , err = stmt . Exec ( ratchet . GroupID , keyID , ratchet . Timestamp , ratchet . DeprecatedKeyID ( ) , ratchet . Key )
2021-09-21 15:47:04 +00:00
return err
}
2023-11-29 17:21:21 +00:00
// GetHashRatchetKeyByID looks up a hash ratchet key by its (new-format) key
// ID. It returns (nil, nil) when no key with that ID exists.
func (s *sqlitePersistence) GetHashRatchetKeyByID(keyID []byte) (*HashRatchetKeyCompatibility, error) {
	ratchet := &HashRatchetKeyCompatibility{
		keyID: keyID,
	}

	row := s.DB.QueryRow(`
	  SELECT group_id, key_timestamp, key
	  FROM hash_ratchet_encryption
	  WHERE key_id = ?`, keyID)
	switch err := row.Scan(&ratchet.GroupID, &ratchet.Timestamp, &ratchet.Key); err {
	case nil:
		return ratchet, nil
	case sql.ErrNoRows:
		// Unknown key ID: treated as absence, not as an error.
		return nil, nil
	default:
		return nil, err
	}
}