/*
- * Copyright (c) 2018 Apple Inc. All Rights Reserved.
+ * Copyright (c) 2018 - 2020 Apple Inc. All Rights Reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*/
import CloudKitCode
+import CloudKitCodeProtobuf
import CoreData
import Foundation
import os
let tplogDebug = OSLog(subsystem: "com.apple.security.trustedpeers", category: "debug")
let tplogTrace = OSLog(subsystem: "com.apple.security.trustedpeers", category: "trace")
-
let egoIdentitiesAccessGroup = "com.apple.security.egoIdentities"
+enum Viability {
+ case full
+ case partial
+ case none
+}
+
extension ResetReason {
static func from(cuttlefishResetReason: CuttlefishResetReason) -> ResetReason {
switch cuttlefishResetReason {
case .testGenerated:
return ResetReason.testGenerated
@unknown default:
- fatalError()
+ fatalError("unknown reset reason: \(cuttlefishResetReason)")
}
}
}
case failedToStoreSecret(errorCode: Int)
case unknownSecurityFoundationError
case failedToSerializeData
+ case unknownInternalError
+ case unknownSyncUserControllableViewsValue(value: Int32)
+ case noPeersPreapprovedBySelf
+ case peerRegisteredButNotStored(String)
}
extension ContainerError: LocalizedError {
return "SecurityFoundation returned an unknown type"
case .failedToSerializeData:
return "Failed to encode protobuf data"
+ case .unknownInternalError:
+ return "Internal code failed, but didn't return error"
+ case .unknownSyncUserControllableViewsValue(value: let value):
+ return "Unknown syncUserControllableViews number: \(value)"
+ case .noPeersPreapprovedBySelf:
+ return "No peers preapproved by the local peer"
+ case .peerRegisteredButNotStored(let s):
+ return "Peer \(s) not found in database"
}
}
}
extension ContainerError: CustomNSError {
-
public static var errorDomain: String {
return "com.apple.security.trustedpeers.container"
}
return 43
case .failedToSerializeData:
return 44
+ case .unknownInternalError:
+ return 45
+ case .unknownSyncUserControllableViewsValue:
+ return 46
+ case .noPeersPreapprovedBySelf:
+ return 47
+ case .peerRegisteredButNotStored:
+ return 48
}
}
}
func saveSecret(_ secret: Data, label: String) throws {
-
let query: [CFString: Any] = [
kSecClass: kSecClassInternetPassword,
kSecAttrAccessible: kSecAttrAccessibleWhenUnlocked,
func loadEgoKeys(peerID: String, resultHandler: @escaping (OctagonSelfPeerKeys?, Error?) -> Void) {
loadEgoKeyPair(identifier: signingKeyIdentifier(peerID: peerID)) { signingKey, error in
guard let signingKey = signingKey else {
- os_log("Unable to load signing key: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ os_log("Unable to load signing key: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
resultHandler(nil, error)
return
}
loadEgoKeyPair(identifier: encryptionKeyIdentifier(peerID: peerID)) { encryptionKey, error in
guard let encryptionKey = encryptionKey else {
- os_log("Unable to load encryption key: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ os_log("Unable to load encryption key: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
resultHandler(nil, error)
return
}
throw error
}
}
- }.compactMap { $0 }
+ }
+ .compactMap { $0 }
+}
+
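+// Attempts to recover the given TLK shares using this peer's keys, saving any recovered TLKs to the keychain.
+// Returns (number of distinct TLKs recovered, number of shares recovered).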
+@discardableResult
+func extract(tlkShares: [CKKSTLKShare], peer: OctagonSelfPeerKeys, model: TPModel) -> (Int64, Int64) {
+ os_log("Attempting to recover %d TLK shares for peer %{public}@", log: tplogDebug, type: .default, tlkShares.count, peer.peerID)
+
+ return extract(tlkShares: tlkShares, peer: peer, sponsorPeerID: nil, model: model)
}
-func extract(tlkShares: [CKKSTLKShare], peer: CKKSSelfPeer) {
- os_log("Attempting to recover %d TLK shares for peer %@", log: tplogDebug, type: .default, tlkShares.count, peer.peerID)
+@discardableResult
+func extract(tlkShares: [CKKSTLKShare], peer: OctagonSelfPeerKeys, sponsorPeerID: String?, model: TPModel) -> (Int64, Int64) {
+ var tlksRecovered: Set<String> = Set()
+ var sharesRecovered: Int64 = 0
+
for share in tlkShares {
guard share.receiverPeerID == peer.peerID else {
os_log("Skipping %@ (wrong peerID)", log: tplogDebug, type: .default, share)
}
do {
- // TODO: how should we handle peer sets here?
+ var trustedPeers: [AnyHashable] = [peer]
+
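+ // Build the trusted peer set for share recovery: ourselves plus every peer trusted
+ // by the sponsor (or by our own peer, if no sponsor is given).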
+ if let egoPeer = model.peer(withID: sponsorPeerID ?? peer.peerID) {
+ egoPeer.trustedPeerIDs.forEach { trustedPeerID in
+ if let peer = model.peer(withID: trustedPeerID) {
+ let peerObj = CKKSActualPeer(peerID: trustedPeerID,
+ encryptionPublicKey: (peer.permanentInfo.encryptionPubKey as! _SFECPublicKey),
+ signing: (peer.permanentInfo.signingPubKey as! _SFECPublicKey),
+ viewList: [])
+
+ trustedPeers.append(peerObj)
+ } else {
+ os_log("No peer for trusted ID %{public}@", log: tplogDebug, type: .default, trustedPeerID)
+ }
+ }
+ } else {
+ os_log("No ego peer in model; no trusted peers", log: tplogDebug, type: .default)
+ }
+
let key = try share.recoverTLK(peer,
- trustedPeers: [peer as! AnyHashable],
+ trustedPeers: Set(trustedPeers),
ckrecord: nil)
try key.saveMaterialToKeychain()
+ tlksRecovered.insert(key.uuid)
+ sharesRecovered += 1
os_log("Recovered %@ (from %@)", log: tplogDebug, type: .default, key, share)
} catch {
- os_log("Failed to recover share %@: %@", log: tplogDebug, type: .default, share, error as CVarArg)
+ os_log("Failed to recover share %@: %{public}@", log: tplogDebug, type: .default, share, error as CVarArg)
}
}
+ return (Int64(tlksRecovered.count), sharesRecovered)
}
struct ContainerState {
var peers: [String: TPPeer] = [:]
var vouchers: [TPVoucher] = []
var bottles = Set<BottleMO>()
+ var escrowRecords = Set<EscrowRecordMO>()
var recoverySigningKey: Data?
var recoveryEncryptionKey: Data?
}
let osVersion: String?
let policyVersion: UInt64?
let policySecrets: [String: Data]?
- let recoverySigningPubKey: Data?
- var recoveryEncryptionPubKey: Data?
+ let setSyncUserControllableViews: TPPBPeerStableInfo_UserControllableViewStatus?
}
// CoreData doesn't handle creating an identical model from an identical URL. Help it out.
private var nsObjectModels: [URL: NSManagedObjectModel] = [:]
private let nsObjectModelsQueue = DispatchQueue(label: "com.apple.security.TrustedPeersHelper.nsObjectModels")
+
func getOrMakeModel(url: URL) -> NSManagedObjectModel {
return nsObjectModelsQueue.sync {
if let model = nsObjectModels[url] {
}
}
+extension ContainerMO {
+ func egoStableInfo() -> TPPeerStableInfo? {
+ guard let egoStableData = self.egoPeerStableInfo,
+ let egoStableSig = self.egoPeerStableInfoSig else {
+ return nil
+ }
+
+ return TPPeerStableInfo(data: egoStableData, sig: egoStableSig)
+ }
+}
+
/// This maps to a Cuttlefish service backed by a CloudKit container,
/// and a corresponding local Core Data persistent container.
///
// that queue.
internal let moc: NSManagedObjectContext
+ // To facilitate CoreData tear down, we need to keep the PersistentContainer around.
+ internal let persistentContainer: NSPersistentContainer
+
// Rather than Container having its own dispatch queue, we use moc's queue
// to synchronise access to our own state as well. So the following instance
// variables must only be accessed within blocks executed by calling
// moc.perform() or moc.performAndWait().
internal var containerMO: ContainerMO
internal var model: TPModel
+ internal var escrowCacheTimeout: TimeInterval
+
+ // Used in tests only. Set when an identity is prepared using a policy version override
+ internal var policyVersionOverride: TPPolicyVersion?
/**
Construct a Container.
// Set up Core Data stack
let url = Bundle(for: type(of: self)).url(forResource: "TrustedPeersHelper", withExtension: "momd")!
let mom = getOrMakeModel(url: url)
- let persistentContainer = NSPersistentContainer(name: "TrustedPeersHelper", managedObjectModel: mom)
- persistentContainer.persistentStoreDescriptions = [persistentStoreDescription]
+ self.persistentContainer = NSPersistentContainer(name: "TrustedPeersHelper", managedObjectModel: mom)
+ self.persistentContainer.persistentStoreDescriptions = [persistentStoreDescription]
- persistentContainer.loadPersistentStores { _, error in
+ self.persistentContainer.loadPersistentStores { _, error in
initError = error
}
if let initError = initError {
throw initError
}
- let moc = persistentContainer.newBackgroundContext()
+ let moc = self.persistentContainer.newBackgroundContext()
moc.mergePolicy = NSMergePolicy.mergeByPropertyStoreTrump
moc.performAndWait {
Container.onqueueUpgradeMachineIDSetToModel(container: containerMO!, moc: moc)
Container.onqueueUpgradeMachineIDSetToUseStatus(container: containerMO!, moc: moc)
+ // Remove duplicate vouchers on all the peers
+ Container.onqueueRemoveDuplicateVouchersPerPeer(container: containerMO!, moc: moc)
+
model = Container.loadModel(from: containerMO!)
Container.ensureEgoConsistency(from: containerMO!, model: model!)
try moc.save()
self.containerMO = containerMO!
self.cuttlefish = cuttlefish
self.model = model!
-
+ self.escrowCacheTimeout = 60.0 * 15.0 // 15 minutes
super.init()
}
+ func deletePersistentStore() throws {
+ // Call this to entirely destroy the persistent store.
+ // This container should not be used after this event.
+
+ try self.persistentContainer.persistentStoreDescriptions.forEach { storeDescription in
+ if let url = storeDescription.url {
+ try self.moc.persistentStoreCoordinator?.destroyPersistentStore(at: url,
+ ofType: storeDescription.type,
+ options: [:])
+ }
+ }
+ }
+
// Must be on containerMO's moc queue to call this
internal static func loadModel(from containerMO: ContainerMO) -> TPModel {
// Populate model from persistent store
do {
try model.update(stableInfo, forPeerWithID: permanentInfo.peerID)
} catch {
- os_log("loadModel unable to update stable info for peer(%@): %@", log: tplogDebug, type: .default, peer, error as CVarArg)
+ os_log("loadModel unable to update stable info for peer(%{public}@): %{public}@", log: tplogDebug, type: .default, peer, error as CVarArg)
}
} else {
- os_log("loadModel: peer %@ has unparseable stable info", log: tplogDebug, type: .default, permanentInfo.peerID)
+ os_log("loadModel: peer %{public}@ has unparseable stable info", log: tplogDebug, type: .default, permanentInfo.peerID)
}
} else {
- os_log("loadModel: peer %@ has no stable info", log: tplogDebug, type: .default, permanentInfo.peerID)
+ os_log("loadModel: peer %{public}@ has no stable info", log: tplogDebug, type: .default, permanentInfo.peerID)
}
if let data = peer.dynamicInfo, let sig = peer.dynamicInfoSig {
if let dynamicInfo = TPPeerDynamicInfo(data: data as Data, sig: sig as Data) {
do {
try model.update(dynamicInfo, forPeerWithID: permanentInfo.peerID)
} catch {
- os_log("loadModel unable to update dynamic info for peer(%@): %@", log: tplogDebug, type: .default, peer, error as CVarArg)
+ os_log("loadModel unable to update dynamic info for peer(%{public}@): %{public}@", log: tplogDebug, type: .default, peer, error as CVarArg)
}
} else {
- os_log("loadModel: peer %@ has unparseable dynamic info", log: tplogDebug, type: .default, permanentInfo.peerID)
+ os_log("loadModel: peer %{public}@ has unparseable dynamic info", log: tplogDebug, type: .default, permanentInfo.peerID)
}
} else {
- os_log("loadModel: peer %@ has no dynamic info", log: tplogDebug, type: .default, permanentInfo.peerID)
+ os_log("loadModel: peer %{public}@ has no dynamic info", log: tplogDebug, type: .default, permanentInfo.peerID)
}
peer.vouchers?.forEach {
let v = $0 as! VoucherMO
}
}
+ os_log("loadModel: loaded %{public}d vouchers", log: tplogDebug, type: .default, model.allVouchers().count)
+
+ // Note: the containerMO objects are misnamed; they are key data, and not SPKI.
+ if let recoveryKeySigningKeyData = containerMO.recoveryKeySigningSPKI,
+ let recoveryKeyEncryptionKeyData = containerMO.recoveryKeyEncryptionSPKI {
+ model.setRecoveryKeys(TPRecoveryKeyPair(signingKeyData: recoveryKeySigningKeyData, encryptionKeyData: recoveryKeyEncryptionKeyData))
+ } else {
+ // If the ego peer has an RK set, tell the model to use that one
+ // This is a hack to work around TPH databases which don't have the RK set on the container due to previously running old software
+ if let egoStableInfo = containerMO.egoStableInfo(),
+ egoStableInfo.recoverySigningPublicKey.count > 0,
+ egoStableInfo.recoveryEncryptionPublicKey.count > 0 {
+ os_log("loadModel: recovery key not set in model, but is set on ego peer", log: tplogDebug, type: .default)
+ model.setRecoveryKeys(TPRecoveryKeyPair(signingKeyData: egoStableInfo.recoverySigningPublicKey, encryptionKeyData: egoStableInfo.recoveryEncryptionPublicKey))
+ }
+ }
+
// Register persisted policies (cached from cuttlefish)
let policies = containerMO.policies as? Set<PolicyMO>
policies?.forEach { policyMO in
let allowedMachineIDs = Set(knownMachines.filter { $0.status == TPMachineIDStatus.allowed.rawValue }.compactMap { $0.machineID })
let disallowedMachineIDs = Set(knownMachines.filter { $0.status == TPMachineIDStatus.disallowed.rawValue }.compactMap { $0.machineID })
- os_log("loadModel: allowedMachineIDs: %@", log: tplogDebug, type: .default, allowedMachineIDs)
- os_log("loadModel: disallowedMachineIDs: %@", log: tplogDebug, type: .default, disallowedMachineIDs)
+ os_log("loadModel: allowedMachineIDs: %{public}@", log: tplogDebug, type: .default, allowedMachineIDs)
+ os_log("loadModel: disallowedMachineIDs: %{public}@", log: tplogDebug, type: .default, disallowedMachineIDs)
- if allowedMachineIDs.count == 0 {
+ if allowedMachineIDs.isEmpty {
os_log("loadModel: no allowedMachineIDs?", log: tplogDebug, type: .default)
}
containerMO.egoPeerStableInfo = modelStableInfo.data
containerMO.egoPeerStableInfoSig = modelStableInfo.sig
}
-
}
static func dictionaryRepresentation(bottle: BottleMO) -> [String: Any] {
func onQueueDetermineLocalTrustStatus(reply: @escaping (TrustedPeersHelperEgoPeerStatus, Error?) -> Void) {
let viablePeerCountsByModelID = self.model.viablePeerCountsByModelID()
let peerCountsByMachineID = self.model.peerCountsByMachineID()
-
if let egoPeerID = self.containerMO.egoPeerID {
var status = self.model.statusOfPeer(withID: egoPeerID)
var isExcluded: Bool = (status == .excluded)
guard returnError == nil else {
var isLocked = false
if let error = (loadError as NSError?) {
- os_log("trust status: Unable to load ego keys: %@", log: tplogDebug, type: .default, error as CVarArg)
+ os_log("trust status: Unable to load ego keys: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
if error.code == errSecItemNotFound && error.domain == NSOSStatusErrorDomain {
os_log("trust status: Lost the ego key pair, returning 'excluded' in hopes of fixing up the identity", log: tplogDebug, type: .debug)
isExcluded = true
reply(egoStatus, nil)
return
}
-
} else {
// With no ego peer ID, either return 'excluded' if there are extant peers, or 'unknown' to signal no peers at all
if self.model.allPeerIDs().isEmpty {
let reply: (TrustedPeersHelperEgoPeerStatus, Error?) -> Void = {
// Suppress logging of successful replies here; it's not that useful
let logType: OSLogType = $1 == nil ? .debug : .info
- os_log("trustStatus complete: %@ %@",
+ os_log("trustStatus complete: %{public}@ %{public}@",
log: tplogTrace, type: logType, TPPeerStatusToString($0.egoStatus), traceError($1))
self.semaphore.signal()
self.moc.performAndWait {
// Knowledge of your peer status only exists if you know about other peers. If you haven't fetched, fetch.
if self.containerMO.changeToken == nil {
- self.fetchAndPersistChanges { fetchError in
+ self.onqueueFetchAndPersistChanges { fetchError in
guard fetchError == nil else {
if let error = fetchError {
- os_log("Unable to fetch changes, trust status is unknown: %@", log: tplogDebug, type: .default, error as CVarArg)
+ os_log("Unable to fetch changes, trust status is unknown: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
}
let egoStatus = TrustedPeersHelperEgoPeerStatus(egoPeerID: nil,
func fetchTrustState(reply: @escaping (TrustedPeersHelperPeerState?, [TrustedPeersHelperPeer]?, Error?) -> Void) {
let reply: (TrustedPeersHelperPeerState?, [TrustedPeersHelperPeer]?, Error?) -> Void = {
- os_log("fetch trust state complete: %@ %@",
+ os_log("fetch trust state complete: %{public}@ %{public}@",
log: tplogTrace, type: .info, String(reflecting: $0), traceError($2))
reply($0, $1, $2)
}
self.moc.performAndWait {
if let egoPeerID = self.containerMO.egoPeerID,
- let egoPermData = self.containerMO.egoPeerPermanentInfo,
- let egoPermSig = self.containerMO.egoPeerPermanentInfoSig {
-
+ let egoPermData = self.containerMO.egoPeerPermanentInfo,
+ let egoPermSig = self.containerMO.egoPeerPermanentInfoSig {
let keyFactory = TPECPublicKeyFactory()
guard let permanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
os_log("fetchTrustState failed to create TPPeerPermanentInfo", log: tplogDebug, type: .error)
}
let isPreapproved = self.model.hasPotentiallyTrustedPeerPreapprovingKey(permanentInfo.signingPubKey.spki())
- os_log("fetchTrustState: ego peer is %@", log: tplogDebug, type: .default, isPreapproved ? "preapproved" : "not yet preapproved")
+ os_log("fetchTrustState: ego peer is %{public}@", log: tplogDebug, type: .default, isPreapproved ? "preapproved" : "not yet preapproved")
let egoStableInfo = self.model.getStableInfoForPeer(withID: egoPeerID)
egoPeer.trustedPeerIDs.forEach { trustedPeerID in
if let peer = self.model.peer(withID: trustedPeerID) {
let peerViews = try? self.model.getViewsForPeer(peer.permanentInfo,
- stableInfo: peer.stableInfo,
- inViews: Set())
+ stableInfo: peer.stableInfo)
tphPeers.append(TrustedPeersHelperPeer(peerID: trustedPeerID,
signingSPKI: peer.permanentInfo.signingPubKey.spki(),
encryptionSPKI: peer.permanentInfo.encryptionPubKey.spki(),
viewList: peerViews ?? Set()))
} else {
- os_log("No peer for trusted ID %@", log: tplogDebug, type: .default, trustedPeerID)
+ os_log("No peer for trusted ID %{public}@", log: tplogDebug, type: .default, trustedPeerID)
+ }
+ }
+
+ if let stableInfo = egoPeer.stableInfo, stableInfo.recoveryEncryptionPublicKey.count > 0, stableInfo.recoverySigningPublicKey.count > 0 {
+ let recoveryKeyPair = TPRecoveryKeyPair(stableInfo: stableInfo)
+
+ do {
+ // The RK should have all views. So, claim that it should have all views that this peer has.
+ let rkViews = try self.model.getViewsForPeer(egoPeer.permanentInfo,
+ stableInfo: egoPeer.stableInfo)
+
+ tphPeers.append(try RecoveryKey.asPeer(recoveryKeys: recoveryKeyPair,
+ viewList: rkViews))
+ } catch {
+ os_log("Unable to add RK as a trusted peer: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
}
}
} else {
os_log("No ego peer in model; no trusted peers", log: tplogDebug, type: .default)
}
- os_log("Returning trust state: %@ %@", log: tplogDebug, type: .default, egoPeerStatus, tphPeers)
+ os_log("Returning trust state: %{public}@ %@", log: tplogDebug, type: .default, egoPeerStatus, tphPeers)
reply(egoPeerStatus, tphPeers, nil)
} else {
// With no ego peer ID, there are no trusted peers
func dump(reply: @escaping ([AnyHashable: Any]?, Error?) -> Void) {
let reply: ([AnyHashable: Any]?, Error?) -> Void = {
- os_log("dump complete: %@",
+ os_log("dump complete: %{public}@",
log: tplogTrace, type: .info, traceError($1))
reply($0, $1)
- }
- self.moc.performAndWait {
+ }
+ self.moc.performAndWait {
var d: [AnyHashable: Any] = [:]
if let egoPeerID = self.containerMO.egoPeerID {
}
func dumpEgoPeer(reply: @escaping (String?, TPPeerPermanentInfo?, TPPeerStableInfo?, TPPeerDynamicInfo?, Error?) -> Void) {
- let reply: (String?, TPPeerPermanentInfo?, TPPeerStableInfo?, TPPeerDynamicInfo?, Error?) -> Void = {
- os_log("dumpEgoPeer complete: %@", log: tplogTrace, type: .info, traceError($4))
- reply($0, $1, $2, $3, $4)
- }
- self.moc.performAndWait {
+ let reply: (String?, TPPeerPermanentInfo?, TPPeerStableInfo?, TPPeerDynamicInfo?, Error?) -> Void = {
+ os_log("dumpEgoPeer complete: %{public}@", log: tplogTrace, type: .info, traceError($4))
+ reply($0, $1, $2, $3, $4)
+ }
+ self.moc.performAndWait {
guard let egoPeerID = self.containerMO.egoPeerID else {
reply(nil, nil, nil, nil, ContainerError.noPreparedIdentity)
return
func validatePeers(request: ValidatePeersRequest, reply: @escaping ([AnyHashable: Any]?, Error?) -> Void) {
self.semaphore.wait()
let reply: ([AnyHashable: Any]?, Error?) -> Void = {
- os_log("validatePeers complete %@", log: tplogTrace, type: .info, traceError($1))
+ os_log("validatePeers complete %{public}@", log: tplogTrace, type: .info, traceError($1))
self.semaphore.signal()
reply($0, $1)
}
self.cuttlefish.validatePeers(request) { response, error in
- os_log("ValidatePeers(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
guard let response = response, error == nil else {
- os_log("validatePeers failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ os_log("validatePeers failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(nil, error ?? ContainerError.cloudkitResponseMissing)
return
}
}
}
- func getViews(inViews: [String], reply: @escaping ([String]?, Error?) -> Void) {
- let reply: ([String]?, Error?) -> Void = {
- os_log("getViews complete %@", log: tplogTrace, type: .info, traceError($1))
- reply($0, $1)
- }
- self.moc.performAndWait {
- guard let egoPeerID = self.containerMO.egoPeerID,
- let egoPermData = self.containerMO.egoPeerPermanentInfo,
- let egoPermSig = self.containerMO.egoPeerPermanentInfoSig,
- let egoStableData = self.containerMO.egoPeerStableInfo,
- let egoStableSig = self.containerMO.egoPeerStableInfoSig
- else {
- os_log("getViews failed to find ego peer information", log: tplogDebug, type: .error)
- reply(nil, ContainerError.noPreparedIdentity)
- return
- }
- guard let stableInfo = TPPeerStableInfo(data: egoStableData, sig: egoStableSig) else {
- os_log("getViews failed to create TPPeerStableInfo", log: tplogDebug, type: .error)
- reply(nil, ContainerError.invalidStableInfoOrSig)
- return
- }
-
- let keyFactory = TPECPublicKeyFactory()
- guard let permanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
- os_log("getViews failed to create TPPeerPermanentInfo", log: tplogDebug, type: .error)
- reply(nil, ContainerError.invalidPermanentInfoOrSig)
- return
- }
-
- do {
- let views = try self.model.getViewsForPeer(permanentInfo, stableInfo: stableInfo, inViews: Set(inViews))
- reply(Array(views), nil)
- } catch {
- reply(nil, error)
- return
- }
- }
- }
-
func reset(resetReason: CuttlefishResetReason, reply: @escaping (Error?) -> Void) {
self.semaphore.wait()
let reply: (Error?) -> Void = {
- os_log("reset complete %@", log: tplogTrace, type: .info, traceError($0))
+ os_log("reset complete %{public}@", log: tplogTrace, type: .info, traceError($0))
self.semaphore.signal()
reply($0)
}
$0.resetReason = resetReason
}
self.cuttlefish.reset(request) { response, error in
- os_log("Reset(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
guard let response = response, error == nil else {
- os_log("reset failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ os_log("reset failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(error ?? ContainerError.cloudkitResponseMissing)
return
}
os_log("reset succeded", log: tplogDebug, type: .default)
reply(nil)
} catch {
- os_log("reset persist failed: %@", log: tplogDebug, type: .default, (error as CVarArg))
+ os_log("reset persist failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg))
reply(error)
}
}
func localReset(reply: @escaping (Error?) -> Void) {
self.semaphore.wait()
let reply: (Error?) -> Void = {
- os_log("localReset complete %@", log: tplogTrace, type: .info, traceError($0))
+ os_log("localReset complete %{public}@", log: tplogTrace, type: .info, traceError($0))
self.semaphore.signal()
reply($0)
}
}
}
- // policyVersion should only be non-nil for testing, to override prevailingPolicyVersion
+ // policyVersion should only be non-nil for testing, to override prevailingPolicyVersion.versionNumber
func prepare(epoch: UInt64,
machineID: String,
bottleSalt: String,
bottleID: String,
modelID: String,
deviceName: String?,
- serialNumber: String,
+ serialNumber: String?,
osVersion: String,
- policyVersion: UInt64?,
+ policyVersion: TPPolicyVersion?,
policySecrets: [String: Data]?,
+ syncUserControllableViews: TPPBPeerStableInfo_UserControllableViewStatus,
signingPrivateKeyPersistentRef: Data?,
encryptionPrivateKeyPersistentRef: Data?,
- reply: @escaping (String?, Data?, Data?, Data?, Data?, Error?) -> Void) {
+ reply: @escaping (String?, Data?, Data?, Data?, Data?, TPSyncingPolicy?, Error?) -> Void) {
self.semaphore.wait()
- let reply: (String?, Data?, Data?, Data?, Data?, Error?) -> Void = {
- os_log("prepare complete peerID: %@ %@",
- log: tplogTrace, type: .info, ($0 ?? "NULL") as CVarArg, traceError($5))
+ let reply: (String?, Data?, Data?, Data?, Data?, TPSyncingPolicy?, Error?) -> Void = {
+ os_log("prepare complete peerID: %{public}@ %{public}@",
+ log: tplogTrace, type: .info, ($0 ?? "NULL") as CVarArg, traceError($6))
self.semaphore.signal()
- reply($0, $1, $2, $3, $4, $5)
+ reply($0, $1, $2, $3, $4, $5, $6)
}
// Create a new peer identity with random keys, and store the keys in keychain
signingKeyPair = try self.loadOrCreateKeyPair(privateKeyPersistentRef: signingPrivateKeyPersistentRef)
encryptionKeyPair = try self.loadOrCreateKeyPair(privateKeyPersistentRef: encryptionPrivateKeyPersistentRef)
+ // <rdar://problem/56270219> Octagon: use epoch transmitted across pairing channel
permanentInfo = try TPPeerPermanentInfo(machineID: machineID,
modelID: modelID,
epoch: 1,
signing: signingKeyPair,
encryptionKeyPair: encryptionKeyPair,
peerIDHashAlgo: TPHashAlgo.SHA256)
-
} catch {
- reply(nil, nil, nil, nil, nil, error)
+ reply(nil, nil, nil, nil, nil, nil, error)
return
}
_ = try saveSecret(bottle.secret, label: peerID)
} catch {
- os_log("bottle creation failed: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, nil, nil, nil, nil, error)
+ os_log("bottle creation failed: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, nil, nil, nil, nil, nil, error)
return
}
saveEgoKeyPair(signingKeyPair, identifier: signingKeyIdentifier(peerID: peerID)) { success, error in
guard success else {
- os_log("Unable to save signing key: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
- reply(nil, nil, nil, nil, nil, error ?? ContainerError.failedToStoreIdentity)
+ os_log("Unable to save signing key: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ reply(nil, nil, nil, nil, nil, nil, error ?? ContainerError.failedToStoreIdentity)
return
}
saveEgoKeyPair(encryptionKeyPair, identifier: encryptionKeyIdentifier(peerID: peerID)) { success, error in
guard success else {
- os_log("Unable to save encryption key: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
- reply(nil, nil, nil, nil, nil, error ?? ContainerError.failedToStoreIdentity)
+ os_log("Unable to save encryption key: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ reply(nil, nil, nil, nil, nil, nil, error ?? ContainerError.failedToStoreIdentity)
return
}
- // Save the prepared identity as containerMO.egoPeer* and its bottle
- self.moc.performAndWait {
- do {
- let policyVersion = policyVersion ?? prevailingPolicyVersion
- let policyDoc = try self.getPolicyDoc(policyVersion)
-
- let stableInfo = TPPeerStableInfo(clock: 1,
- policyVersion: policyDoc.policyVersion,
- policyHash: policyDoc.policyHash,
- policySecrets: policySecrets,
- deviceName: deviceName,
- serialNumber: serialNumber,
- osVersion: osVersion,
- signing: signingKeyPair,
- recoverySigningPubKey: nil,
- recoveryEncryptionPubKey: nil,
- error: nil)
-
- self.containerMO.egoPeerID = permanentInfo.peerID
- self.containerMO.egoPeerPermanentInfo = permanentInfo.data
- self.containerMO.egoPeerPermanentInfoSig = permanentInfo.sig
- self.containerMO.egoPeerStableInfo = stableInfo.data
- self.containerMO.egoPeerStableInfoSig = stableInfo.sig
-
- let bottleMO = BottleMO(context: self.moc)
- bottleMO.peerID = bottle.peerID
- bottleMO.bottleID = bottle.bottleID
- bottleMO.escrowedSigningSPKI = bottle.escrowSigningSPKI
- bottleMO.signatureUsingEscrowKey = bottle.signatureUsingEscrowKey
- bottleMO.signatureUsingPeerKey = bottle.signatureUsingPeerKey
- bottleMO.contents = bottle.contents
-
- self.containerMO.addToBottles(bottleMO)
+ let actualPolicyVersion = policyVersion ?? prevailingPolicyVersion
+ self.fetchPolicyDocumentWithSemaphore(version: actualPolicyVersion) { policyDoc, policyFetchError in
+ guard let policyDoc = policyDoc, policyFetchError == nil else {
+ os_log("Unable to fetch policy: %{public}@", log: tplogDebug, type: .default, (policyFetchError as CVarArg?) ?? "error missing")
+ reply(nil, nil, nil, nil, nil, nil, policyFetchError ?? ContainerError.unknownInternalError)
+ return
+ }
- try self.moc.save()
+ if policyVersion != nil {
+ self.policyVersionOverride = policyDoc.version
+ }
- reply(permanentInfo.peerID, permanentInfo.data, permanentInfo.sig, stableInfo.data, stableInfo.sig, nil)
- } catch {
- reply(nil, nil, nil, nil, nil, error)
+ // Save the prepared identity as containerMO.egoPeer* and its bottle
+ self.moc.performAndWait {
+ do {
+ // Note: the client chooses the syncUserControllableViews value here.
+ // If they pass in UNKNOWN, we'll fix it later at join time, following the peers we trust.
+ let syncUserViews = syncUserControllableViews.sanitizeForPlatform(permanentInfo: permanentInfo)
+
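+ // Advertise the frozen policy version, with the fetched document as the newer "flexible" version,
+ // unless the fetched document is older than the frozen version (then advertise only the fetched one).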
+ let useFrozenPolicyVersion = policyDoc.version.versionNumber >= frozenPolicyVersion.versionNumber
+
+ let stableInfo = try TPPeerStableInfo(clock: 1,
+ frozenPolicyVersion: useFrozenPolicyVersion ? frozenPolicyVersion : policyDoc.version,
+ flexiblePolicyVersion: useFrozenPolicyVersion ? policyDoc.version : nil,
+ policySecrets: policySecrets,
+ syncUserControllableViews: syncUserViews,
+ deviceName: deviceName,
+ serialNumber: serialNumber,
+ osVersion: osVersion,
+ signing: signingKeyPair,
+ recoverySigningPubKey: nil,
+ recoveryEncryptionPubKey: nil)
+ self.containerMO.egoPeerID = permanentInfo.peerID
+ self.containerMO.egoPeerPermanentInfo = permanentInfo.data
+ self.containerMO.egoPeerPermanentInfoSig = permanentInfo.sig
+ self.containerMO.egoPeerStableInfo = stableInfo.data
+ self.containerMO.egoPeerStableInfoSig = stableInfo.sig
+
+ let bottleMO = BottleMO(context: self.moc)
+ bottleMO.peerID = bottle.peerID
+ bottleMO.bottleID = bottle.bottleID
+ bottleMO.escrowedSigningSPKI = bottle.escrowSigningSPKI
+ bottleMO.signatureUsingEscrowKey = bottle.signatureUsingEscrowKey
+ bottleMO.signatureUsingPeerKey = bottle.signatureUsingPeerKey
+ bottleMO.contents = bottle.contents
+
+ self.containerMO.addToBottles(bottleMO)
+
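+ // Determine the syncing policy (which views this peer should sync) for the new identity; it is returned in the reply below.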
+ let syncingPolicy = try self.syncingPolicyFor(modelID: permanentInfo.modelID, stableInfo: stableInfo)
+
+ try self.moc.save()
+
+ reply(permanentInfo.peerID, permanentInfo.data, permanentInfo.sig, stableInfo.data, stableInfo.sig, syncingPolicy, nil)
+ } catch {
+ os_log("Unable to save identity: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, nil, nil, nil, nil, nil, error)
+ }
}
}
}
}
func getEgoEpoch(reply: @escaping (UInt64, Error?) -> Void) {
let reply: (UInt64, Error?) -> Void = {
- os_log("getEgoEpoch complete: %d %@", log: tplogTrace, type: .info, $0, traceError($1))
+ os_log("getEgoEpoch complete: %d %{public}@", log: tplogTrace, type: .info, $0, traceError($1))
reply($0, $1)
}
func establish(ckksKeys: [CKKSKeychainBackedKeySet],
tlkShares: [CKKSTLKShare],
preapprovedKeys: [Data]?,
- reply: @escaping (String?, [CKRecord], Error?) -> Void) {
+ reply: @escaping (String?, [CKRecord], TPSyncingPolicy?, Error?) -> Void) {
self.semaphore.wait()
- let reply: (String?, [CKRecord], Error?) -> Void = {
- os_log("establish complete peer: %@ %@",
- log: tplogTrace, type: .default, ($0 ?? "NULL") as CVarArg, traceError($2))
+ let reply: (String?, [CKRecord], TPSyncingPolicy?, Error?) -> Void = {
+ os_log("establish complete peer: %{public}@ %{public}@",
+ log: tplogTrace, type: .default, ($0 ?? "NULL") as CVarArg, traceError($3))
self.semaphore.signal()
- reply($0, $1, $2)
+ reply($0, $1, $2, $3)
}
self.moc.performAndWait {
self.onqueueEstablish(ckksKeys: ckksKeys,
tlkShares: tlkShares,
- preapprovedKeys: preapprovedKeys,
- reply: reply)
+ preapprovedKeys: preapprovedKeys) { peerID, ckrecords, syncingPolicy, error in
+ reply(peerID, ckrecords, syncingPolicy, error)
+ }
}
}
ttr.trigger()
}
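+ // Recovery path for a failed establish: throw away the locally cached CloudKit state, refetch,
+ // and succeed only if our prepared identity is already present in the fetched model.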
+ func fetchAfterEstablish(ckksKeys: [CKKSKeychainBackedKeySet],
+ tlkShares: [CKKSTLKShare],
+ reply: @escaping (String?, [CKRecord], TPSyncingPolicy?, Error?) -> Void) {
+ self.moc.performAndWait {
+ do {
+ try self.deleteLocalCloudKitData()
+ } catch {
+ os_log("fetchAfterEstablish failed to reset local data: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, [], nil, error)
+ return
+ }
+ self.onqueueFetchAndPersistChanges { error in
+ guard error == nil else {
+ os_log("fetchAfterEstablish failed to fetch changes: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ reply(nil, [], nil, error)
+ return
+ }
+
+ self.moc.performAndWait {
+ guard let egoPeerID = self.containerMO.egoPeerID,
+ let egoPermData = self.containerMO.egoPeerPermanentInfo,
+ let egoPermSig = self.containerMO.egoPeerPermanentInfoSig,
+ let egoStableData = self.containerMO.egoPeerStableInfo,
+ let egoStableSig = self.containerMO.egoPeerStableInfoSig
+ else {
+ os_log("fetchAfterEstablish: failed to fetch egoPeerID", log: tplogDebug, type: .default)
+ reply(nil, [], nil, ContainerError.noPreparedIdentity)
+ return
+ }
+ guard self.model.hasPeer(withID: egoPeerID) else {
+ os_log("fetchAfterEstablish: did not find peer %{public}@ in model", log: tplogDebug, type: .default, egoPeerID)
+ reply(nil, [], nil, ContainerError.invalidPeerID)
+ return
+ }
+ let keyFactory = TPECPublicKeyFactory()
+ guard let selfPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
+ reply(nil, [], nil, ContainerError.invalidPermanentInfoOrSig)
+ return
+ }
+ guard let selfStableInfo = TPPeerStableInfo(data: egoStableData, sig: egoStableSig) else {
+ os_log("cannot create TPPeerStableInfo", log: tplogDebug, type: .default)
+ reply(nil, [], nil, ContainerError.invalidStableInfoOrSig)
+ return
+ }
+ self.onqueueUpdateTLKs(ckksKeys: ckksKeys, tlkShares: tlkShares) { ckrecords, error in
+ guard error == nil else {
+ os_log("fetchAfterEstablish failed to update TLKs: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ reply(nil, [], nil, error)
+ return
+ }
+
+ do {
+ let syncingPolicy = try self.syncingPolicyFor(modelID: selfPermanentInfo.modelID,
+ stableInfo: selfStableInfo)
+ os_log("fetchAfterEstablish succeeded", log: tplogDebug, type: .default)
+ reply(egoPeerID, ckrecords ?? [], syncingPolicy, nil)
+ } catch {
+ os_log("fetchAfterEstablish failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg))
+ reply(nil, [], nil, error)
+ }
+ }
+ }
+ }
+ }
+ }
+
func onqueueEstablish(ckksKeys: [CKKSKeychainBackedKeySet],
tlkShares: [CKKSTLKShare],
preapprovedKeys: [Data]?,
- reply: @escaping (String?, [CKRecord], Error?) -> Void) {
+ reply: @escaping (String?, [CKRecord], TPSyncingPolicy?, Error?) -> Void) {
// Fetch ego peer identity from local storage.
guard let egoPeerID = self.containerMO.egoPeerID,
let egoPermData = self.containerMO.egoPeerPermanentInfo,
let egoStableData = self.containerMO.egoPeerStableInfo,
let egoStableSig = self.containerMO.egoPeerStableInfoSig
else {
- reply(nil, [], ContainerError.noPreparedIdentity)
+ reply(nil, [], nil, ContainerError.noPreparedIdentity)
return
}
let keyFactory = TPECPublicKeyFactory()
guard let selfPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
- reply(nil, [], ContainerError.invalidPermanentInfoOrSig)
+ reply(nil, [], nil, ContainerError.invalidPermanentInfoOrSig)
return
}
guard let selfStableInfo = TPPeerStableInfo(data: egoStableData, sig: egoStableSig) else {
os_log("cannot create TPPeerStableInfo", log: tplogDebug, type: .default)
- reply(nil, [], ContainerError.invalidStableInfoOrSig)
+ reply(nil, [], nil, ContainerError.invalidStableInfoOrSig)
return
}
guard self.onqueueMachineIDAllowedByIDMS(machineID: selfPermanentInfo.machineID) else {
- os_log("establish: self machineID %@ not on list", log: tplogDebug, type: .debug, selfPermanentInfo.machineID)
+ os_log("establish: self machineID %{public}@ not on list", log: tplogDebug, type: .debug, selfPermanentInfo.machineID)
self.onqueueTTRUntrusted()
- reply(nil, [], ContainerError.preparedIdentityNotOnAllowedList(selfPermanentInfo.machineID))
+ reply(nil, [], nil, ContainerError.preparedIdentityNotOnAllowedList(selfPermanentInfo.machineID))
return
}
loadEgoKeys(peerID: egoPeerID) { egoPeerKeys, error in
- guard let egoPeerKeys = egoPeerKeys else {
- os_log("Don't have my own peer keys; can't establish: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
- reply(nil, [], error)
+ guard let egoPeerKeys = egoPeerKeys else {
+ os_log("Don't have my own peer keys; can't establish: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ reply(nil, [], nil, error)
return
}
self.moc.performAndWait {
allTLKShares = octagonShares + sosShares
} catch {
- os_log("Unable to make TLKShares for self: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, [], error)
+ os_log("Unable to make TLKShares for self: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, [], nil, error)
return
}
- let dynamicInfo: TPPeerDynamicInfo
+ let peer: Peer
+ let newDynamicInfo: TPPeerDynamicInfo
do {
- dynamicInfo = try self.model.dynamicInfo(forJoiningPeerID: egoPeerID,
- peerPermanentInfo: selfPermanentInfo,
- peerStableInfo: selfStableInfo,
- sponsorID: nil,
- preapprovedKeys: preapprovedKeys,
- signing: egoPeerKeys.signingKey,
- currentMachineIDs: self.onqueueCurrentMIDList())
-
- os_log("dynamic info: %@", log: tplogDebug, type: .default, dynamicInfo)
+ (peer, newDynamicInfo) = try self.onqueuePreparePeerForJoining(egoPeerID: egoPeerID,
+ peerPermanentInfo: selfPermanentInfo,
+ stableInfo: selfStableInfo,
+ sponsorID: nil,
+ preapprovedKeys: preapprovedKeys,
+ vouchers: [],
+ egoPeerKeys: egoPeerKeys)
+
+ os_log("dynamic info: %{public}@", log: tplogDebug, type: .default, newDynamicInfo)
} catch {
- reply(nil, [], error)
+ os_log("Unable to create peer for joining: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, [], nil, error)
return
}
- let peer = Peer.with {
- $0.peerID = egoPeerID
- $0.permanentInfoAndSig.peerPermanentInfo = egoPermData
- $0.permanentInfoAndSig.sig = egoPermSig
- $0.stableInfoAndSig.peerStableInfo = egoStableData
- $0.stableInfoAndSig.sig = egoStableSig
- $0.dynamicInfoAndSig = SignedPeerDynamicInfo(dynamicInfo)
+ guard let newPeerStableInfo = peer.stableInfoAndSig.toStableInfo() else {
+ os_log("Unable to create new peer stable info for joining", log: tplogDebug, type: .default)
+ reply(nil, [], nil, ContainerError.invalidStableInfoOrSig)
+ return
}
let bottle: Bottle
do {
bottle = try self.assembleBottle(egoPeerID: egoPeerID)
} catch {
- reply(nil, [], error)
+ reply(nil, [], nil, error)
return
}
- os_log("Beginning establish for peer %@", log: tplogDebug, type: .default, egoPeerID)
- os_log("Establish permanentInfo: %@", log: tplogDebug, type: .debug, egoPermData.base64EncodedString())
- os_log("Establish permanentInfoSig: %@", log: tplogDebug, type: .debug, egoPermSig.base64EncodedString())
- os_log("Establish stableInfo: %@", log: tplogDebug, type: .debug, egoStableData.base64EncodedString())
- os_log("Establish stableInfoSig: %@", log: tplogDebug, type: .debug, egoStableSig.base64EncodedString())
- os_log("Establish dynamicInfo: %@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.peerDynamicInfo.base64EncodedString())
- os_log("Establish dynamicInfoSig: %@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.sig.base64EncodedString())
+ os_log("Beginning establish for peer %{public}@", log: tplogDebug, type: .default, egoPeerID)
+ os_log("Establish permanentInfo: %{public}@", log: tplogDebug, type: .debug, egoPermData.base64EncodedString())
+ os_log("Establish permanentInfoSig: %{public}@", log: tplogDebug, type: .debug, egoPermSig.base64EncodedString())
+ os_log("Establish stableInfo: %{public}@", log: tplogDebug, type: .debug, egoStableData.base64EncodedString())
+ os_log("Establish stableInfoSig: %{public}@", log: tplogDebug, type: .debug, egoStableSig.base64EncodedString())
+ os_log("Establish dynamicInfo: %{public}@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.peerDynamicInfo.base64EncodedString())
+ os_log("Establish dynamicInfoSig: %{public}@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.sig.base64EncodedString())
os_log("Establish introducing %d key sets, %d tlk shares", log: tplogDebug, type: .default, viewKeys.count, allTLKShares.count)
do {
- os_log("Establish bottle: %@", log: tplogDebug, type: .debug, try bottle.serializedData().base64EncodedString())
- os_log("Establish peer: %@", log: tplogDebug, type: .debug, try peer.serializedData().base64EncodedString())
+ os_log("Establish bottle: %{public}@", log: tplogDebug, type: .debug, try bottle.serializedData().base64EncodedString())
+ os_log("Establish peer: %{public}@", log: tplogDebug, type: .debug, try peer.serializedData().base64EncodedString())
} catch {
- os_log("Establish unable to encode bottle/peer: %@", log: tplogDebug, type: .debug, error as CVarArg)
+ os_log("Establish unable to encode bottle/peer: %{public}@", log: tplogDebug, type: .debug, error as CVarArg)
}
let request = EstablishRequest.with {
$0.tlkShares = allTLKShares
}
self.cuttlefish.establish(request) { response, error in
- os_log("Establish(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
- os_log("Establish: viewKeys: %@", String(describing: viewKeys))
+ os_log("Establish: viewKeys: %{public}@", String(describing: viewKeys))
guard let response = response, error == nil else {
- os_log("establish failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(nil, [], error ?? ContainerError.cloudkitResponseMissing)
- return
+ switch error {
+ case CuttlefishErrorMatcher(code: CuttlefishErrorCode.establishFailed):
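+ // The establish may actually have landed server-side; fetchAfterEstablish resets local state,
+ // refetches, and checks whether our prepared identity already exists.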
+ os_log("establish returned failed, trying fetch", log: tplogDebug, type: .default)
+ self.fetchAfterEstablish(ckksKeys: ckksKeys, tlkShares: tlkShares, reply: reply)
+ return
+ default:
+ os_log("establish failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(nil, [], nil, error ?? ContainerError.cloudkitResponseMissing)
+ return
+ }
}
do {
- os_log("Establish returned changes: %@", log: tplogDebug, type: .default, try response.changes.jsonString())
+ os_log("Establish returned changes: %{public}@", log: tplogDebug, type: .default, try response.changes.jsonString())
} catch {
os_log("Establish returned changes, but they can't be serialized", log: tplogDebug, type: .default)
}
let keyHierarchyRecords = response.zoneKeyHierarchyRecords.compactMap { CKRecord($0) }
do {
+ let syncingPolicy = try self.syncingPolicyFor(modelID: selfPermanentInfo.modelID,
+ stableInfo: newPeerStableInfo)
+
try self.persist(changes: response.changes)
guard response.changes.more == false else {
self.fetchAndPersistChanges { fetchError in
guard fetchError == nil else {
// This is an odd error condition: we might be able to fetch again and be in a good state...
- os_log("fetch-after-establish failed: %@", log: tplogDebug, type: .default, (fetchError as CVarArg?) ?? "no error")
- reply(nil, keyHierarchyRecords, fetchError)
+ os_log("fetch-after-establish failed: %{public}@", log: tplogDebug, type: .default, (fetchError as CVarArg?) ?? "no error")
+ reply(nil, keyHierarchyRecords, nil, fetchError)
return
}
os_log("fetch-after-establish succeeded", log: tplogDebug, type: .default)
- reply(egoPeerID, keyHierarchyRecords, nil)
+ reply(egoPeerID, keyHierarchyRecords, syncingPolicy, nil)
}
return
}
os_log("establish succeeded", log: tplogDebug, type: .default)
- reply(egoPeerID, keyHierarchyRecords, nil)
+ reply(egoPeerID, keyHierarchyRecords, syncingPolicy, nil)
} catch {
- os_log("establish handling failed: %@", log: tplogDebug, type: .default, (error as CVarArg))
- reply(nil, keyHierarchyRecords, error)
+ os_log("establish handling failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg))
+ reply(nil, keyHierarchyRecords, nil, error)
}
}
}
}
}
- func setRecoveryKey(recoveryKey: String, salt: String, ckksKeys: [CKKSKeychainBackedKeySet], reply: @escaping (Error?) -> Void) {
+ func setRecoveryKey(recoveryKey: String, salt: String, ckksKeys: [CKKSKeychainBackedKeySet], reply: @escaping ([CKRecord]?, Error?) -> Void) {
self.semaphore.wait()
- let reply: (Error?) -> Void = {
- os_log("setRecoveryKey complete: %@", log: tplogTrace, type: .info, traceError($0))
+ let reply: ([CKRecord]?, Error?) -> Void = {
+ os_log("setRecoveryKey complete: %{public}@", log: tplogTrace, type: .info, traceError($1))
self.semaphore.signal()
- reply($0)
+ reply($0, $1)
}
os_log("beginning a setRecoveryKey", log: tplogDebug, type: .default)
self.moc.performAndWait {
guard let egoPeerID = self.containerMO.egoPeerID else {
os_log("no prepared identity, cannot set recovery key", log: tplogDebug, type: .default)
- reply(ContainerError.noPreparedIdentity)
+ reply(nil, ContainerError.noPreparedIdentity)
return
}
do {
recoveryKeys = try RecoveryKey(recoveryKeyString: recoveryKey, recoverySalt: salt)
} catch {
- os_log("failed to create recovery keys: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(ContainerError.failedToCreateRecoveryKey)
+ os_log("failed to create recovery keys: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, ContainerError.failedToCreateRecoveryKey)
return
}
let signingPublicKey: Data = recoveryKeys.peerKeys.signingVerificationKey.keyData
let encryptionPublicKey: Data = recoveryKeys.peerKeys.encryptionVerificationKey.keyData
- os_log("setRecoveryKey signingPubKey: %@", log: tplogDebug, type: .debug, signingPublicKey.base64EncodedString())
- os_log("setRecoveryKey encryptionPubKey: %@", log: tplogDebug, type: .debug, encryptionPublicKey.base64EncodedString())
+ os_log("setRecoveryKey signingPubKey: %@", log: tplogDebug, type: .default, signingPublicKey.base64EncodedString())
+ os_log("setRecoveryKey encryptionPubKey: %@", log: tplogDebug, type: .default, encryptionPublicKey.base64EncodedString())
guard let stableInfoData = self.containerMO.egoPeerStableInfo else {
os_log("stableInfo does not exist", log: tplogDebug, type: .default)
- reply(ContainerError.nonMember)
+ reply(nil, ContainerError.nonMember)
return
}
guard let stableInfoSig = self.containerMO.egoPeerStableInfoSig else {
os_log("stableInfoSig does not exist", log: tplogDebug, type: .default)
- reply(ContainerError.nonMember)
+ reply(nil, ContainerError.nonMember)
return
}
guard let permInfoData = self.containerMO.egoPeerPermanentInfo else {
os_log("permanentInfo does not exist", log: tplogDebug, type: .default)
- reply(ContainerError.nonMember)
+ reply(nil, ContainerError.nonMember)
return
}
guard let permInfoSig = self.containerMO.egoPeerPermanentInfoSig else {
os_log("permInfoSig does not exist", log: tplogDebug, type: .default)
- reply(ContainerError.nonMember)
+ reply(nil, ContainerError.nonMember)
return
}
guard let stableInfo = TPPeerStableInfo(data: stableInfoData, sig: stableInfoSig) else {
os_log("cannot create TPPeerStableInfo", log: tplogDebug, type: .default)
- reply(ContainerError.invalidStableInfoOrSig)
+ reply(nil, ContainerError.invalidStableInfoOrSig)
return
}
let keyFactory = TPECPublicKeyFactory()
guard let permanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: permInfoData, sig: permInfoSig, keyFactory: keyFactory) else {
os_log("cannot create TPPeerPermanentInfo", log: tplogDebug, type: .default)
- reply(ContainerError.invalidStableInfoOrSig)
+ reply(nil, ContainerError.invalidStableInfoOrSig)
return
}
loadEgoKeyPair(identifier: signingKeyIdentifier(peerID: egoPeerID)) { signingKeyPair, error in
guard let signingKeyPair = signingKeyPair else {
- os_log("handle: no signing key pair: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(error)
+ os_log("handle: no signing key pair: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(nil, error)
return
}
self.moc.performAndWait {
toPeer: recoveryKeys.peerKeys,
epoch: Int(permanentInfo.epoch))
- let policyVersion = stableInfo.policyVersion
- let policyDoc = try self.getPolicyDoc(policyVersion)
-
- let updatedStableInfo = TPPeerStableInfo(clock: stableInfo.clock + 1,
- policyVersion: policyDoc.policyVersion,
- policyHash: policyDoc.policyHash,
- policySecrets: stableInfo.policySecrets,
- deviceName: stableInfo.deviceName,
- serialNumber: stableInfo.serialNumber,
- osVersion: stableInfo.osVersion,
- signing: signingKeyPair,
- recoverySigningPubKey: signingPublicKey,
- recoveryEncryptionPubKey: encryptionPublicKey,
- error: nil)
+ let policyVersion = stableInfo.bestPolicyVersion()
+ let policyDoc = try self.getPolicyDoc(policyVersion.versionNumber)
+
+ let updatedStableInfo = try TPPeerStableInfo(clock: stableInfo.clock + 1,
+ frozenPolicyVersion: frozenPolicyVersion,
+ flexiblePolicyVersion: policyDoc.version,
+ policySecrets: stableInfo.policySecrets,
+ syncUserControllableViews: stableInfo.syncUserControllableViews,
+ deviceName: stableInfo.deviceName,
+ serialNumber: stableInfo.serialNumber,
+ osVersion: stableInfo.osVersion,
+ signing: signingKeyPair,
+ recoverySigningPubKey: signingPublicKey,
+ recoveryEncryptionPubKey: encryptionPublicKey)
let signedStableInfo = SignedPeerStableInfo(updatedStableInfo)
let request = SetRecoveryKeyRequest.with {
}
self.cuttlefish.setRecoveryKey(request) { response, error in
- os_log("SetRecoveryKey(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
guard let response = response, error == nil else {
- os_log("setRecoveryKey failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(error ?? ContainerError.cloudkitResponseMissing)
+ os_log("setRecoveryKey failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(nil, error ?? ContainerError.cloudkitResponseMissing)
return
}
try self.onQueuePersist(changes: response.changes)
os_log("setRecoveryKey succeeded", log: tplogDebug, type: .default)
- reply(nil)
+
+ let keyHierarchyRecords = response.zoneKeyHierarchyRecords.compactMap { CKRecord($0) }
+ reply(keyHierarchyRecords, nil)
} catch {
- os_log("setRecoveryKey handling failed: %@", log: tplogDebug, type: .default, (error as CVarArg))
- reply(error)
+ os_log("setRecoveryKey handling failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg))
+ reply(nil, error)
}
}
}
} catch {
- reply(error)
+ reply(nil, error)
}
}
}
}
}
- func currentSetContainerBottleID(bottleMOs: Set<BottleMO>, bottleID: String) -> (Bool) {
- let bmos = bottleMOs.filter {
- $0.bottleID == bottleID
+ func vouchWithBottle(bottleID: String,
+ entropy: Data,
+ bottleSalt: String,
+ tlkShares: [CKKSTLKShare],
+ reply: @escaping (Data?, Data?, Int64, Int64, Error?) -> Void) {
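+ // Reply carries the voucher data and signature (assumed from how this reply is consumed elsewhere),
+ // the count of unique TLKs recovered, the count of TLK shares recovered, and any error.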
+ self.semaphore.wait()
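+ // Wrap the reply so every exit path logs the result and signals the semaphore.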
+ let reply: (Data?, Data?, Int64, Int64, Error?) -> Void = {
+ os_log("vouchWithBottle complete: %{public}@",
+ log: tplogTrace, type: .info, traceError($4))
+ self.semaphore.signal()
+ reply($0, $1, $2, $3, $4)
}
- return !bmos.isEmpty
- }
- func onqueueFindBottle(bottleID: String, reply: @escaping (BottleMO?, Error?) -> Void) {
+ // A preflight should have been successful before calling this function. So, we can assume that all required data is stored locally.
- var bmo: BottleMO?
- var bottles: Set<BottleMO> = []
- var shouldPerformFetch = false
+ self.moc.performAndWait {
+ let bmo: BottleMO
- if let containerBottles = self.containerMO.bottles as? Set<BottleMO> {
- if self.currentSetContainerBottleID(bottleMOs: containerBottles, bottleID: bottleID) == false {
- shouldPerformFetch = true
- } else {
- bottles = containerBottles
+ do {
+ (bmo, _, _) = try self.onMOCQueuePerformPreflight(bottleID: bottleID)
+ } catch {
+ os_log("vouchWithBottle failed preflight: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "")
+ reply(nil, nil, 0, 0, error)
+ return
}
- } else {
- shouldPerformFetch = true
- }
-
- if shouldPerformFetch == true {
- self.fetchViableBottlesWithSemaphore { _, _, error in
- guard error == nil else {
- os_log("fetchViableBottles failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(nil, error)
- return
- }
-
- guard let newBottles = self.containerMO.bottles as? Set<BottleMO> else {
- os_log("no bottles on container: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(nil, ContainerError.noBottlesPresent)
- return
- }
-
- guard self.currentSetContainerBottleID(bottleMOs: newBottles, bottleID: bottleID) == true else {
- reply(nil, ContainerError.noBottlesForEscrowRecordID)
- return
- }
- os_log("onqueueFindBottle found bottle: %@", log: tplogDebug, type: .default, newBottles)
-
- bottles = newBottles.filter {
- $0.bottleID == bottleID
- }
- if bottles.count > 1 {
- reply(nil, ContainerError.tooManyBottlesForPeer)
- return
- }
- bmo = bottles.removeFirst()
- reply(bmo, nil)
- }
- } else {
- var filteredBottles = bottles.filter {
- $0.bottleID == bottleID
+ guard let bottledContents = bmo.contents else {
+ reply(nil, nil, 0, 0, ContainerError.bottleDoesNotContainContents)
+ return
}
- if filteredBottles.count > 1 {
- reply(nil, ContainerError.tooManyBottlesForPeer)
+ guard let signatureUsingEscrowKey = bmo.signatureUsingEscrowKey else {
+ reply(nil, nil, 0, 0, ContainerError.bottleDoesNotContainEscrowKeySignature)
return
}
- bmo = filteredBottles.removeFirst()
- reply(bmo, nil)
- }
- }
-
- func onqueueRecoverBottle(managedBottle: BottleMO, entropy: Data, bottleSalt: String) throws -> BottledPeer {
- guard let bottledContents = managedBottle.contents else {
- throw ContainerError.bottleDoesNotContainContents
- }
- guard let signatureUsingEscrowKey = managedBottle.signatureUsingEscrowKey else {
- throw ContainerError.bottleDoesNotContainEscrowKeySignature
- }
-
- guard let signatureUsingPeerKey = managedBottle.signatureUsingPeerKey else {
- throw ContainerError.bottleDoesNotContainerPeerKeySignature
- }
- guard let sponsorPeerID = managedBottle.peerID else {
- throw ContainerError.bottleDoesNotContainPeerID
- }
- //verify bottle signature using peer
- do {
- guard let sponsorPeer = self.model.peer(withID: sponsorPeerID) else {
- os_log("recover bottle: Unable to find peer that created the bottle", log: tplogDebug, type: .default)
- throw ContainerError.bottleCreatingPeerNotFound
+ guard let signatureUsingPeerKey = bmo.signatureUsingPeerKey else {
+ reply(nil, nil, 0, 0, ContainerError.bottleDoesNotContainerPeerKeySignature)
+ return
}
- guard let signingKey: _SFECPublicKey = sponsorPeer.permanentInfo.signingPubKey as? _SFECPublicKey else {
- os_log("recover bottle: Unable to create a sponsor public key", log: tplogDebug, type: .default)
- throw ContainerError.signatureVerificationFailed
+ guard let sponsorPeerID = bmo.peerID else {
+ reply(nil, nil, 0, 0, ContainerError.bottleDoesNotContainPeerID)
+ return
}
- _ = try BottledPeer.verifyBottleSignature(data: bottledContents, signature: signatureUsingPeerKey, pubKey: signingKey)
- } catch {
- os_log("Verification of bottled signature failed: %@", log: tplogDebug, type: .default, error as CVarArg)
- throw ContainerError.failedToCreateBottledPeer
- }
+ // Verify the bottle signature using the sponsor peer's signing key
+ do {
+ guard let sponsorPeer = self.model.peer(withID: sponsorPeerID) else {
+ os_log("vouchWithBottle: Unable to find peer that created the bottle", log: tplogDebug, type: .default)
+ reply(nil, nil, 0, 0, ContainerError.bottleCreatingPeerNotFound)
+ return
+ }
+ guard let signingKey: _SFECPublicKey = sponsorPeer.permanentInfo.signingPubKey as? _SFECPublicKey else {
+ os_log("vouchWithBottle: Unable to create a sponsor public key", log: tplogDebug, type: .default)
+ reply(nil, nil, 0, 0, ContainerError.signatureVerificationFailed)
+ return
+ }
- do {
- return try BottledPeer(contents: bottledContents,
- secret: entropy,
- bottleSalt: bottleSalt,
- signatureUsingEscrow: signatureUsingEscrowKey,
- signatureUsingPeerKey: signatureUsingPeerKey)
- } catch {
- os_log("Creation of Bottled Peer failed with bottle salt: %@,\nAttempting with empty bottle salt", bottleSalt)
+ _ = try BottledPeer.verifyBottleSignature(data: bottledContents, signature: signatureUsingPeerKey, pubKey: signingKey)
+ } catch {
+ os_log("vouchWithBottle: Verification of bottled signature failed: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, nil, 0, 0, ContainerError.failedToCreateBottledPeer)
+ return
+ }
+ //create bottled peer
+ let bottledPeer: BottledPeer
do {
- return try BottledPeer(contents: bottledContents,
- secret: entropy,
- bottleSalt: "",
- signatureUsingEscrow: signatureUsingEscrowKey,
- signatureUsingPeerKey: signatureUsingPeerKey)
+ bottledPeer = try BottledPeer(contents: bottledContents,
+ secret: entropy,
+ bottleSalt: bottleSalt,
+ signatureUsingEscrow: signatureUsingEscrowKey,
+ signatureUsingPeerKey: signatureUsingPeerKey)
} catch {
- os_log("Creation of Bottled Peer failed: %@", log: tplogDebug, type: .default, error as CVarArg)
- throw ContainerError.failedToCreateBottledPeer
+ os_log("Creation of Bottled Peer failed with bottle salt: %@,\nAttempting with empty bottle salt", bottleSalt)
+
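+ // Retry once with an empty salt, since the bottle may have been created without one.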
+ do {
+ bottledPeer = try BottledPeer(contents: bottledContents,
+ secret: entropy,
+ bottleSalt: "",
+ signatureUsingEscrow: signatureUsingEscrowKey,
+ signatureUsingPeerKey: signatureUsingPeerKey)
+ } catch {
+ os_log("Creation of Bottled Peer failed: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, nil, 0, 0, ContainerError.failedToCreateBottledPeer)
+ return
+ }
}
- }
- }
- func preflightVouchWithBottle(bottleID: String,
- reply: @escaping (String?, Error?) -> Void) {
- self.semaphore.wait()
- let reply: (String?, Error?) -> Void = {
- os_log("preflightVouchWithBottle complete: %@",
- log: tplogTrace, type: .info, traceError($1))
- self.semaphore.signal()
- reply($0, $1)
- }
+ os_log("Have a bottle for peer %{public}@", log: tplogDebug, type: .default, bottledPeer.peerID)
- self.moc.performAndWait {
- self.onqueueFindBottle(bottleID: bottleID) { bottleMO, error in
- guard let bottleMO = bottleMO else {
- os_log("preflightVouchWithBottle found no bottle: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "")
- reply(nil, error)
+ // Extract any TLKs we have been given
+ let (uniqueTLKsRecovered, totalSharesRecovered) = extract(tlkShares: tlkShares, peer: bottledPeer.peerKeys, model: self.model)
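+ // These counts are handed back in the reply so the caller can see how many TLKs and shares were recovered.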
+
+ self.moc.performAndWait {
+ // I must have an ego identity in order to vouch using bottle
+ guard let egoPeerID = self.containerMO.egoPeerID else {
+ os_log("As a nonmember, can't vouch for someone else", log: tplogDebug, type: .default)
+ reply(nil, nil, 0, 0, ContainerError.nonMember)
+ return
+ }
+ guard let permanentInfo = self.containerMO.egoPeerPermanentInfo else {
+ os_log("permanentInfo does not exist", log: tplogDebug, type: .default)
+ reply(nil, nil, 0, 0, ContainerError.nonMember)
+ return
+ }
+ guard let permanentInfoSig = self.containerMO.egoPeerPermanentInfoSig else {
+ os_log("permanentInfoSig does not exist", log: tplogDebug, type: .default)
+ reply(nil, nil, 0, 0, ContainerError.nonMember)
+ return
+ }
+ guard let stableInfo = self.containerMO.egoPeerStableInfo else {
+ os_log("stableInfo does not exist", log: tplogDebug, type: .default)
+ reply(nil, nil, 0, 0, ContainerError.nonMember)
+ return
+ }
+ guard let stableInfoSig = self.containerMO.egoPeerStableInfoSig else {
+ os_log("stableInfoSig does not exist", log: tplogDebug, type: .default)
+ reply(nil, nil, 0, 0, ContainerError.nonMember)
+ return
+ }
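+ // Rebuild and validate the local peer's identity from the stored blobs before requesting a voucher.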
+ let keyFactory = TPECPublicKeyFactory()
+ guard let beneficiaryPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: permanentInfo, sig: permanentInfoSig, keyFactory: keyFactory) else {
+ os_log("Invalid permenent info or signature; can't vouch for them", log: tplogDebug, type: .default)
+ reply(nil, nil, 0, 0, ContainerError.invalidPermanentInfoOrSig)
+ return
+ }
+ guard let beneficiaryStableInfo = TPPeerStableInfo(data: stableInfo, sig: stableInfoSig) else {
+ os_log("Invalid stableinfo or signature; van't vouch for them", log: tplogDebug, type: .default)
+ reply(nil, nil, 0, 0, ContainerError.invalidStableInfoOrSig)
return
}
- reply(bottleMO.peerID, nil)
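+ // The recovered bottle keys act as the sponsor: sign a restore voucher for the local peer.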
+ do {
+ let voucher = try self.model.createVoucher(forCandidate: beneficiaryPermanentInfo,
+ stableInfo: beneficiaryStableInfo,
+ withSponsorID: sponsorPeerID,
+ reason: TPVoucherReason.restore,
+ signing: bottledPeer.peerKeys.signingKey)
+ reply(voucher.data, voucher.sig, uniqueTLKsRecovered, totalSharesRecovered, nil)
+ return
+ } catch {
+ os_log("Error creating voucher with bottle: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, nil, 0, 0, error)
+ return
+ }
}
}
}
- func vouchWithBottle(bottleID: String,
- entropy: Data,
- bottleSalt: String,
- tlkShares: [CKKSTLKShare],
- reply: @escaping (Data?, Data?, Error?) -> Void) {
+ func vouchWithRecoveryKey(recoveryKey: String,
+ salt: String,
+ tlkShares: [CKKSTLKShare],
+ reply: @escaping (Data?, Data?, Error?) -> Void) {
self.semaphore.wait()
let reply: (Data?, Data?, Error?) -> Void = {
- os_log("vouchWithBottle complete: %@",
+ os_log("vouchWithRecoveryKey complete: %{public}@",
log: tplogTrace, type: .info, traceError($2))
self.semaphore.signal()
reply($0, $1, $2)
}
- self.fetchAndPersistChanges { error in
- guard error == nil else {
- os_log("vouchWithBottle unable to fetch changes: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "")
- reply(nil, nil, error)
- return
- }
-
- self.onqueueFindBottle(bottleID: bottleID) { returnedBMO, error in
- self.moc.performAndWait {
- guard error == nil else {
- os_log("vouchWithBottle unable to find bottle for escrow record id: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "")
- reply(nil, nil, error)
- return
- }
-
- guard let bmo: BottleMO = returnedBMO else {
- os_log("vouchWithBottle bottle is nil: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "")
- reply(nil, nil, error)
- return
- }
-
- guard let bottledContents = bmo.contents else {
- reply(nil, nil, ContainerError.bottleDoesNotContainContents)
- return
- }
- guard let signatureUsingEscrowKey = bmo.signatureUsingEscrowKey else {
- reply(nil, nil, ContainerError.bottleDoesNotContainEscrowKeySignature)
- return
- }
-
- guard let signatureUsingPeerKey = bmo.signatureUsingPeerKey else {
- reply(nil, nil, ContainerError.bottleDoesNotContainerPeerKeySignature)
- return
- }
- guard let sponsorPeerID = bmo.peerID else {
- reply(nil, nil, ContainerError.bottleDoesNotContainPeerID)
- return
- }
-
- //verify bottle signature using peer
- do {
- guard let sponsorPeer = self.model.peer(withID: sponsorPeerID) else {
- os_log("vouchWithBottle: Unable to find peer that created the bottle", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.bottleCreatingPeerNotFound)
- return
- }
- guard let signingKey: _SFECPublicKey = sponsorPeer.permanentInfo.signingPubKey as? _SFECPublicKey else {
- os_log("vouchWithBottle: Unable to create a sponsor public key", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.signatureVerificationFailed)
- return
- }
-
- _ = try BottledPeer.verifyBottleSignature(data: bottledContents, signature: signatureUsingPeerKey, pubKey: signingKey)
- } catch {
- os_log("vouchWithBottle: Verification of bottled signature failed: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, nil, ContainerError.failedToCreateBottledPeer)
- return
- }
-
- //create bottled peer
- let bottledPeer: BottledPeer
- do {
- bottledPeer = try BottledPeer(contents: bottledContents,
- secret: entropy,
- bottleSalt: bottleSalt,
- signatureUsingEscrow: signatureUsingEscrowKey,
- signatureUsingPeerKey: signatureUsingPeerKey)
- } catch {
- os_log("Creation of Bottled Peer failed with bottle salt: %@,\nAttempting with empty bottle salt", bottleSalt)
-
- do {
- bottledPeer = try BottledPeer(contents: bottledContents,
- secret: entropy,
- bottleSalt: "",
- signatureUsingEscrow: signatureUsingEscrowKey,
- signatureUsingPeerKey: signatureUsingPeerKey)
- } catch {
-
- os_log("Creation of Bottled Peer failed: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, nil, ContainerError.failedToCreateBottledPeer)
- return
- }
- }
-
- os_log("Have a bottle for peer %@", log: tplogDebug, type: .default, bottledPeer.peerID)
-
- // Extract any TLKs we have been given
- extract(tlkShares: tlkShares, peer: bottledPeer.peerKeys)
-
- self.moc.performAndWait {
- // I must have an ego identity in order to vouch using bottle
- guard let egoPeerID = self.containerMO.egoPeerID else {
- os_log("As a nonmember, can't vouch for someone else", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.nonMember)
- return
- }
- guard let permanentInfo = self.containerMO.egoPeerPermanentInfo else {
- os_log("permanentInfo does not exist", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.nonMember)
- return
- }
- guard let permanentInfoSig = self.containerMO.egoPeerPermanentInfoSig else {
- os_log("permanentInfoSig does not exist", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.nonMember)
- return
- }
- guard let stableInfo = self.containerMO.egoPeerStableInfo else {
- os_log("stableInfo does not exist", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.nonMember)
- return
- }
- guard let stableInfoSig = self.containerMO.egoPeerStableInfoSig else {
- os_log("stableInfoSig does not exist", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.nonMember)
- return
- }
- let keyFactory = TPECPublicKeyFactory()
- guard let beneficiaryPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: permanentInfo, sig: permanentInfoSig, keyFactory: keyFactory) else {
- os_log("Invalid permenent info or signature; can't vouch for them", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.invalidPermanentInfoOrSig)
- return
- }
- guard let beneficiaryStableInfo = TPPeerStableInfo(data: stableInfo, sig: stableInfoSig) else {
- os_log("Invalid stableinfo or signature; van't vouch for them", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.invalidStableInfoOrSig)
- return
- }
-
- do {
- let voucher = try self.model.createVoucher(forCandidate: beneficiaryPermanentInfo,
- stableInfo: beneficiaryStableInfo,
- withSponsorID: sponsorPeerID,
- reason: TPVoucherReason.restore,
- signing: bottledPeer.peerKeys.signingKey)
- reply(voucher.data, voucher.sig, nil)
- return
- } catch {
- os_log("Error creating voucher: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, nil, error)
- return
- }
- }
- }
- }
- }
- }
-
- func vouchWithRecoveryKey(recoveryKey: String,
- salt: String,
- tlkShares: [CKKSTLKShare],
- reply: @escaping (Data?, Data?, Error?) -> Void) {
- self.semaphore.wait()
- let reply: (Data?, Data?, Error?) -> Void = {
- os_log("vouchWithRecoveryKey complete: %@",
- log: tplogTrace, type: .info, traceError($2))
- self.semaphore.signal()
- reply($0, $1, $2)
- }
-
- self.moc.performAndWait {
- os_log("beginning a vouchWithRecoveryKey", log: tplogDebug, type: .default)
-
- // I must have an ego identity in order to vouch using bottle
- guard let egoPeerID = self.containerMO.egoPeerID else {
- os_log("As a nonmember, can't vouch for someone else", log: tplogDebug, type: .default)
- reply(nil, nil, ContainerError.nonMember)
+ self.moc.performAndWait {
+ os_log("beginning a vouchWithRecoveryKey", log: tplogDebug, type: .default)
+
+ // I must have an ego identity in order to vouch using bottle
+ guard let egoPeerID = self.containerMO.egoPeerID else {
+ os_log("As a nonmember, can't vouch for someone else", log: tplogDebug, type: .default)
+ reply(nil, nil, ContainerError.nonMember)
return
}
guard let permanentInfo = self.containerMO.egoPeerPermanentInfo else {
do {
recoveryKeys = try RecoveryKey(recoveryKeyString: recoveryKey, recoverySalt: salt)
} catch {
- os_log("failed to create recovery keys: %@", log: tplogDebug, type: .default, error as CVarArg)
+ os_log("failed to create recovery keys: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
reply(nil, nil, ContainerError.failedToCreateRecoveryKey)
return
}
- extract(tlkShares: tlkShares, peer: recoveryKeys.peerKeys)
-
let signingPublicKey: Data = recoveryKeys.peerKeys.signingKey.publicKey.keyData
let encryptionPublicKey: Data = recoveryKeys.peerKeys.encryptionKey.publicKey.keyData
- os_log("vouchWithRecoveryKey signingPubKey: %@", log: tplogDebug, type: .debug, signingPublicKey.base64EncodedString())
- os_log("vouchWithRecoveryKey encryptionPubKey: %@", log: tplogDebug, type: .debug, encryptionPublicKey.base64EncodedString())
+ os_log("vouchWithRecoveryKey signingPubKey: %@", log: tplogDebug, type: .default, signingPublicKey.base64EncodedString())
+ os_log("vouchWithRecoveryKey encryptionPubKey: %@", log: tplogDebug, type: .default, encryptionPublicKey.base64EncodedString())
guard self.model.isRecoveryKeyEnrolled() else {
os_log("Recovery Key is not enrolled", log: tplogDebug, type: .default)
}
//find matching peer containing recovery keys
- guard let sponsorPeerID = self.model.peerIDThatTrustsRecoveryKeys(TPRecoveryKeyPair(signingSPKI: signingPublicKey, encryptionSPKI: encryptionPublicKey)) else {
+ guard let sponsorPeerID = self.model.peerIDThatTrustsRecoveryKeys(TPRecoveryKeyPair(signingKeyData: signingPublicKey, encryptionKeyData: encryptionPublicKey)) else {
os_log("Untrusted recovery key set", log: tplogDebug, type: .default)
reply(nil, nil, ContainerError.untrustedRecoveryKeys)
return
}
+ // We're going to end up trusting every peer that the sponsor peer trusts.
+ // We might as well trust all TLKShares from those peers at this point.
+ extract(tlkShares: tlkShares, peer: recoveryKeys.peerKeys, sponsorPeerID: sponsorPeerID, model: self.model)
+
do {
let voucher = try self.model.createVoucher(forCandidate: beneficiaryPermanentInfo,
stableInfo: beneficiaryStableInfo,
reply(voucher.data, voucher.sig, nil)
return
} catch {
- os_log("Error creating voucher using recovery key set: %@", log: tplogDebug, type: .default, error as CVarArg)
+ os_log("Error creating voucher using recovery key set: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
reply(nil, nil, error)
return
}
reply: @escaping (Data?, Data?, Error?) -> Void) {
self.semaphore.wait()
let reply: (Data?, Data?, Error?) -> Void = {
- os_log("vouch complete: %@", log: tplogTrace, type: .info, traceError($2))
+ os_log("vouch complete: %{public}@", log: tplogTrace, type: .info, traceError($2))
self.semaphore.signal()
reply($0, $1, $2)
}
loadEgoKeys(peerID: egoPeerID) { egoPeerKeys, error in
guard let egoPeerKeys = egoPeerKeys else {
- os_log("Don't have my own keys: can't vouch for %@: %@", log: tplogDebug, type: .default, beneficiaryPermanentInfo, (error as CVarArg?) ?? "no error")
+ os_log("Don't have my own keys: can't vouch for %{public}@(%{public}@): %{public}@", log: tplogDebug, type: .default, peerID, beneficiaryPermanentInfo, (error as CVarArg?) ?? "no error")
reply(nil, nil, error)
return
}
- self.moc.performAndWait {
- let voucher: TPVoucher
- do {
- voucher = try self.model.createVoucher(forCandidate: beneficiaryPermanentInfo,
- stableInfo: beneficiaryStableInfo,
- withSponsorID: egoPeerID,
- reason: TPVoucherReason.secureChannel,
- signing: egoPeerKeys.signingKey)
- } catch {
- os_log("Error creating voucher: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, nil, error)
+ self.fetchPolicyDocumentsWithSemaphore(versions: Set([beneficiaryStableInfo.bestPolicyVersion()])) { _, policyFetchError in
+ guard policyFetchError == nil else {
+ os_log("Unknown policy for beneficiary: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(nil, nil, policyFetchError)
return
}
- // And generate and upload any tlkShares
- // Note that this might not be the whole list: <rdar://problem/47899980> Octagon: Limited Peers
- let tlkShares: [TLKShare]
- do {
- // Note: we only want to send up TLKs for uploaded ckks zones
- let ckksTLKs = ckksKeys.filter { !$0.newUpload }.map { $0.tlk }
+ self.moc.performAndWait {
+ let voucher: TPVoucher
+ do {
+ voucher = try self.model.createVoucher(forCandidate: beneficiaryPermanentInfo,
+ stableInfo: beneficiaryStableInfo,
+ withSponsorID: egoPeerID,
+ reason: TPVoucherReason.secureChannel,
+ signing: egoPeerKeys.signingKey)
+ } catch {
+ os_log("Error creating voucher: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, nil, error)
+ return
+ }
- tlkShares = try makeTLKShares(ckksTLKs: ckksTLKs,
- asPeer: egoPeerKeys,
- toPeer: beneficiaryPermanentInfo,
- epoch: Int(selfPermanentInfo.epoch))
- } catch {
- os_log("Unable to make TLKShares for beneficiary %@: %@", log: tplogDebug, type: .default, beneficiaryPermanentInfo, error as CVarArg)
- reply(nil, nil, error)
- return
- }
+ // And generate and upload any tlkShares
+ let tlkShares: [TLKShare]
+ do {
+ // The beneficiary might not be allowed every view, so filter the TLKs down to the views it can sync
+ let peerViews = try? self.model.getViewsForPeer(beneficiaryPermanentInfo,
+ stableInfo: beneficiaryStableInfo)
+
+ // Note: we only want to send up TLKs for uploaded ckks zones
+ let ckksTLKs = ckksKeys
+ .filter { !$0.newUpload }
+ .filter { peerViews?.contains($0.tlk.zoneID.zoneName) ?? false }
+ .map { $0.tlk }
+
+ tlkShares = try makeTLKShares(ckksTLKs: ckksTLKs,
+ asPeer: egoPeerKeys,
+ toPeer: beneficiaryPermanentInfo,
+ epoch: Int(selfPermanentInfo.epoch))
+ } catch {
+ os_log("Unable to make TLKShares for beneficiary %{public}@(%{public}@): %{public}@", log: tplogDebug, type: .default, peerID, beneficiaryPermanentInfo, error as CVarArg)
+ reply(nil, nil, error)
+ return
+ }
- guard !tlkShares.isEmpty else {
- os_log("No TLKShares to upload for new peer, returning voucher", log: tplogDebug, type: .default)
- reply(voucher.data, voucher.sig, nil)
- return
- }
+ guard !tlkShares.isEmpty else {
+ os_log("No TLKShares to upload for new peer, returning voucher", log: tplogDebug, type: .default)
+ reply(voucher.data, voucher.sig, nil)
+ return
+ }
+
+ self.cuttlefish.updateTrust(changeToken: self.containerMO.changeToken ?? "",
+ peerID: egoPeerID,
+ stableInfoAndSig: nil,
+ dynamicInfoAndSig: nil,
+ tlkShares: tlkShares,
+ viewKeys: []) { response, error in
+ guard let response = response, error == nil else {
+ os_log("Unable to upload new tlkshares: %{public}@", log: tplogDebug, type: .default, error as CVarArg? ?? "no error")
+ reply(voucher.data, voucher.sig, error ?? ContainerError.cloudkitResponseMissing)
+ return
+ }
- self.cuttlefish.updateTrust(changeToken: self.containerMO.changeToken ?? "",
- peerID: egoPeerID,
- stableInfoAndSig: nil,
- dynamicInfoAndSig: nil,
- tlkShares: tlkShares,
- viewKeys: []) { response, error in
- guard let response = response, error == nil else {
- os_log("Unable to upload new tlkshares: %@", log: tplogDebug, type: .default, error as CVarArg? ?? "no error")
- reply(voucher.data, voucher.sig, error ?? ContainerError.cloudkitResponseMissing)
- return
- }
-
- let newKeyRecords = response.zoneKeyHierarchyRecords.map(CKRecord.init)
- os_log("Uploaded new tlkshares: %@", log: tplogDebug, type: .default, newKeyRecords)
- // We don't need to save these; CKKS will refetch them as needed
-
- reply(voucher.data, voucher.sig, nil)
+ let newKeyRecords = response.zoneKeyHierarchyRecords.map(CKRecord.init)
+ os_log("Uploaded new tlkshares: %@", log: tplogDebug, type: .default, newKeyRecords)
+ // We don't need to save these; CKKS will refetch them as needed
+
+ reply(voucher.data, voucher.sig, nil)
+ }
}
}
}
func departByDistrustingSelf(reply: @escaping (Error?) -> Void) {
self.semaphore.wait()
let reply: (Error?) -> Void = {
- os_log("departByDistrustingSelf complete: %@", log: tplogTrace, type: .info, traceError($0))
+ os_log("departByDistrustingSelf complete: %{public}@", log: tplogTrace, type: .info, traceError($0))
self.semaphore.signal()
reply($0)
}
reply: @escaping (Error?) -> Void) {
self.semaphore.wait()
let reply: (Error?) -> Void = {
- os_log("distrust complete: %@", log: tplogTrace, type: .info, traceError($0))
+ os_log("distrust complete: %{public}@", log: tplogTrace, type: .info, traceError($0))
self.semaphore.signal()
reply($0)
}
func onqueueDistrust(peerIDs: Set<String>,
reply: @escaping (Error?) -> Void) {
-
guard let egoPeerID = self.containerMO.egoPeerID else {
os_log("No dynamic info for self?", log: tplogDebug, type: .default)
reply(ContainerError.noPreparedIdentity)
loadEgoKeyPair(identifier: signingKeyIdentifier(peerID: egoPeerID)) { signingKeyPair, error in
guard let signingKeyPair = signingKeyPair else {
- os_log("No longer have signing key pair; can't sign distrust: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "nil")
+ os_log("No longer have signing key pair; can't sign distrust: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "nil")
reply(error)
return
}
preapprovedKeys: nil,
signing: signingKeyPair,
currentMachineIDs: self.onqueueCurrentMIDList())
-
} catch {
- os_log("Error preparing dynamic info: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "nil")
+ os_log("Error preparing dynamic info: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "nil")
reply(error)
return
}
let signedDynamicInfo = SignedPeerDynamicInfo(dynamicInfo)
- os_log("attempting distrust for %@ with: %@", log: tplogDebug, type: .default, peerIDs, dynamicInfo)
+ os_log("attempting distrust for %{public}@ with: %{public}@", log: tplogDebug, type: .default, peerIDs, dynamicInfo)
let request = UpdateTrustRequest.with {
$0.changeToken = self.containerMO.changeToken ?? ""
$0.dynamicInfoAndSig = signedDynamicInfo
}
self.cuttlefish.updateTrust(request) { response, error in
- os_log("UpdateTrust(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
guard let response = response, error == nil else {
- os_log("updateTrust failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ os_log("updateTrust failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(error ?? ContainerError.cloudkitResponseMissing)
return
}
os_log("distrust succeeded", log: tplogDebug, type: .default)
reply(nil)
} catch {
- os_log("distrust handling failed: %@", log: tplogDebug, type: .default, (error as CVarArg))
+ os_log("distrust handling failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg))
reply(error)
}
}
func fetchEscrowContents(reply: @escaping (Data?, String?, Data?, Error?) -> Void) {
self.semaphore.wait()
let reply: (Data?, String?, Data?, Error?) -> Void = {
- os_log("fetchEscrowContents complete: %@", log: tplogTrace, type: .info, traceError($3))
+ os_log("fetchEscrowContents complete: %{public}@", log: tplogTrace, type: .info, traceError($3))
self.semaphore.signal()
reply($0, $1, $2, $3)
}
return
}
- var bmoSet = bottles.filter { $0.peerID == egoPeerID }
- let bmo = bmoSet.removeFirst()
+ guard let bmo = bottles.filter({ $0.peerID == egoPeerID }).first else {
+ os_log("fetchEscrowContents no bottle matches peerID", log: tplogDebug, type: .default)
+ reply(nil, nil, nil, ContainerError.noBottleForPeer)
+ return
+ }
+
let bottleID = bmo.bottleID
var entropy: Data
}
entropy = loaded
} catch {
- os_log("fetchEscrowContents failed to load entropy: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ os_log("fetchEscrowContents failed to load entropy: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(nil, nil, nil, error)
return
}
func fetchViableBottles(reply: @escaping ([String]?, [String]?, Error?) -> Void) {
self.semaphore.wait()
let reply: ([String]?, [String]?, Error?) -> Void = {
- os_log("fetchViableBottles complete: %@", log: tplogTrace, type: .info, traceError($2))
+ os_log("fetchViableBottles complete: %{public}@", log: tplogTrace, type: .info, traceError($2))
self.semaphore.signal()
reply($0, $1, $2)
}
self.fetchViableBottlesWithSemaphore(reply: reply)
}
- func onqueueCachedBottlesContainEgoPeerBottle(cachedBottles: TPCachedViableBottles) -> Bool {
- guard let egoPeerID = self.containerMO.egoPeerID else {
- os_log("bottleForEgoPeer: No identity.", log: tplogDebug, type: .default)
- return false
+ func handleFetchViableBottlesResponseWithSemaphore(response: FetchViableBottlesResponse?) {
+ guard let escrowPairs = response?.viableBottles else {
+ os_log("fetchViableBottles returned no viable bottles", log: tplogDebug, type: .default)
+ return
}
- guard let bottles: Set<BottleMO> = self.containerMO.bottles as? Set<BottleMO> else {
- os_log("bottleForEgoPeer: No Bottles.", log: tplogDebug, type: .default)
- return false
+
+ var partialPairs: [EscrowPair] = []
+ if let partial = response?.partialBottles {
+ partialPairs = partial
+ } else {
+ os_log("fetchViableBottles returned no partially viable bottles, but that's ok", log: tplogDebug, type: .default)
}
- var matchesCached: Bool = false
- for bottle in bottles {
- guard let bottleID: String = bottle.bottleID else {
- continue
+
+ var legacyEscrowInformations: [EscrowInformation] = []
+ if let legacy = response?.legacyRecords {
+ legacyEscrowInformations = legacy
+ } else {
+ os_log("fetchViableBottles returned no legacy escrow records", log: tplogDebug, type: .default)
+ }
+
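+ // Fully viable bottles: cache both the escrow record metadata and the bottle contents.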
+ escrowPairs.forEach { pair in
+ let bottle = pair.bottle
+ let record = pair.record
+ if pair.hasRecord {
+ // Replace any cached copy of this escrow record with the fresh one
+ if let existingRecords = self.containerMO.fullyViableEscrowRecords as? Set<EscrowRecordMO> {
+ let matchingRecords: Set<EscrowRecordMO> = existingRecords.filter { existing in existing.label == record.label
+ && existing.escrowMetadata?.bottleID == record.escrowInformationMetadata.bottleID }
+ if !matchingRecords.isEmpty {
+ os_log("fetchViableBottles already knows about record, re-adding entry", log: tplogDebug, type: .default, record.label)
+ self.containerMO.removeFromFullyViableEscrowRecords(matchingRecords as NSSet)
+ }
+ self.setEscrowRecord(record: record, viability: .full)
+ }
}
- if bottle.peerID == egoPeerID && (cachedBottles.viableBottles.contains(bottleID) || cachedBottles.partialBottles.contains(bottleID)) {
- matchesCached = true
- break
+ // Save this bottle only if we don't already have it
+ if let existingBottles = self.containerMO.bottles as? Set<BottleMO> {
+ let matchingBottles: Set<BottleMO> = existingBottles.filter { existing in
+ existing.peerID == bottle.peerID &&
+ existing.bottleID == bottle.bottleID &&
+ existing.escrowedSigningSPKI == bottle.escrowedSigningSpki &&
+ existing.signatureUsingEscrowKey == bottle.signatureUsingEscrowKey &&
+ existing.signatureUsingPeerKey == bottle.signatureUsingPeerKey &&
+ existing.contents == bottle.contents
+ }
+ if !matchingBottles.isEmpty {
+ os_log("fetchViableBottles already knows about bottle", log: tplogDebug, type: .default, bottle.bottleID)
+ return
+ }
+ }
+
+ let bmo = BottleMO(context: self.moc)
+ bmo.peerID = bottle.peerID
+ bmo.bottleID = bottle.bottleID
+ bmo.escrowedSigningSPKI = bottle.escrowedSigningSpki
+ bmo.signatureUsingEscrowKey = bottle.signatureUsingEscrowKey
+ bmo.signatureUsingPeerKey = bottle.signatureUsingPeerKey
+ bmo.contents = bottle.contents
+
+ os_log("fetchViableBottles saving new bottle: %{public}@", log: tplogDebug, type: .default, bmo)
+ self.containerMO.addToBottles(bmo)
+ }
+
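+ // Partially viable bottles get the same treatment, marked with partial viability.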
+ partialPairs.forEach { pair in
+ let bottle = pair.bottle
+
+ let record = pair.record
+ // Replace any cached copy of this escrow record with the fresh one
+ if pair.hasRecord {
+ if let existingRecords = self.containerMO.partiallyViableEscrowRecords as? Set<EscrowRecordMO> {
+ let matchingRecords: Set<EscrowRecordMO> = existingRecords.filter { existing in existing.label == record.label
+ && existing.escrowMetadata?.bottleID == record.escrowInformationMetadata.bottleID }
+ if !matchingRecords.isEmpty {
+ os_log("fetchViableBottles already knows about record, re-adding entry", log: tplogDebug, type: .default, record.label)
+ self.containerMO.removeFromPartiallyViableEscrowRecords(matchingRecords as NSSet)
+ }
+ self.setEscrowRecord(record: record, viability: Viability.partial)
+ }
+ }
+
+ // Save this bottle only if we don't already have it
+ if let existingBottles = self.containerMO.bottles as? Set<BottleMO> {
+ let matchingBottles: Set<BottleMO> = existingBottles.filter { existing in
+ existing.peerID == bottle.peerID &&
+ existing.bottleID == bottle.bottleID &&
+ existing.escrowedSigningSPKI == bottle.escrowedSigningSpki &&
+ existing.signatureUsingEscrowKey == bottle.signatureUsingEscrowKey &&
+ existing.signatureUsingPeerKey == bottle.signatureUsingPeerKey &&
+ existing.contents == bottle.contents
+ }
+ if !matchingBottles.isEmpty {
+ os_log("fetchViableBottles already knows about bottle", log: tplogDebug, type: .default, bottle.bottleID)
+ return
+ }
+ }
+
+ let bmo = BottleMO(context: self.moc)
+ bmo.peerID = bottle.peerID
+ bmo.bottleID = bottle.bottleID
+ bmo.escrowedSigningSPKI = bottle.escrowedSigningSpki
+ bmo.signatureUsingEscrowKey = bottle.signatureUsingEscrowKey
+ bmo.signatureUsingPeerKey = bottle.signatureUsingPeerKey
+ bmo.contents = bottle.contents
+
+ os_log("fetchViableBottles saving new bottle: %{public}@", log: tplogDebug, type: .default, bmo)
+ self.containerMO.addToBottles(bmo)
+ }
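+ // Legacy records carry no bottle; only the escrow record metadata is cached.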
+ legacyEscrowInformations.forEach { record in
+ // Replace any cached copy of this legacy escrow record with the fresh one
+ if let existingRecords = self.containerMO.legacyEscrowRecords as? Set<EscrowRecordMO> {
+ let matchingRecords: Set<EscrowRecordMO> = existingRecords.filter { existing in existing.label == record.label }
+ if !matchingRecords.isEmpty {
+ os_log("fetchViableBottles already knows about legacy record %@, re-adding entry", log: tplogDebug, type: .default, record.label)
+ self.containerMO.removeFromLegacyEscrowRecords(matchingRecords as NSSet)
+ }
+ if record.label.hasSuffix(".double") {
+ os_log("ignoring double enrollment record %@", record.label)
+ } else {
+ self.setEscrowRecord(record: record, viability: Viability.none)
+ }
}
}
- return matchesCached
}
func fetchViableBottlesWithSemaphore(reply: @escaping ([String]?, [String]?, Error?) -> Void) {
os_log("beginning a fetchViableBottles", log: tplogDebug, type: .default)
- let cachedBottles: TPCachedViableBottles = self.model.currentCachedViableBottlesSet()
self.moc.performAndWait {
- if self.onqueueCachedBottlesContainEgoPeerBottle(cachedBottles: cachedBottles)
- && (cachedBottles.viableBottles.count > 0 || cachedBottles.partialBottles.count > 0) {
+ var cachedBottles = TPCachedViableBottles(viableBottles: [], partialBottles: [])
+
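+ // Prefer cached data: with the optimization enabled, serve the escrow-record cache while it is still fresh; otherwise fall back to the model's cached viable-bottle set.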
+ if OctagonIsOptimizationEnabled() {
+ if let lastDate = self.containerMO.escrowFetchDate {
+ if Date() < lastDate.addingTimeInterval(escrowCacheTimeout) {
+ os_log("escrow cache still valid", log: tplogDebug, type: .default)
+ cachedBottles = onqueueCachedBottlesFromEscrowRecords()
+ } else {
+ os_log("escrow cache no longer valid", log: tplogDebug, type: .default)
+ if let records = self.containerMO.fullyViableEscrowRecords {
+ self.containerMO.removeFromFullyViableEscrowRecords(records)
+ }
+ if let records = self.containerMO.partiallyViableEscrowRecords {
+ self.containerMO.removeFromPartiallyViableEscrowRecords(records)
+ }
+ self.containerMO.escrowFetchDate = nil
+ }
+ }
+ } else {
+ cachedBottles = self.model.currentCachedViableBottlesSet()
+ }
+
+ if !cachedBottles.viableBottles.isEmpty || !cachedBottles.partialBottles.isEmpty {
os_log("returning from fetchViableBottles, using cached bottles", log: tplogDebug, type: .default)
reply(cachedBottles.viableBottles, cachedBottles.partialBottles, nil)
return
}
-
- self.cuttlefish.fetchViableBottles { response, error in
+
+ let request = FetchViableBottlesRequest.with {
+ $0.filterRequest = OctagonPlatformSupportsSOS() ? .unknown : .byOctagonOnly
+ }
+ if request.filterRequest == .byOctagonOnly {
+ os_log("Requesting Cuttlefish sort records by Octagon Only", log: tplogDebug, type: .default)
+ }
+
+ self.cuttlefish.fetchViableBottles(request) { response, error in
guard error == nil else {
- os_log("fetchViableBottles failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ os_log("fetchViableBottles failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(nil, nil, error)
return
}
self.moc.performAndWait {
-
guard let escrowPairs = response?.viableBottles else {
- os_log("fetchViableBottles returned no viable bottles: %@", log: tplogDebug, type: .default)
+ os_log("fetchViableBottles returned no viable bottles", log: tplogDebug, type: .default)
reply([], [], nil)
return
}
if let partial = response?.partialBottles {
partialPairs = partial
} else {
- os_log("fetchViableBottles returned no partially viable bottles, but that's ok: %@", log: tplogDebug, type: .default)
+ os_log("fetchViableBottles returned no partially viable bottles, but that's ok", log: tplogDebug, type: .default)
}
let viableBottleIDs = escrowPairs.compactMap { $0.bottle.bottleID }
- os_log("fetchViableBottles returned viable bottles: %@", log: tplogDebug, type: .default, viableBottleIDs)
+ os_log("fetchViableBottles returned viable bottles: %{public}@", log: tplogDebug, type: .default, viableBottleIDs)
let partialBottleIDs = partialPairs.compactMap { $0.bottle.bottleID }
- os_log("fetchViableBottles returned partial bottles: %@", log: tplogDebug, type: .default, partialBottleIDs)
-
- escrowPairs.forEach { pair in
- let bottle = pair.bottle
-
- // Save this bottle only if we don't already have it
- if let existingBottles = self.containerMO.bottles as? Set<BottleMO> {
- let matchingBottles: Set<BottleMO> = existingBottles.filter { existing in
- existing.peerID == bottle.peerID &&
- existing.bottleID == bottle.bottleID &&
- existing.escrowedSigningSPKI == bottle.escrowedSigningSpki &&
- existing.signatureUsingEscrowKey == bottle.signatureUsingEscrowKey &&
- existing.signatureUsingPeerKey == bottle.signatureUsingPeerKey &&
- existing.contents == bottle.contents
- }
- if !matchingBottles.isEmpty {
- os_log("fetchViableBottles already knows about bottle", log: tplogDebug, type: .default, bottle.bottleID)
- return
- }
- }
-
- let bmo = BottleMO(context: self.moc)
- bmo.peerID = bottle.peerID
- bmo.bottleID = bottle.bottleID
- bmo.escrowedSigningSPKI = bottle.escrowedSigningSpki
- bmo.signatureUsingEscrowKey = bottle.signatureUsingEscrowKey
- bmo.signatureUsingPeerKey = bottle.signatureUsingPeerKey
- bmo.contents = bottle.contents
-
- os_log("fetchViableBottles saving new bottle: %@", log: tplogDebug, type: .default, bmo)
- self.containerMO.addToBottles(bmo)
- }
-
- partialPairs.forEach { pair in
- let bottle = pair.bottle
-
- // Save this bottle only if we don't already have it
- if let existingBottles = self.containerMO.bottles as? Set<BottleMO> {
- let matchingBottles: Set<BottleMO> = existingBottles.filter { existing in
- existing.peerID == bottle.peerID &&
- existing.bottleID == bottle.bottleID &&
- existing.escrowedSigningSPKI == bottle.escrowedSigningSpki &&
- existing.signatureUsingEscrowKey == bottle.signatureUsingEscrowKey &&
- existing.signatureUsingPeerKey == bottle.signatureUsingPeerKey &&
- existing.contents == bottle.contents
- }
- if !matchingBottles.isEmpty {
- os_log("fetchViableBottles already knows about bottle", log: tplogDebug, type: .default, bottle.bottleID)
- return
- }
- }
+ os_log("fetchViableBottles returned partial bottles: %{public}@", log: tplogDebug, type: .default, partialBottleIDs)
- let bmo = BottleMO(context: self.moc)
- bmo.peerID = bottle.peerID
- bmo.bottleID = bottle.bottleID
- bmo.escrowedSigningSPKI = bottle.escrowedSigningSpki
- bmo.signatureUsingEscrowKey = bottle.signatureUsingEscrowKey
- bmo.signatureUsingPeerKey = bottle.signatureUsingPeerKey
- bmo.contents = bottle.contents
-
- os_log("fetchViableBottles saving new bottle: %@", log: tplogDebug, type: .default, bmo)
- self.containerMO.addToBottles(bmo)
- }
+ self.handleFetchViableBottlesResponseWithSemaphore(response: response)
do {
try self.moc.save()
os_log("fetchViableBottles saved bottles", log: tplogDebug, type: .default)
let cached = TPCachedViableBottles(viableBottles: viableBottleIDs, partialBottles: partialBottleIDs)
self.model.setViableBottles(cached)
+ self.containerMO.escrowFetchDate = Date()
reply(viableBottleIDs, partialBottleIDs, nil)
} catch {
- os_log("fetchViableBottles unable to save bottles: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ os_log("fetchViableBottles unable to save bottles: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(nil, nil, error)
}
-
}
}
}
}
- func fetchPolicy(reply: @escaping (TPPolicy?, Error?) -> Void) {
+ func removeEscrowCache(reply: @escaping (Error?) -> Void) {
+ os_log("beginning a removeEscrowCache", log: tplogDebug, type: .default)
+
self.semaphore.wait()
- let reply: (TPPolicy?, Error?) -> Void = {
- os_log("fetchPolicy complete: %@", log: tplogTrace, type: .info, traceError($1))
+ let reply: (Error?) -> Void = {
+ os_log("removeEscrowCache complete %{public}@", log: tplogTrace, type: .info, traceError($0))
self.semaphore.signal()
- reply($0, $1)
+ reply($0)
}
self.moc.performAndWait {
- var keys: [NSNumber: String] = [:]
-
- guard let stableInfoData = self.containerMO.egoPeerStableInfo,
- let stableInfoSig = self.containerMO.egoPeerStableInfoSig else {
- os_log("fetchPolicy failed to find ego peer stableinfodata/sig", log: tplogDebug, type: .error)
- reply(nil, ContainerError.noPreparedIdentity)
- return
- }
- guard let stableInfo = TPPeerStableInfo(data: stableInfoData, sig: stableInfoSig) else {
- os_log("fetchPolicy failed to create TPPeerStableInfo", log: tplogDebug, type: .error)
- reply(nil, ContainerError.invalidStableInfoOrSig)
- return
- }
-
- let policyVersionCounter = stableInfo.policyVersion
- let policyVersion = NSNumber(value: policyVersionCounter)
- keys[policyVersion] = stableInfo.policyHash
-
- if let policyDocument = self.model.policy(withVersion: policyVersionCounter) {
- os_log("fetchPolicy: have a local version of policy %@: %@", log: tplogDebug, type: .default, policyVersion, policyDocument)
- do {
- let policy = try policyDocument.policy(withSecrets: stableInfo.policySecrets, decrypter: Decrypter())
- reply(policy, nil)
- return
- } catch {
- os_log("TPPolicyDocument failed: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, error)
- return
+ self.onQueueRemoveEscrowCache()
+ reply(nil)
+ }
+ }
+
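+ // Drops all cached escrow records (fully viable, partially viable, and legacy) and resets the fetch timestamp.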
+ private func onQueueRemoveEscrowCache() {
+ if let records = self.containerMO.fullyViableEscrowRecords {
+ self.containerMO.removeFromFullyViableEscrowRecords(records)
+ }
+ if let records = self.containerMO.partiallyViableEscrowRecords {
+ self.containerMO.removeFromPartiallyViableEscrowRecords(records)
+ }
+ if let records = self.containerMO.legacyEscrowRecords {
+ self.containerMO.removeFromLegacyEscrowRecords(records)
+ }
+ self.containerMO.escrowFetchDate = nil
+ }
+
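+ // Returns serialized escrow records, using the local cache unless forceFetch is set or the cache has expired.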
+ func fetchEscrowRecordsWithSemaphore(forceFetch: Bool, reply: @escaping ([Data]?, Error?) -> Void) {
+ os_log("beginning a fetchEscrowRecords", log: tplogDebug, type: .default)
+
+ self.moc.performAndWait {
+ var cachedRecords: [OTEscrowRecord] = []
+
+ if forceFetch == false {
+ os_log("fetchEscrowRecords: force fetch flag is off", log: tplogDebug, type: .default)
+ if let lastDate = self.containerMO.escrowFetchDate {
+ if Date() < lastDate.addingTimeInterval(escrowCacheTimeout) {
+ os_log("escrow cache still valid", log: tplogDebug, type: .default)
+ cachedRecords = onqueueCachedEscrowRecords()
+ } else {
+ os_log("escrow cache no longer valid", log: tplogDebug, type: .default)
+ self.onQueueRemoveEscrowCache()
+ }
}
+ } else {
+ os_log("fetchEscrowRecords: force fetch flag is on, removing escrow cache", log: tplogDebug, type: .default)
+ self.onQueueRemoveEscrowCache()
+ }
+
+ if !cachedRecords.isEmpty {
+ os_log("returning from fetchEscrowRecords, using cached escrow records", log: tplogDebug, type: .default)
+ let recordData: [Data] = cachedRecords.map { $0.data }
+ reply(recordData, nil)
+ return
}
- self.fetchPolicyDocuments(keys: keys) { result, error in
+ let request = FetchViableBottlesRequest.with {
+ $0.filterRequest = OctagonPlatformSupportsSOS() ? .unknown : .byOctagonOnly
+ }
+ if request.filterRequest == .byOctagonOnly {
+ os_log("Requesting Cuttlefish sort records by Octagon Only", log: tplogDebug, type: .default)
+ }
+
+ self.cuttlefish.fetchViableBottles(request) { response, error in
guard error == nil else {
+ os_log("fetchViableBottles failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(nil, error)
return
}
- guard let result = result else {
- os_log("fetchPolicy: nil policies returned")
- reply(nil, ContainerError.policyDocumentDoesNotValidate)
- return
- }
- guard result.count == 1 else {
- os_log("fetchPolicy: wrong length returned")
- reply(nil, ContainerError.policyDocumentDoesNotValidate)
- return
+
+ self.moc.performAndWait {
+ guard response?.viableBottles != nil else {
+ os_log("fetchViableBottles returned no viable bottles", log: tplogDebug, type: .default)
+ reply([], nil)
+ return
+ }
+
+ self.handleFetchViableBottlesResponseWithSemaphore(response: response)
}
- guard let r = result[policyVersion] else {
- os_log("fetchPolicy: version not found")
- reply(nil, ContainerError.unknownPolicyVersion(policyVersion.uint64Value))
- return
+
+ do {
+ try self.moc.save()
+ os_log("fetchViableBottles saved bottles and records", log: tplogDebug, type: .default)
+ self.containerMO.escrowFetchDate = Date()
+
+ var allEscrowRecordData: [Data] = []
+ if let fullyViableRecords = self.containerMO.fullyViableEscrowRecords as? Set<EscrowRecordMO> {
+ for record in fullyViableRecords {
+ if let r = self.escrowRecordMOToEscrowRecords(record: record, viability: .full) {
+ allEscrowRecordData.append(r.data)
+ }
+ }
+ }
+ if let partiallyViableRecords = self.containerMO.partiallyViableEscrowRecords as? Set<EscrowRecordMO> {
+ for record in partiallyViableRecords {
+ if let r = self.escrowRecordMOToEscrowRecords(record: record, viability: .partial) {
+ allEscrowRecordData.append(r.data)
+ }
+ }
+ }
+ if let legacyRecords = self.containerMO.legacyEscrowRecords as? Set<EscrowRecordMO> {
+ for record in legacyRecords {
+ if let r = self.escrowRecordMOToEscrowRecords(record: record, viability: .none) {
+ allEscrowRecordData.append(r.data)
+ }
+ }
+ }
+ reply(allEscrowRecordData, nil)
+ } catch {
+ os_log("fetchViableBottles unable to save bottles and records: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(nil, error)
}
- guard let data = r[1].data(using: .utf8) else {
- os_log("fetchPolicy: failed to convert data")
- reply(nil, ContainerError.unknownPolicyVersion(policyVersion.uint64Value))
+ }
+ }
+ }
+
+ func fetchCurrentPolicy(modelIDOverride: String?, reply: @escaping (TPSyncingPolicy?, TPPBPeerStableInfo_UserControllableViewStatus, Error?) -> Void) {
+ self.semaphore.wait()
+ let reply: (TPSyncingPolicy?, TPPBPeerStableInfo_UserControllableViewStatus, Error?) -> Void = {
+ os_log("fetchCurrentPolicy complete: %{public}@", log: tplogTrace, type: .info, traceError($2))
+ self.semaphore.signal()
+ reply($0, $1, $2)
+ }
+
+ self.moc.performAndWait {
+ guard let egoPeerID = self.containerMO.egoPeerID,
+ let egoPermData = self.containerMO.egoPeerPermanentInfo,
+ let egoPermSig = self.containerMO.egoPeerPermanentInfoSig,
+ let stableInfoData = self.containerMO.egoPeerStableInfo,
+ let stableInfoSig = self.containerMO.egoPeerStableInfoSig else {
+ os_log("fetchCurrentPolicy failed to find ego peer information", log: tplogDebug, type: .error)
+ // This is technically an error, but we also need to know the prevailing syncing policy at CloudKit signin time, not just after we've started to join
+
+ guard let modelID = modelIDOverride else {
+ os_log("no model ID override; returning error", log: tplogDebug, type: .default)
+ reply(nil, .UNKNOWN, ContainerError.noPreparedIdentity)
return
}
- guard let pd = TPPolicyDocument.policyDoc(withHash: r[0], data: data) else {
- os_log("fetchPolicy: pd is nil")
- reply(nil, ContainerError.policyDocumentDoesNotValidate)
+
+ guard let policyDocument = self.model.policy(withVersion: prevailingPolicyVersion.versionNumber) else {
+ os_log("prevailing policy is missing?", log: tplogDebug, type: .default)
+ reply(nil, .UNKNOWN, ContainerError.noPreparedIdentity)
return
}
+
do {
- let policy = try pd.policy(withSecrets: stableInfo.policySecrets, decrypter: Decrypter())
- reply(policy, nil)
+ let prevailingPolicy = try policyDocument.policy(withSecrets: [:], decrypter: Decrypter())
+ let syncingPolicy = try prevailingPolicy.syncingPolicy(forModel: modelID, syncUserControllableViews: .UNKNOWN)
+
+ os_log("returning a policy for model ID %{public}@", log: tplogDebug, type: .default, modelID)
+ reply(syncingPolicy, .UNKNOWN, nil)
+ return
} catch {
- os_log("TPPolicyDocument: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, error)
+ os_log("fetchCurrentPolicy failed to prevailing policy: %{public}@", log: tplogDebug, type: .error)
+ reply(nil, .UNKNOWN, ContainerError.noPreparedIdentity)
+ return
+ }
+ }
+
+ let keyFactory = TPECPublicKeyFactory()
+ guard let permanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
+ os_log("fetchCurrentPolicy failed to create TPPeerPermanentInfo", log: tplogDebug, type: .error)
+ reply(nil, .UNKNOWN, ContainerError.invalidPermanentInfoOrSig)
+ return
+ }
+ guard let stableInfo = TPPeerStableInfo(data: stableInfoData, sig: stableInfoSig) else {
+ os_log("fetchCurrentPolicy failed to create TPPeerStableInfo", log: tplogDebug, type: .error)
+ reply(nil, .UNKNOWN, ContainerError.invalidStableInfoOrSig)
+ return
+ }
+
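+ // With a full identity on hand, derive the syncing policy from our own stable info and report the trusted peers' consensus on user-controllable view syncing.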
+ do {
+ let syncingPolicy = try self.syncingPolicyFor(modelID: modelIDOverride ?? permanentInfo.modelID, stableInfo: stableInfo)
+
+ guard let peer = self.model.peer(withID: permanentInfo.peerID), let dynamicInfo = peer.dynamicInfo else {
+ os_log("fetchCurrentPolicy with no dynamic info", log: tplogDebug, type: .error)
+ reply(syncingPolicy, .UNKNOWN, nil)
+ return
}
+
+ // Note: we specifically do not want to sanitize this value for the platform: returning FOLLOWING here isn't that helpful
+ let peersUserViewSyncability = self.model.userViewSyncabilityConsensusAmongTrustedPeers(dynamicInfo)
+ reply(syncingPolicy, peersUserViewSyncability, nil)
+ return
+ } catch {
+ os_log("Fetching the syncing policy failed: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, .UNKNOWN, error)
+ return
}
}
}
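+ // Chooses which policy document governs syncing for the given model ID, ignoring policy versions authored by pre-CKKS4All peers.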
+ func syncingPolicyFor(modelID: String, stableInfo: TPPeerStableInfo) throws -> TPSyncingPolicy {
+ let bestPolicyVersion: TPPolicyVersion
+
+ let peerPolicyVersion = stableInfo.bestPolicyVersion()
+ if peerPolicyVersion.versionNumber < frozenPolicyVersion.versionNumber {
+ // This peer was from before CKKS4All, and we shouldn't listen to them when it comes to Syncing Policies
+ bestPolicyVersion = prevailingPolicyVersion
+ os_log("Ignoring policy version from pre-CKKS4All peer", log: tplogDebug, type: .default)
+
+ } else {
+ bestPolicyVersion = peerPolicyVersion
+ }
+
+ guard let policyDocument = self.model.policy(withVersion: bestPolicyVersion.versionNumber) else {
+ os_log("best policy is missing?", log: tplogDebug, type: .default)
+ throw ContainerError.unknownPolicyVersion(bestPolicyVersion.versionNumber)
+ }
+
+ let policy = try policyDocument.policy(withSecrets: stableInfo.policySecrets, decrypter: Decrypter())
+ return try policy.syncingPolicy(forModel: modelID, syncUserControllableViews: stableInfo.syncUserControllableViews)
+ }
+
// All-or-nothing: return an error in case full list cannot be returned.
- // Completion handler data format: [version : [hash, data]]
+ // Completion handler data format: [policy version : policy document data]
- func fetchPolicyDocuments(keys: [NSNumber: String],
- reply: @escaping ([NSNumber: [String]]?, Error?) -> Void) {
+ func fetchPolicyDocuments(versions: Set<TPPolicyVersion>,
+ reply: @escaping ([TPPolicyVersion: Data]?, Error?) -> Void) {
self.semaphore.wait()
- let reply: ([NSNumber: [String]]?, Error?) -> Void = {
- os_log("fetchPolicyDocuments complete: %@", log: tplogTrace, type: .info, traceError($1))
+ let reply: ([TPPolicyVersion: Data]?, Error?) -> Void = {
+ os_log("fetchPolicyDocuments complete: %{public}@", log: tplogTrace, type: .info, traceError($1))
self.semaphore.signal()
reply($0, $1)
}
- var keys = keys
- var docs: [NSNumber: [String]] = [:]
+ self.fetchPolicyDocumentsWithSemaphore(versions: versions) { policyDocuments, fetchError in
+ reply(policyDocuments.flatMap { $0.mapValues { policyDoc in policyDoc.protobuf } }, fetchError)
+ }
+ }
+
+ func fetchPolicyDocumentWithSemaphore(version: TPPolicyVersion,
+ reply: @escaping (TPPolicyDocument?, Error?) -> Void) {
+ self.fetchPolicyDocumentsWithSemaphore(versions: Set([version])) { versions, fetchError in
+ guard fetchError == nil else {
+ reply(nil, fetchError)
+ return
+ }
+
+ guard let doc = versions?[version] else {
+ os_log("fetchPolicyDocument: didn't return policy of version: %{public}@", log: tplogDebug, versions ?? "no versions")
+ reply(nil, ContainerError.unknownPolicyVersion(version.versionNumber))
+ return
+ }
+
+ reply(doc, nil)
+ }
+ }
+
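+ // Serves policy documents from the local model where possible, then fetches any missing versions from Cuttlefish and validates their hashes.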
+ func fetchPolicyDocumentsWithSemaphore(versions: Set<TPPolicyVersion>,
+ reply: @escaping ([TPPolicyVersion: TPPolicyDocument]?, Error?) -> Void) {
+ var remaining = versions
+ var docs: [TPPolicyVersion: TPPolicyDocument] = [:]
self.moc.performAndWait {
- for (version, hash) in keys {
- if let policydoc = try? self.getPolicyDoc(version.uint64Value), policydoc.policyHash == hash {
- docs[version] = [policydoc.policyHash, policydoc.protobuf.base64EncodedString()]
- keys[version] = nil
+ for version in remaining {
+ if let policydoc = try? self.getPolicyDoc(version.versionNumber), policydoc.version.policyHash == version.policyHash {
+ docs[policydoc.version] = policydoc
+ remaining.remove(version)
}
}
}
- if keys.isEmpty {
+ guard !remaining.isEmpty else {
reply(docs, nil)
return
}
let request = FetchPolicyDocumentsRequest.with {
- $0.keys = keys.map { key, value in
- PolicyDocumentKey.with { $0.version = key.uint64Value; $0.hash = value }}
+ $0.keys = remaining.map { version in
+ PolicyDocumentKey.with { $0.version = version.versionNumber; $0.hash = version.policyHash }}
}
self.cuttlefish.fetchPolicyDocuments(request) { response, error in
- os_log("FetchPolicyDocuments(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
guard let response = response, error == nil else {
- os_log("FetchPolicyDocuments failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ os_log("FetchPolicyDocuments failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(nil, error ?? ContainerError.cloudkitResponseMissing)
return
}
// TODO: validate the policy's signature
guard let doc = TPPolicyDocument.policyDoc(withHash: mapEntry.key.hash, data: mapEntry.value) else {
- os_log("Can't make policy document with hash %@ and data %@",
+ os_log("Can't make policy document with hash %{public}@ and data %{public}@",
log: tplogDebug, type: .default, mapEntry.key.hash, mapEntry.value.base64EncodedString())
reply(nil, ContainerError.policyDocumentDoesNotValidate)
return
}
- guard let hash = keys[NSNumber(value: doc.policyVersion)], hash == doc.policyHash else {
- os_log("Requested hash %@ does not match fetched hash %@", log: tplogDebug, type: .default,
- keys[NSNumber(value: doc.policyVersion)] ?? "<nil>", doc.policyHash)
+ guard let expectedVersion = (remaining.first { $0.versionNumber == doc.version.versionNumber }) else {
+ os_log("Received a policy version we didn't request: %d", log: tplogDebug, type: .default, doc.version.versionNumber)
+ reply(nil, ContainerError.policyDocumentDoesNotValidate)
+ return
+ }
+
+ guard expectedVersion.policyHash == doc.version.policyHash else {
+ os_log("Requested hash %{public}@ does not match fetched hash %{public}@", log: tplogDebug, type: .default,
+ expectedVersion.policyHash, doc.version.policyHash)
reply(nil, ContainerError.policyDocumentDoesNotValidate)
return
}
- keys[NSNumber(value: doc.policyVersion)] = nil // Server responses should be unique, let's enforce
- docs[NSNumber(value: doc.policyVersion)] = [doc.policyHash, doc.protobuf.base64EncodedString()]
+
+ remaining.remove(expectedVersion) // Server responses should be unique, let's enforce
+
+ docs[doc.version] = doc
self.model.register(doc)
}
return
}
- if !keys.isEmpty {
- let (unknownVersion, _) = keys.first!
- reply(nil, ContainerError.unknownPolicyVersion(unknownVersion.uint64Value))
+ // Determine if there's anything left to fetch
+ guard let unfetchedVersion = remaining.first else {
+ // Nothing remaining? Success!
+ reply(docs, nil)
return
}
- reply(docs, nil)
+ reply(nil, ContainerError.unknownPolicyVersion(unfetchedVersion.versionNumber))
}
}
}
}
vouchers?.forEach { voucher in
self.model.register(voucher)
- let voucherMO = VoucherMO(context: self.moc)
- voucherMO.voucherInfo = voucher.data
- voucherMO.voucherInfoSig = voucher.sig
- peer.addToVouchers(voucherMO)
+
+ if (peer.vouchers as? Set<VoucherMO> ?? Set()).filter({ $0.voucherInfo == voucher.data && $0.voucherInfoSig == voucher.sig }).isEmpty {
+ let voucherMO = VoucherMO(context: self.moc)
+ voucherMO.voucherInfo = voucher.data
+ voucherMO.voucherInfoSig = voucher.sig
+ peer.addToVouchers(voucherMO)
+ }
}
return peer
}
/* Returns any new CKKS keys that need uploading, as well as any TLKShares necessary for those keys */
func makeSharesForNewKeySets(ckksKeys: [CKKSKeychainBackedKeySet],
- tlkShares: [CKKSTLKShare],
- egoPeerKeys: OctagonSelfPeerKeys,
- egoPeerDynamicInfo: TPPeerDynamicInfo,
- epoch: Int) throws -> ([ViewKeys], [TLKShare]) {
+ tlkShares: [CKKSTLKShare],
+ egoPeerKeys: OctagonSelfPeerKeys,
+ egoPeerDynamicInfo: TPPeerDynamicInfo,
+ epoch: Int) throws -> ([ViewKeys], [TLKShare]) {
let newCKKSKeys = ckksKeys.filter { $0.newUpload }
let newViewKeys: [ViewKeys] = newCKKSKeys.map(ViewKeys.convert)
do {
let peerIDsWithAccess = try self.model.getPeerIDsTrustedByPeer(with: egoPeerDynamicInfo,
toAccessView: keyset.tlk.zoneID.zoneName)
- os_log("Planning to share %@ with peers %@", log: tplogDebug, type: .default, String(describing: keyset.tlk), peerIDsWithAccess)
+ os_log("Planning to share %@ with peers %{public}@", log: tplogDebug, type: .default, String(describing: keyset.tlk), peerIDsWithAccess)
let peers = peerIDsWithAccess.compactMap { self.model.peer(withID: $0) }
let viewPeerShares = try peers.map { receivingPeer in
poisoned: 0))
}
- peerShares = peerShares + viewPeerShares
-
+ peerShares += viewPeerShares
} catch {
- os_log("Unable to create TLKShares for keyset %@: %@", log: tplogDebug, type: .default, String(describing: keyset), error as CVarArg)
+ os_log("Unable to create TLKShares for keyset %@: %{public}@", log: tplogDebug, type: .default, String(describing: keyset), error as CVarArg)
}
}
signing: egoPeerKeys.signingKey,
currentMachineIDs: self.onqueueCurrentMIDList())
- let newStableInfo = try self.createNewStableInfoIfNeeded(stableChanges: nil,
- egoPeerID: egoPeerID,
+ let userViewSyncability: TPPBPeerStableInfo_UserControllableViewStatus?
+ if [.ENABLED, .DISABLED].contains(stableInfo.syncUserControllableViews) {
+ // No change!
+ userViewSyncability = nil
+ } else {
+ let newUserViewSyncability: TPPBPeerStableInfo_UserControllableViewStatus
+
+ if peerPermanentInfo.modelID.hasPrefix("AppleTV") ||
+ peerPermanentInfo.modelID.hasPrefix("AudioAccessory") ||
+ peerPermanentInfo.modelID.hasPrefix("Watch") {
+ // Watches, TVs, and AudioAccessories always join as FOLLOWING.
+ newUserViewSyncability = .FOLLOWING
+ } else {
+ // All other platforms select what the other devices say to do
+ newUserViewSyncability = self.model.userViewSyncabilityConsensusAmongTrustedPeers(dynamicInfo)
+ }
+
+ os_log("join: setting 'user view sync' control as: %{public}@", log: tplogDebug, type: .default,
+ TPPBPeerStableInfo_UserControllableViewStatusAsString(newUserViewSyncability))
+ userViewSyncability = newUserViewSyncability
+ }
+
+ let newStableInfo = try self.createNewStableInfoIfNeeded(stableChanges: StableChanges.change(viewStatus: userViewSyncability),
+ permanentInfo: peerPermanentInfo,
+ existingStableInfo: stableInfo,
dynamicInfo: dynamicInfo,
signingKeyPair: egoPeerKeys.signingKey)
ckksKeys: [CKKSKeychainBackedKeySet],
tlkShares: [CKKSTLKShare],
preapprovedKeys: [Data]?,
- reply: @escaping (String?, [CKRecord], Error?) -> Void) {
+ reply: @escaping (String?, [CKRecord], TPSyncingPolicy?, Error?) -> Void) {
self.semaphore.wait()
- let reply: (String?, [CKRecord], Error?) -> Void = {
- os_log("join complete: %@", log: tplogTrace, type: .info, traceError($2))
+ let reply: (String?, [CKRecord], TPSyncingPolicy?, Error?) -> Void = {
+ os_log("join complete: %{public}@", log: tplogTrace, type: .info, traceError($3))
self.semaphore.signal()
- reply($0, $1, $2)
+ reply($0, $1, $2, $3)
}
self.fetchAndPersistChanges { error in
guard error == nil else {
- reply(nil, [], error)
+ reply(nil, [], nil, error)
return
}
- self.moc.performAndWait {
- guard let voucher = TPVoucher(infoWith: voucherData, sig: voucherSig) else {
- reply(nil, [], ContainerError.invalidVoucherOrSig)
- return
- }
- guard let sponsor = self.model.peer(withID: voucher.sponsorID) else {
- reply(nil, [], ContainerError.sponsorNotRegistered(voucher.sponsorID))
- return
- }
- // Fetch ego peer identity from local storage.
- guard let egoPeerID = self.containerMO.egoPeerID,
- let egoPermData = self.containerMO.egoPeerPermanentInfo,
- let egoPermSig = self.containerMO.egoPeerPermanentInfoSig,
- let egoStableData = self.containerMO.egoPeerStableInfo,
- let egoStableSig = self.containerMO.egoPeerStableInfoSig
- else {
- reply(nil, [], ContainerError.noPreparedIdentity)
- return
+ // To join, you must know all policies that exist
+ let allPolicyVersions = self.model.allPolicyVersions()
+ self.fetchPolicyDocumentsWithSemaphore(versions: allPolicyVersions) { _, policyFetchError in
+ if let error = policyFetchError {
+ os_log("join: error fetching all requested policies (continuing anyway): %{public}@", log: tplogDebug, type: .default, error as CVarArg)
}
- let keyFactory = TPECPublicKeyFactory()
- guard let selfPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
- reply(nil, [], ContainerError.invalidPermanentInfoOrSig)
- return
- }
- guard let selfStableInfo = TPPeerStableInfo(data: egoStableData, sig: egoStableSig) else {
- reply(nil, [], ContainerError.invalidStableInfoOrSig)
- return
- }
- guard self.onqueueMachineIDAllowedByIDMS(machineID: selfPermanentInfo.machineID) else {
- os_log("join: self machineID %@ not on list", log: tplogDebug, type: .debug, selfPermanentInfo.machineID)
- self.onqueueTTRUntrusted()
- reply(nil, [], ContainerError.preparedIdentityNotOnAllowedList(selfPermanentInfo.machineID))
- return
- }
-
- loadEgoKeys(peerID: egoPeerID) { egoPeerKeys, error in
- guard let egoPeerKeys = egoPeerKeys else {
- os_log("Don't have my own peer keys; can't join: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
- reply(nil, [], error)
- return
- }
- self.moc.performAndWait {
- let peer: Peer
- let newDynamicInfo: TPPeerDynamicInfo
- do {
- (peer, newDynamicInfo) = try self.onqueuePreparePeerForJoining(egoPeerID: egoPeerID,
- peerPermanentInfo: selfPermanentInfo,
- stableInfo: selfStableInfo,
- sponsorID: sponsor.peerID,
- preapprovedKeys: preapprovedKeys,
- vouchers: [SignedVoucher.with {
- $0.voucher = voucher.data
- $0.sig = voucher.sig
- }, ],
- egoPeerKeys: egoPeerKeys)
- } catch {
- os_log("Unable to create peer for joining: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, [], error)
- return
- }
-
- let allTLKShares: [TLKShare]
- let viewKeys: [ViewKeys]
- do {
- (viewKeys, allTLKShares) = try self.makeSharesForNewKeySets(ckksKeys: ckksKeys,
- tlkShares: tlkShares,
- egoPeerKeys: egoPeerKeys,
- egoPeerDynamicInfo: newDynamicInfo,
- epoch: Int(selfPermanentInfo.epoch))
- } catch {
- os_log("Unable to process keys before joining: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, [], error)
- return
- }
+ self.moc.performAndWait {
+ guard let voucher = TPVoucher(infoWith: voucherData, sig: voucherSig) else {
+ reply(nil, [], nil, ContainerError.invalidVoucherOrSig)
+ return
+ }
+ guard let sponsor = self.model.peer(withID: voucher.sponsorID) else {
+ reply(nil, [], nil, ContainerError.sponsorNotRegistered(voucher.sponsorID))
+ return
+ }
- do {
- try self.model.checkIntroduction(forCandidate: selfPermanentInfo,
- stableInfo: peer.stableInfoAndSig.toStableInfo(),
- withSponsorID: sponsor.peerID)
- } catch {
- os_log("Error checking introduction: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, [], error)
+ // Fetch ego peer identity from local storage.
+ guard let egoPeerID = self.containerMO.egoPeerID,
+ let egoPermData = self.containerMO.egoPeerPermanentInfo,
+ let egoPermSig = self.containerMO.egoPeerPermanentInfoSig,
+ let egoStableData = self.containerMO.egoPeerStableInfo,
+ let egoStableSig = self.containerMO.egoPeerStableInfoSig
+ else {
+ reply(nil, [], nil, ContainerError.noPreparedIdentity)
return
- }
+ }
- var bottle: Bottle
- do {
- bottle = try self.assembleBottle(egoPeerID: egoPeerID)
- } catch {
- reply(nil, [], error)
+ let keyFactory = TPECPublicKeyFactory()
+ guard let selfPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
+ reply(nil, [], nil, ContainerError.invalidPermanentInfoOrSig)
+ return
+ }
+ guard let selfStableInfo = TPPeerStableInfo(data: egoStableData, sig: egoStableSig) else {
+ reply(nil, [], nil, ContainerError.invalidStableInfoOrSig)
+ return
+ }
+ guard self.onqueueMachineIDAllowedByIDMS(machineID: selfPermanentInfo.machineID) else {
+ os_log("join: self machineID %{public}@ not on list", log: tplogDebug, type: .debug, selfPermanentInfo.machineID)
+ self.onqueueTTRUntrusted()
+ reply(nil, [], nil, ContainerError.preparedIdentityNotOnAllowedList(selfPermanentInfo.machineID))
+ return
+ }
+
+ loadEgoKeys(peerID: egoPeerID) { egoPeerKeys, error in
+ guard let egoPeerKeys = egoPeerKeys else {
+ os_log("Don't have my own peer keys; can't join: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ reply(nil, [], nil, error)
return
}
+ self.moc.performAndWait {
+ let peer: Peer
+ let newDynamicInfo: TPPeerDynamicInfo
+ do {
+ (peer, newDynamicInfo) = try self.onqueuePreparePeerForJoining(egoPeerID: egoPeerID,
+ peerPermanentInfo: selfPermanentInfo,
+ stableInfo: selfStableInfo,
+ sponsorID: sponsor.peerID,
+ preapprovedKeys: preapprovedKeys,
+ vouchers: [SignedVoucher.with {
+ $0.voucher = voucher.data
+ $0.sig = voucher.sig
+ }, ],
+ egoPeerKeys: egoPeerKeys)
+ } catch {
+ os_log("Unable to create peer for joining: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, [], nil, error)
+ return
+ }
- os_log("Beginning join for peer %@", log: tplogDebug, type: .default, egoPeerID)
- os_log("Join permanentInfo: %@", log: tplogDebug, type: .debug, egoPermData.base64EncodedString())
- os_log("Join permanentInfoSig: %@", log: tplogDebug, type: .debug, egoPermSig.base64EncodedString())
- os_log("Join stableInfo: %@", log: tplogDebug, type: .debug, peer.stableInfoAndSig.peerStableInfo.base64EncodedString())
- os_log("Join stableInfoSig: %@", log: tplogDebug, type: .debug, peer.stableInfoAndSig.sig.base64EncodedString())
- os_log("Join dynamicInfo: %@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.peerDynamicInfo.base64EncodedString())
- os_log("Join dynamicInfoSig: %@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.sig.base64EncodedString())
-
- os_log("Join vouchers: %@", log: tplogDebug, type: .debug, peer.vouchers.map { $0.voucher.base64EncodedString() })
- os_log("Join voucher signatures: %@", log: tplogDebug, type: .debug, peer.vouchers.map { $0.sig.base64EncodedString() })
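+ // The new peer's stable info is needed below to compute the syncing policy returned to the caller.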
+ guard let peerStableInfo = peer.stableInfoAndSig.toStableInfo() else {
+ os_log("Unable to create new peer stable info for joining", log: tplogDebug, type: .default)
+ reply(nil, [], nil, ContainerError.invalidStableInfoOrSig)
+ return
+ }
- os_log("Uploading %d tlk shares", log: tplogDebug, type: .default, allTLKShares.count)
+ let allTLKShares: [TLKShare]
+ let viewKeys: [ViewKeys]
+ do {
+ (viewKeys, allTLKShares) = try self.makeSharesForNewKeySets(ckksKeys: ckksKeys,
+ tlkShares: tlkShares,
+ egoPeerKeys: egoPeerKeys,
+ egoPeerDynamicInfo: newDynamicInfo,
+ epoch: Int(selfPermanentInfo.epoch))
+ } catch {
+ os_log("Unable to process keys before joining: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, [], nil, error)
+ return
+ }
- do {
- os_log("Join peer: %@", log: tplogDebug, type: .debug, try peer.serializedData().base64EncodedString())
- } catch {
- os_log("Join unable to encode peer: %@", log: tplogDebug, type: .debug, error as CVarArg)
- }
+ do {
+ try self.model.checkIntroduction(forCandidate: selfPermanentInfo,
+ stableInfo: peer.stableInfoAndSig.toStableInfo(),
+ withSponsorID: sponsor.peerID)
+ } catch {
+ os_log("Error checking introduction: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, [], nil, error)
+ return
+ }
- let changeToken = self.containerMO.changeToken ?? ""
- let request = JoinWithVoucherRequest.with {
- $0.changeToken = changeToken
- $0.peer = peer
- $0.bottle = bottle
- $0.tlkShares = allTLKShares
- $0.viewKeys = viewKeys
- }
- self.cuttlefish.joinWithVoucher(request) { response, error in
- os_log("JoinWithVoucher(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
- guard let response = response, error == nil else {
- os_log("joinWithVoucher failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(nil, [], error ?? ContainerError.cloudkitResponseMissing)
+ var bottle: Bottle
+ do {
+ bottle = try self.assembleBottle(egoPeerID: egoPeerID)
+ } catch {
+ reply(nil, [], nil, error)
return
}
- self.moc.performAndWait {
- do {
- self.containerMO.egoPeerStableInfo = peer.stableInfoAndSig.peerStableInfo
- self.containerMO.egoPeerStableInfoSig = peer.stableInfoAndSig.sig
- try self.onQueuePersist(changes: response.changes)
- os_log("JoinWithVoucher succeeded", log: tplogDebug)
+ os_log("Beginning join for peer %{public}@", log: tplogDebug, type: .default, egoPeerID)
+ os_log("Join permanentInfo: %{public}@", log: tplogDebug, type: .debug, egoPermData.base64EncodedString())
+ os_log("Join permanentInfoSig: %{public}@", log: tplogDebug, type: .debug, egoPermSig.base64EncodedString())
+ os_log("Join stableInfo: %{public}@", log: tplogDebug, type: .debug, peer.stableInfoAndSig.peerStableInfo.base64EncodedString())
+ os_log("Join stableInfoSig: %{public}@", log: tplogDebug, type: .debug, peer.stableInfoAndSig.sig.base64EncodedString())
+ os_log("Join dynamicInfo: %{public}@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.peerDynamicInfo.base64EncodedString())
+ os_log("Join dynamicInfoSig: %{public}@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.sig.base64EncodedString())
- let keyHierarchyRecords = response.zoneKeyHierarchyRecords.compactMap { CKRecord($0) }
- reply(egoPeerID, keyHierarchyRecords, nil)
- } catch {
- os_log("JoinWithVoucher failed: %@", log: tplogDebug, String(describing: error))
- reply(nil, [], error)
+ os_log("Join vouchers: %{public}@", log: tplogDebug, type: .debug, peer.vouchers.map { $0.voucher.base64EncodedString() })
+ os_log("Join voucher signatures: %{public}@", log: tplogDebug, type: .debug, peer.vouchers.map { $0.sig.base64EncodedString() })
+
+ os_log("Uploading %d tlk shares", log: tplogDebug, type: .default, allTLKShares.count)
+
+ do {
+ os_log("Join peer: %{public}@", log: tplogDebug, type: .debug, try peer.serializedData().base64EncodedString())
+ } catch {
+ os_log("Join unable to encode peer: %{public}@", log: tplogDebug, type: .debug, error as CVarArg)
+ }
+
+ let changeToken = self.containerMO.changeToken ?? ""
+ let request = JoinWithVoucherRequest.with {
+ $0.changeToken = changeToken
+ $0.peer = peer
+ $0.bottle = bottle
+ $0.tlkShares = allTLKShares
+ $0.viewKeys = viewKeys
+ }
+ self.cuttlefish.joinWithVoucher(request) { response, error in
+ guard let response = response, error == nil else {
+ os_log("joinWithVoucher failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(nil, [], nil, error ?? ContainerError.cloudkitResponseMissing)
+ return
+ }
+
+ self.moc.performAndWait {
+ do {
+ self.containerMO.egoPeerStableInfo = peer.stableInfoAndSig.peerStableInfo
+ self.containerMO.egoPeerStableInfoSig = peer.stableInfoAndSig.sig
+
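+ // Derive the syncing policy from our model ID and the new stable info; it is handed back to the caller in the reply.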
+ let syncingPolicy = try self.syncingPolicyFor(modelID: selfPermanentInfo.modelID,
+ stableInfo: peerStableInfo)
+
+ try self.onQueuePersist(changes: response.changes)
+ os_log("JoinWithVoucher succeeded", log: tplogDebug)
+
+ let keyHierarchyRecords = response.zoneKeyHierarchyRecords.compactMap { CKRecord($0) }
+ reply(egoPeerID, keyHierarchyRecords, syncingPolicy, nil)
+ } catch {
+ os_log("JoinWithVoucher failed: %{public}@", log: tplogDebug, String(describing: error))
+ reply(nil, [], nil, error)
+ }
}
}
}
}
}
- func requestHealthCheck(requiresEscrowCheck: Bool, reply: @escaping (Bool, Bool, Bool, Error?) -> Void) {
+ func requestHealthCheck(requiresEscrowCheck: Bool, reply: @escaping (Bool, Bool, Bool, Bool, Error?) -> Void) {
self.semaphore.wait()
- let reply: (Bool, Bool, Bool, Error?) -> Void = {
- os_log("health check complete: %@", log: tplogTrace, type: .info, traceError($3))
+ let reply: (Bool, Bool, Bool, Bool, Error?) -> Void = {
+ os_log("health check complete: %{public}@", log: tplogTrace, type: .info, traceError($4))
self.semaphore.signal()
- reply($0, $1, $2, $3)
+ reply($0, $1, $2, $3, $4)
}
os_log("requestHealthCheck requiring escrow check: %d", log: tplogDebug, type: .default, requiresEscrowCheck)
guard let egoPeerID = self.containerMO.egoPeerID else {
// No identity, nothing to do
os_log("requestHealthCheck: No identity.", log: tplogDebug, type: .default)
- reply(false, false, false, ContainerError.noPreparedIdentity)
+ reply(false, false, false, false, ContainerError.noPreparedIdentity)
return
}
let request = GetRepairActionRequest.with {
self.cuttlefish.getRepairAction(request) { response, error in
guard error == nil else {
- reply(false, false, false, error)
+ reply(false, false, false, false, error)
return
}
guard let action = response?.repairAction else {
- os_log("repair response is empty, returning false: %@", log: tplogDebug, type: .default)
- reply(false, false, false, nil)
+ os_log("repair response is empty, returning false", log: tplogDebug, type: .default)
+ reply(false, false, false, false, nil)
return
}
var postRepairAccount: Bool = false
var postRepairEscrow: Bool = false
var resetOctagon: Bool = false
+ var leaveTrust: Bool = false
switch action {
case .noAction:
break
case .postRepairAccount:
postRepairAccount = true
- break
case .postRepairEscrow:
postRepairEscrow = true
- break
case .resetOctagon:
resetOctagon = true
- break
+ case .leaveTrust:
+ leaveTrust = true
case .UNRECOGNIZED:
break
}
- reply(postRepairAccount, postRepairEscrow, resetOctagon, nil)
+ reply(postRepairAccount, postRepairEscrow, resetOctagon, leaveTrust, nil)
}
}
}
func getSupportAppInfo(reply: @escaping (Data?, Error?) -> Void) {
self.semaphore.wait()
let reply: (Data?, Error?) -> Void = {
- os_log("getSupportAppInfo complete: %@", log: tplogTrace, type: .info, traceError($1))
+ os_log("getSupportAppInfo complete: %{public}@", log: tplogTrace, type: .info, traceError($1))
self.semaphore.signal()
reply($0, $1)
}
self.cuttlefish.getSupportAppInfo { response, error in
- os_log("getSupportAppInfo(): %@, error: %@", log: tplogDebug,
- "(\(String(describing: response))", "\(String(describing: error))")
guard let response = response, error == nil else {
- os_log("getSupportAppInfo failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ os_log("getSupportAppInfo failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(nil, error ?? ContainerError.cloudkitResponseMissing)
return
}
reply(data, nil)
}
-
}
- func preflightPreapprovedJoin(reply: @escaping (Bool, Error?) -> Void) {
+ func preflightPreapprovedJoin(preapprovedKeys: [Data]?,
+ reply: @escaping (Bool, Error?) -> Void) {
self.semaphore.wait()
let reply: (Bool, Error?) -> Void = {
- os_log("preflightPreapprovedJoin complete: %@", log: tplogTrace, type: .info, traceError($1))
+ os_log("preflightPreapprovedJoin complete: %{public}@", log: tplogTrace, type: .info, traceError($1))
self.semaphore.signal()
reply($0, $1)
}
self.fetchAndPersistChanges { error in
guard error == nil else {
- os_log("preflightPreapprovedJoin unable to fetch changes: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "")
+ os_log("preflightPreapprovedJoin unable to fetch changes: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "")
reply(false, error)
return
}
- // We explicitly ignore the machine ID list here; we're only interested in the peer states: do they preapprove us?
+ // We need to try to have all policy versions that our peers claim to behave by
+ let allPolicyVersions = self.model.allPolicyVersions()
+ self.fetchPolicyDocumentsWithSemaphore(versions: allPolicyVersions) { _, policyFetchError in
+ if let error = policyFetchError {
+ os_log("preflightPreapprovedJoin: error fetching all requested policies (continuing anyway): %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ }
- guard !self.model.allPeerIDs().isEmpty else {
- // If, after fetch and handle changes, there's no peers, then we can likely establish.
- reply(true, nil)
- return
- }
+ // We explicitly ignore the machine ID list here; we're only interested in the peer states: do they preapprove us?
- guard let egoPeerID = self.containerMO.egoPeerID,
- let egoPermData = self.containerMO.egoPeerPermanentInfo,
- let egoPermSig = self.containerMO.egoPeerPermanentInfoSig
- else {
- os_log("preflightPreapprovedJoin: no prepared identity", log: tplogDebug, type: .debug)
- reply(false, ContainerError.noPreparedIdentity)
+ guard !self.model.allPeerIDs().isEmpty else {
+ // If, after fetch and handle changes, there's no peers, then we can likely establish.
+ reply(true, nil)
return
- }
+ }
- let keyFactory = TPECPublicKeyFactory()
- guard let egoPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
- os_log("preflightPreapprovedJoin: invalid permanent info", log: tplogDebug, type: .debug)
- reply(false, ContainerError.invalidPermanentInfoOrSig)
- return
- }
+ guard let egoPeerID = self.containerMO.egoPeerID,
+ let egoPermData = self.containerMO.egoPeerPermanentInfo,
+ let egoPermSig = self.containerMO.egoPeerPermanentInfoSig
+ else {
+ os_log("preflightPreapprovedJoin: no prepared identity", log: tplogDebug, type: .debug)
+ reply(false, ContainerError.noPreparedIdentity)
+ return
+ }
- guard self.model.hasPotentiallyTrustedPeerPreapprovingKey(egoPermanentInfo.signingPubKey.spki()) else {
- os_log("preflightPreapprovedJoin: no peers preapprove our key", log: tplogDebug, type: .debug)
- reply(false, ContainerError.noPeersPreapprovePreparedIdentity)
- return
- }
+ let keyFactory = TPECPublicKeyFactory()
+ guard let egoPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
+ os_log("preflightPreapprovedJoin: invalid permanent info", log: tplogDebug, type: .debug)
+ reply(false, ContainerError.invalidPermanentInfoOrSig)
+ return
+ }
+
+ guard self.model.hasPotentiallyTrustedPeerPreapprovingKey(egoPermanentInfo.signingPubKey.spki()) else {
+ os_log("preflightPreapprovedJoin: no peers preapprove our key", log: tplogDebug, type: .debug)
+ reply(false, ContainerError.noPeersPreapprovePreparedIdentity)
+ return
+ }
+
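+ // Additionally require that at least one of the keys we were asked to preapprove belongs to an existing, potentially trusted peer.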
+ let keysApprovingPeers = preapprovedKeys?.filter { key in
+ self.model.hasPotentiallyTrustedPeer(withSigningKey: key)
+ }
+
+ guard (keysApprovingPeers?.count ?? 0) > 0 else {
+ os_log("preflightPreapprovedJoin: no reciprocal trust for existing peers", log: tplogDebug, type: .debug)
+ reply(false, ContainerError.noPeersPreapprovedBySelf)
+ return
+ }
- reply(true, nil)
+ reply(true, nil)
+ }
}
}
func preapprovedJoin(ckksKeys: [CKKSKeychainBackedKeySet],
tlkShares: [CKKSTLKShare],
preapprovedKeys: [Data]?,
- reply: @escaping (String?, [CKRecord], Error?) -> Void) {
+ reply: @escaping (String?, [CKRecord], TPSyncingPolicy?, Error?) -> Void) {
self.semaphore.wait()
- let reply: (String?, [CKRecord], Error?) -> Void = {
- os_log("preapprovedJoin complete: %@", log: tplogTrace, type: .info, traceError($2))
+ let reply: (String?, [CKRecord], TPSyncingPolicy?, Error?) -> Void = {
+ os_log("preapprovedJoin complete: %{public}@", log: tplogTrace, type: .info, traceError($3))
self.semaphore.signal()
- reply($0, $1, $2)
+ reply($0, $1, $2, $3)
}
self.fetchAndPersistChangesIfNeeded { error in
guard error == nil else {
- os_log("preapprovedJoin unable to fetch changes: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "")
- reply(nil, [], error)
+ os_log("preapprovedJoin unable to fetch changes: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "")
+ reply(nil, [], nil, error)
return
}
self.moc.performAndWait {
// That's up to the caller.
if self.model.allPeerIDs().isEmpty {
os_log("preapprovedJoin but no existing peers, attempting establish", log: tplogDebug, type: .debug)
+
self.onqueueEstablish(ckksKeys: ckksKeys,
- tlkShares: tlkShares,
- preapprovedKeys: preapprovedKeys,
- reply: reply)
+ tlkShares: tlkShares,
+ preapprovedKeys: preapprovedKeys,
+ reply: reply)
return
}
let egoStableSig = self.containerMO.egoPeerStableInfoSig
else {
os_log("preapprovedJoin: no prepared identity", log: tplogDebug, type: .debug)
- reply(nil, [], ContainerError.noPreparedIdentity)
+ reply(nil, [], nil, ContainerError.noPreparedIdentity)
return
}
let keyFactory = TPECPublicKeyFactory()
guard let selfPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: keyFactory) else {
- reply(nil, [], ContainerError.invalidPermanentInfoOrSig)
+ reply(nil, [], nil, ContainerError.invalidPermanentInfoOrSig)
return
}
guard let selfStableInfo = TPPeerStableInfo(data: egoStableData, sig: egoStableSig) else {
- reply(nil, [], ContainerError.invalidStableInfoOrSig)
+ reply(nil, [], nil, ContainerError.invalidStableInfoOrSig)
return
}
guard self.onqueueMachineIDAllowedByIDMS(machineID: selfPermanentInfo.machineID) else {
- os_log("preapprovedJoin: self machineID %@ (me) not on list", log: tplogDebug, type: .debug, selfPermanentInfo.machineID)
+ os_log("preapprovedJoin: self machineID %{public}@ (me) not on list", log: tplogDebug, type: .debug, selfPermanentInfo.machineID)
self.onqueueTTRUntrusted()
- reply(nil, [], ContainerError.preparedIdentityNotOnAllowedList(selfPermanentInfo.machineID))
+ reply(nil, [], nil, ContainerError.preparedIdentityNotOnAllowedList(selfPermanentInfo.machineID))
return
}
loadEgoKeys(peerID: egoPeerID) { egoPeerKeys, error in
guard let egoPeerKeys = egoPeerKeys else {
os_log("preapprovedJoin: Don't have my own keys: can't join", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(nil, [], error)
+ reply(nil, [], nil, error)
return
}
guard self.model.hasPotentiallyTrustedPeerPreapprovingKey(egoPeerKeys.signingKey.publicKey().spki()) else {
os_log("preapprovedJoin: no peers preapprove our key", log: tplogDebug, type: .debug)
- reply(nil, [], ContainerError.noPeersPreapprovePreparedIdentity)
+ reply(nil, [], nil, ContainerError.noPeersPreapprovePreparedIdentity)
return
}
self.moc.performAndWait {
-
let peer: Peer
let newDynamicInfo: TPPeerDynamicInfo
do {
vouchers: [],
egoPeerKeys: egoPeerKeys)
} catch {
- os_log("Unable to create peer for joining: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, [], error)
+ os_log("Unable to create peer for joining: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, [], nil, error)
+ return
+ }
+
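+ // Capture the new stable info now; the syncing policy computed from it is included in the reply.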
+ guard let peerStableInfo = peer.stableInfoAndSig.toStableInfo() else {
+ os_log("Unable to create new peer stable info for joining", log: tplogDebug, type: .default)
+ reply(nil, [], nil, ContainerError.invalidStableInfoOrSig)
return
}
let viewKeys: [ViewKeys]
do {
(viewKeys, allTLKShares) = try self.makeSharesForNewKeySets(ckksKeys: ckksKeys,
- tlkShares: tlkShares,
- egoPeerKeys: egoPeerKeys,
- egoPeerDynamicInfo: newDynamicInfo,
- epoch: Int(selfPermanentInfo.epoch))
+ tlkShares: tlkShares,
+ egoPeerKeys: egoPeerKeys,
+ egoPeerDynamicInfo: newDynamicInfo,
+ epoch: Int(selfPermanentInfo.epoch))
} catch {
- os_log("Unable to process keys before joining: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, [], error)
+ os_log("Unable to process keys before joining: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, [], nil, error)
return
}
do {
bottle = try self.assembleBottle(egoPeerID: egoPeerID)
} catch {
- reply(nil, [], error)
+ reply(nil, [], nil, error)
return
}
- os_log("Beginning preapprovedJoin for peer %@", log: tplogDebug, type: .default, egoPeerID)
- os_log("preapprovedJoin permanentInfo: %@", log: tplogDebug, type: .debug, egoPermData.base64EncodedString())
- os_log("preapprovedJoin permanentInfoSig: %@", log: tplogDebug, type: .debug, egoPermSig.base64EncodedString())
- os_log("preapprovedJoin stableInfo: %@", log: tplogDebug, type: .debug, egoStableData.base64EncodedString())
- os_log("preapprovedJoin stableInfoSig: %@", log: tplogDebug, type: .debug, egoStableSig.base64EncodedString())
- os_log("preapprovedJoin dynamicInfo: %@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.peerDynamicInfo.base64EncodedString())
- os_log("preapprovedJoin dynamicInfoSig: %@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.sig.base64EncodedString())
+ os_log("Beginning preapprovedJoin for peer %{public}@", log: tplogDebug, type: .default, egoPeerID)
+ os_log("preapprovedJoin permanentInfo: %{public}@", log: tplogDebug, type: .debug, egoPermData.base64EncodedString())
+ os_log("preapprovedJoin permanentInfoSig: %{public}@", log: tplogDebug, type: .debug, egoPermSig.base64EncodedString())
+ os_log("preapprovedJoin stableInfo: %{public}@", log: tplogDebug, type: .debug, egoStableData.base64EncodedString())
+ os_log("preapprovedJoin stableInfoSig: %{public}@", log: tplogDebug, type: .debug, egoStableSig.base64EncodedString())
+ os_log("preapprovedJoin dynamicInfo: %{public}@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.peerDynamicInfo.base64EncodedString())
+ os_log("preapprovedJoin dynamicInfoSig: %{public}@", log: tplogDebug, type: .debug, peer.dynamicInfoAndSig.sig.base64EncodedString())
- os_log("preapprovedJoin vouchers: %@", log: tplogDebug, type: .debug, peer.vouchers.map { $0.voucher.base64EncodedString() })
- os_log("preapprovedJoin voucher signatures: %@", log: tplogDebug, type: .debug, peer.vouchers.map { $0.sig.base64EncodedString() })
+ os_log("preapprovedJoin vouchers: %{public}@", log: tplogDebug, type: .debug, peer.vouchers.map { $0.voucher.base64EncodedString() })
+ os_log("preapprovedJoin voucher signatures: %{public}@", log: tplogDebug, type: .debug, peer.vouchers.map { $0.sig.base64EncodedString() })
os_log("preapprovedJoin: uploading %d tlk shares", log: tplogDebug, type: .default, allTLKShares.count)
do {
- os_log("preapprovedJoin peer: %@", log: tplogDebug, type: .debug, try peer.serializedData().base64EncodedString())
+ os_log("preapprovedJoin peer: %{public}@", log: tplogDebug, type: .debug, try peer.serializedData().base64EncodedString())
} catch {
- os_log("preapprovedJoin unable to encode peer: %@", log: tplogDebug, type: .debug, error as CVarArg)
+ os_log("preapprovedJoin unable to encode peer: %{public}@", log: tplogDebug, type: .debug, error as CVarArg)
}
let changeToken = self.containerMO.changeToken ?? ""
$0.viewKeys = viewKeys
}
self.cuttlefish.joinWithVoucher(request) { response, error in
- os_log("preapprovedJoin(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
guard let response = response, error == nil else {
- os_log("preapprovedJoin failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(nil, [], error ?? ContainerError.cloudkitResponseMissing)
+ os_log("preapprovedJoin failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(nil, [], nil, error ?? ContainerError.cloudkitResponseMissing)
return
}
do {
self.containerMO.egoPeerStableInfo = peer.stableInfoAndSig.peerStableInfo
self.containerMO.egoPeerStableInfoSig = peer.stableInfoAndSig.sig
+
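+ // Compute the syncing policy to return alongside the join result.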
+ let syncingPolicy = try self.syncingPolicyFor(modelID: selfPermanentInfo.modelID,
+ stableInfo: peerStableInfo)
+
try self.onQueuePersist(changes: response.changes)
os_log("preapprovedJoin succeeded", log: tplogDebug)
let keyHierarchyRecords = response.zoneKeyHierarchyRecords.compactMap { CKRecord($0) }
- reply(egoPeerID, keyHierarchyRecords, nil)
+ reply(egoPeerID, keyHierarchyRecords, syncingPolicy, nil)
} catch {
- os_log("preapprovedJoin failed: %@", log: tplogDebug, String(describing: error))
- reply(nil, [], error)
+ os_log("preapprovedJoin failed: %{public}@", log: tplogDebug, String(describing: error))
+ reply(nil, [], nil, error)
}
}
}
osVersion: String?,
policyVersion: UInt64?,
policySecrets: [String: Data]?,
- reply: @escaping (TrustedPeersHelperPeerState?, Error?) -> Void) {
+ syncUserControllableViews: TPPBPeerStableInfo_UserControllableViewStatus?,
+ reply: @escaping (TrustedPeersHelperPeerState?, TPSyncingPolicy?, Error?) -> Void) {
self.semaphore.wait()
- let reply: (TrustedPeersHelperPeerState?, Error?) -> Void = {
- os_log("update complete: %@", log: tplogTrace, type: .info, traceError($1))
+ let reply: (TrustedPeersHelperPeerState?, TPSyncingPolicy?, Error?) -> Void = {
+ os_log("update complete: %{public}@", log: tplogTrace, type: .info, traceError($2))
self.semaphore.signal()
- reply($0, $1)
+ reply($0, $1, $2)
}
// Get (and save) the latest from cuttlefish
osVersion: osVersion,
policyVersion: policyVersion,
policySecrets: policySecrets,
- recoverySigningPubKey: nil,
- recoveryEncryptionPubKey: nil)
+ setSyncUserControllableViews: syncUserControllableViews)
self.fetchChangesAndUpdateTrustIfNeeded(stableChanges: stableChanges, reply: reply)
}
func set(preapprovedKeys: [Data],
- reply: @escaping (Error?) -> Void) {
+ reply: @escaping (TrustedPeersHelperPeerState?, Error?) -> Void) {
self.semaphore.wait()
- let reply: (Error?) -> Void = {
- os_log("setPreapprovedKeys complete: %@", log: tplogTrace, type: .info, traceError($0))
+ let reply: (TrustedPeersHelperPeerState?, Error?) -> Void = {
+ os_log("setPreapprovedKeys complete: %{public}@", log: tplogTrace, type: .info, traceError($1))
self.semaphore.signal()
- reply($0)
+ reply($0, $1)
}
self.moc.performAndWait {
guard let egoPeerID = self.containerMO.egoPeerID else {
// No identity, nothing to do
os_log("setPreapprovedKeys: No identity.", log: tplogDebug, type: .default)
- reply(ContainerError.noPreparedIdentity)
+ reply(nil, ContainerError.noPreparedIdentity)
return
}
loadEgoKeyPair(identifier: signingKeyIdentifier(peerID: egoPeerID)) { signingKeyPair, error in
guard let signingKeyPair = signingKeyPair else {
- os_log("setPreapprovedKeys: no signing key pair: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(error ?? ContainerError.unableToCreateKeyPair)
+ os_log("setPreapprovedKeys: no signing key pair: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(nil, error ?? ContainerError.unableToCreateKeyPair)
return
}
signing: signingKeyPair,
currentMachineIDs: self.onqueueCurrentMIDList())
} catch {
- os_log("setPreapprovedKeys: couldn't calculate dynamic info: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(error)
+ os_log("setPreapprovedKeys: couldn't calculate dynamic info: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, error)
return
}
- os_log("setPreapprovedKeys: produced a dynamicInfo: %@", log: tplogDebug, type: .default, dynamicInfo)
+ os_log("setPreapprovedKeys: produced a dynamicInfo: %{public}@", log: tplogDebug, type: .default, dynamicInfo)
if dynamicInfo == self.model.peer(withID: egoPeerID)?.dynamicInfo {
os_log("setPreapprovedKeys: no change; nothing to do.", log: tplogDebug, type: .default)
- reply(nil)
+
+ // Calling this will fill in the peer status
+ self.updateTrustIfNeeded { status, _, error in
+ reply(status, error)
+ }
return
}
- os_log("setPreapprovedKeys: attempting updateTrust for %@ with: %@", log: tplogDebug, type: .default, egoPeerID, dynamicInfo)
+ os_log("setPreapprovedKeys: attempting updateTrust for %{public}@ with: %{public}@", log: tplogDebug, type: .default, egoPeerID, dynamicInfo)
let request = UpdateTrustRequest.with {
$0.changeToken = self.containerMO.changeToken ?? ""
$0.peerID = egoPeerID
$0.dynamicInfoAndSig = SignedPeerDynamicInfo(dynamicInfo)
}
- self.cuttlefish.updateTrust(request) { response, error in
- os_log("setPreapprovedKeys(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
- guard let response = response, error == nil else {
- os_log("setPreapprovedKeys failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(error ?? ContainerError.cloudkitResponseMissing)
- return
- }
- os_log("setPreapprovedKeys: updateTrust suceeded", log: tplogDebug, type: .default)
-
- do {
- try self.persist(changes: response.changes)
- } catch {
- os_log("setPreapprovedKeys: could not persist changes: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(error)
+ self.perform(updateTrust: request) { state, _, error in
+ guard error == nil else {
+ os_log("setPreapprovedKeys: failed: %{public}@", log: tplogDebug, type: .default, error as CVarArg? ?? "no error")
+ reply(state, error)
return
}
- reply(nil)
+ os_log("setPreapprovedKeys: updateTrust succeeded", log: tplogDebug, type: .default)
+ reply(state, nil)
}
}
}
reply: @escaping ([CKRecord]?, Error?) -> Void) {
self.semaphore.wait()
let reply: ([CKRecord]?, Error?) -> Void = {
- os_log("updateTLKs complete: %@", log: tplogTrace, type: .info, traceError($1))
+ os_log("updateTLKs complete: %{public}@", log: tplogTrace, type: .info, traceError($1))
self.semaphore.signal()
reply($0, $1)
}
os_log("Uploading some new TLKs: %@", log: tplogDebug, type: .default, ckksKeys)
self.moc.performAndWait {
- guard let egoPeerID = self.containerMO.egoPeerID,
- let egoPermData = self.containerMO.egoPeerPermanentInfo,
- let egoPermSig = self.containerMO.egoPeerPermanentInfoSig
- else {
- os_log("Have no self identity, can't make tlk shares", log: tplogDebug, type: .default)
- reply(nil, ContainerError.noPreparedIdentity)
- return
- }
+ self.onqueueUpdateTLKs(ckksKeys: ckksKeys, tlkShares: tlkShares, reply: reply)
+ }
+ }
+
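+ // Must be on moc queue to call this. Builds view keys and TLKShares for any new keysets and uploads them via updateTrust.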
+ func onqueueUpdateTLKs(ckksKeys: [CKKSKeychainBackedKeySet],
+ tlkShares: [CKKSTLKShare],
+ reply: @escaping ([CKRecord]?, Error?) -> Void) {
+ guard let egoPeerID = self.containerMO.egoPeerID,
+ let egoPermData = self.containerMO.egoPeerPermanentInfo,
+ let egoPermSig = self.containerMO.egoPeerPermanentInfoSig
+ else {
+ os_log("Have no self identity, can't make tlk shares", log: tplogDebug, type: .default)
+ reply(nil, ContainerError.noPreparedIdentity)
+ return
+ }
+
+ guard let selfPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: TPECPublicKeyFactory()) else {
+ os_log("Couldn't parse self identity", log: tplogDebug, type: .default)
+ reply(nil, ContainerError.invalidPermanentInfoOrSig)
+ return
+ }
- guard let selfPermanentInfo = TPPeerPermanentInfo(peerID: egoPeerID, data: egoPermData, sig: egoPermSig, keyFactory: TPECPublicKeyFactory()) else {
- os_log("Couldn't parse self identity", log: tplogDebug, type: .default, ckksKeys)
- reply(nil, ContainerError.invalidPermanentInfoOrSig)
+ loadEgoKeys(peerID: egoPeerID) { egoPeerKeys, error in
+ guard let egoPeerKeys = egoPeerKeys else {
+ os_log("Don't have my own peer keys; can't upload new TLKs: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ reply(nil, error)
return
}
+ self.moc.performAndWait {
+ guard let egoPeerDynamicInfo = self.model.getDynamicInfoForPeer(withID: egoPeerID) else {
+ os_log("Unable to fetch dynamic info for self", log: tplogDebug, type: .default)
+ reply(nil, ContainerError.missingDynamicInfo)
+ return
+ }
- loadEgoKeys(peerID: egoPeerID) { egoPeerKeys, error in
- guard let egoPeerKeys = egoPeerKeys else {
- os_log("Don't have my own peer keys; can't upload new TLKs: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "error missing")
+ let allTLKShares: [TLKShare]
+ let viewKeys: [ViewKeys]
+ do {
+ (viewKeys, allTLKShares) = try self.makeSharesForNewKeySets(ckksKeys: ckksKeys,
+ tlkShares: tlkShares,
+ egoPeerKeys: egoPeerKeys,
+ egoPeerDynamicInfo: egoPeerDynamicInfo,
+ epoch: Int(selfPermanentInfo.epoch))
+ } catch {
+ os_log("Unable to process keys before uploading: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
reply(nil, error)
return
}
- self.moc.performAndWait {
- guard let egoPeerDynamicInfo = self.model.getDynamicInfoForPeer(withID: egoPeerID) else {
- os_log("Unable to fetch dynamic info for self", log: tplogDebug, type: .default)
- reply(nil, ContainerError.missingDynamicInfo)
- return
- }
- let allTLKShares: [TLKShare]
- let viewKeys: [ViewKeys]
- do {
- (viewKeys, allTLKShares) = try self.makeSharesForNewKeySets(ckksKeys: ckksKeys,
- tlkShares: tlkShares,
- egoPeerKeys: egoPeerKeys,
- egoPeerDynamicInfo: egoPeerDynamicInfo,
- epoch: Int(selfPermanentInfo.epoch))
- } catch {
- os_log("Unable to process keys before uploading: %@", log: tplogDebug, type: .default, error as CVarArg)
+ let request = UpdateTrustRequest.with {
+ $0.changeToken = self.containerMO.changeToken ?? ""
+ $0.peerID = egoPeerID
+ $0.tlkShares = allTLKShares
+ $0.viewKeys = viewKeys
+ }
+
+ self.cuttlefish.updateTrust(request) { response, error in
+ guard error == nil else {
reply(nil, error)
return
}
- let request = UpdateTrustRequest.with {
- $0.changeToken = self.containerMO.changeToken ?? ""
- $0.peerID = egoPeerID
- $0.tlkShares = allTLKShares
- $0.viewKeys = viewKeys
- }
-
- self.cuttlefish.updateTrust(request) { response, error in
- os_log("UpdateTrust(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
-
- guard error == nil else {
- reply(nil, error)
- return
- }
-
- let keyHierarchyRecords = response?.zoneKeyHierarchyRecords.compactMap { CKRecord($0) } ?? []
- os_log("Recevied %d CKRecords back", log: tplogDebug, type: .default, keyHierarchyRecords.count)
- reply(keyHierarchyRecords, nil)
- }
+ let keyHierarchyRecords = response?.zoneKeyHierarchyRecords.compactMap { CKRecord($0) } ?? []
+ os_log("Received %d CKRecords back", log: tplogDebug, type: .default, keyHierarchyRecords.count)
+ reply(keyHierarchyRecords, nil)
}
}
}
func getState(reply: @escaping (ContainerState) -> Void) {
self.semaphore.wait()
let reply: (ContainerState) -> Void = {
- os_log("getState complete: %@", log: tplogTrace, type: .info, $0.egoPeerID ?? "<NULL>")
+ os_log("getState complete: %{public}@", log: tplogTrace, type: .info, $0.egoPeerID ?? "<NULL>")
self.semaphore.signal()
reply($0)
}
}
}
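+ // Include any cached escrow records (fully or partially viable) in the reported container state.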
+ if self.containerMO.fullyViableEscrowRecords != nil {
+ self.containerMO.fullyViableEscrowRecords!.forEach { record in
+ state.escrowRecords.insert(record as! EscrowRecordMO)
+ }
+ }
+
+ if self.containerMO.partiallyViableEscrowRecords != nil {
+ self.containerMO.partiallyViableEscrowRecords!.forEach { record in
+ state.escrowRecords.insert(record as! EscrowRecordMO)
+ }
+ }
+
self.model.allPeers().forEach { peer in
state.peers[peer.peerID] = peer
}
}
// This will only fetch changes if no changes have ever been fetched before
- private func fetchAndPersistChangesIfNeeded(reply: @escaping (Error?) -> Void) {
+ func fetchAndPersistChangesIfNeeded(reply: @escaping (Error?) -> Void) {
self.moc.performAndWait {
if self.containerMO.changeToken == nil {
self.onqueueFetchAndPersistChanges(reply: reply)
}
}
- private func fetchAndPersistChanges(reply: @escaping (Error?) -> Void) {
+ func fetchAndPersistChanges(reply: @escaping (Error?) -> Void) {
self.moc.performAndWait {
self.onqueueFetchAndPersistChanges(reply: reply)
}
let request = FetchChangesRequest.with {
$0.changeToken = self.containerMO.changeToken ?? ""
}
- os_log("Fetching with change token: %@", log: tplogDebug, type: .default, request.changeToken.count > 0 ? request.changeToken : "empty")
+ os_log("Fetching with change token: %{public}@", log: tplogDebug, type: .default, !request.changeToken.isEmpty ? request.changeToken : "empty")
self.cuttlefish.fetchChanges(request) { response, error in
- os_log("FetchChanges(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
guard let response = response, error == nil else {
switch error {
case CuttlefishErrorMatcher(code: CuttlefishErrorCode.changeTokenExpired):
do {
try self.deleteLocalCloudKitData()
} catch {
- os_log("Failed to reset local data: %@", log: tplogDebug, type: .default, error as CVarArg)
+ os_log("Failed to reset local data: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
reply(error)
return
}
return
default:
- os_log("Fetch error is an unknown error: %@", log: tplogDebug, type: .default, String(describing: error))
+ os_log("Fetch error is an unknown error: %{public}@", log: tplogDebug, type: .default, String(describing: error))
}
- os_log("Could not fetch changes: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ os_log("Could not fetch changes: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
reply(error)
return
}
do {
try self.persist(changes: response.changes)
} catch {
- os_log("Could not persist changes: %@", log: tplogDebug, type: .default, error as CVarArg)
+ os_log("Could not persist changes: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
reply(error)
return
}
guard let oldDynamicInfo = oldDynamicInfo else {
return true
}
- if (newDynamicInfo.includedPeerIDs != oldDynamicInfo.includedPeerIDs) {
+ if newDynamicInfo.includedPeerIDs != oldDynamicInfo.includedPeerIDs {
return true
}
- if (newDynamicInfo.excludedPeerIDs != oldDynamicInfo.excludedPeerIDs) {
+ if newDynamicInfo.excludedPeerIDs != oldDynamicInfo.excludedPeerIDs {
return true
}
- if (newDynamicInfo.preapprovals != oldDynamicInfo.preapprovals) {
+ if newDynamicInfo.preapprovals != oldDynamicInfo.preapprovals {
return true
}
return false
// the caller's responsibility to release it after it completes
// (i.e. after reply is invoked).
internal func fetchChangesAndUpdateTrustIfNeeded(stableChanges: StableChanges? = nil,
- changesPending: Bool = false,
- reply: @escaping (TrustedPeersHelperPeerState?, Error?) -> Void) {
+ peerChanges: Bool = false,
+ reply: @escaping (TrustedPeersHelperPeerState?, TPSyncingPolicy?, Error?) -> Void) {
self.fetchAndPersistChanges { error in
if let error = error {
- os_log("fetchChangesAndUpdateTrustIfNeeded: fetching failed: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(nil, error)
+ os_log("fetchChangesAndUpdateTrustIfNeeded: fetching failed: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(nil, nil, error)
return
}
- self.updateTrustIfNeeded(stableChanges: stableChanges, changesPending: changesPending, reply: reply)
+ self.updateTrustIfNeeded(stableChanges: stableChanges, peerChanges: peerChanges, reply: reply)
}
}
// the caller's responsibility to release it after it completes
// (i.e. after reply is invoked).
private func updateTrustIfNeeded(stableChanges: StableChanges? = nil,
- changesPending: Bool = false,
- reply: @escaping (TrustedPeersHelperPeerState?, Error?) -> Void) {
+ peerChanges: Bool = false,
+ reply: @escaping (TrustedPeersHelperPeerState?, TPSyncingPolicy?, Error?) -> Void) {
self.moc.performAndWait {
guard let egoPeerID = self.containerMO.egoPeerID else {
// No identity, nothing to do
os_log("updateTrustIfNeeded: No identity.", log: tplogDebug, type: .default)
- reply(TrustedPeersHelperPeerState(peerID: nil, isPreapproved: false, status: .unknown, memberChanges: changesPending, unknownMachineIDs: false, osVersion: nil), nil)
+ reply(TrustedPeersHelperPeerState(peerID: nil, isPreapproved: false, status: .unknown, memberChanges: peerChanges, unknownMachineIDs: false, osVersion: nil),
+ nil,
+ nil)
return
}
loadEgoKeyPair(identifier: signingKeyIdentifier(peerID: egoPeerID)) { signingKeyPair, error in
guard let signingKeyPair = signingKeyPair else {
- os_log("updateTrustIfNeeded: no signing key pair: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(TrustedPeersHelperPeerState(peerID: nil, isPreapproved: false, status: .unknown, memberChanges: changesPending, unknownMachineIDs: false, osVersion: nil), error)
+ os_log("updateTrustIfNeeded: no signing key pair: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(TrustedPeersHelperPeerState(peerID: nil, isPreapproved: false, status: .unknown, memberChanges: peerChanges, unknownMachineIDs: false, osVersion: nil),
+ nil,
+ error)
return
}
- guard self.model.hasPeer(withID: egoPeerID) else {
+ guard let currentSelfInModel = self.model.peer(withID: egoPeerID) else {
// Not in circle, nothing to do
let isPreapproved = self.model.hasPotentiallyTrustedPeerPreapprovingKey(signingKeyPair.publicKey().spki())
- os_log("updateTrustIfNeeded: ego peer is not in model, is %@", log: tplogDebug, type: .default, isPreapproved ? "preapproved" : "not yet preapproved")
+ os_log("updateTrustIfNeeded: ego peer is not in model, is %{public}@", log: tplogDebug, type: .default, isPreapproved ? "preapproved" : "not yet preapproved")
reply(TrustedPeersHelperPeerState(peerID: egoPeerID,
isPreapproved: isPreapproved,
status: .unknown,
- memberChanges: changesPending,
+ memberChanges: peerChanges,
unknownMachineIDs: false,
osVersion: nil),
+ nil,
nil)
return
}
- self.moc.performAndWait {
- let dynamicInfo: TPPeerDynamicInfo
- var stableInfo: TPPeerStableInfo?
- do {
- // FIXME We should be able to calculate the contents of dynamicInfo without the signingKeyPair,
- // and then only load the key if it has changed and we need to sign a new one. This would also
- // help make our detection of change immune from non-canonical serialization of dynamicInfo.
- dynamicInfo = try self.model.calculateDynamicInfoForPeer(withID: egoPeerID,
- addingPeerIDs: nil,
- removingPeerIDs: nil,
- preapprovedKeys: nil,
- signing: signingKeyPair,
- currentMachineIDs: self.onqueueCurrentMIDList())
- stableInfo = try self.createNewStableInfoIfNeeded(stableChanges: stableChanges,
- egoPeerID: egoPeerID,
- dynamicInfo: dynamicInfo,
- signingKeyPair: signingKeyPair)
- } catch {
- os_log("updateTrustIfNeeded: couldn't calculate dynamic info: %@", log: tplogDebug, type: .default, error as CVarArg)
- reply(TrustedPeersHelperPeerState(peerID: egoPeerID,
- isPreapproved: false,
- status: self.model.statusOfPeer(withID: egoPeerID),
- memberChanges: changesPending,
- unknownMachineIDs: false,
- osVersion: nil),
- error)
- return
+ // We need to try to have all policy versions that our peers claim to behave by
+ let allPolicyVersions = self.model.allPolicyVersions()
+ self.fetchPolicyDocumentsWithSemaphore(versions: allPolicyVersions) { _, policyFetchError in
+ if let error = policyFetchError {
+ os_log("updateTrustIfNeeded: error fetching all requested policies (continuing anyway): %{public}@", log: tplogDebug, type: .default, error as CVarArg)
}
- os_log("updateTrustIfNeeded: produced a stableInfo: %@", log: tplogDebug, type: .default, String(describing: stableInfo))
- os_log("updateTrustIfNeeded: produced a dynamicInfo: %@", log: tplogDebug, type: .default, dynamicInfo)
-
- let peer = self.model.peer(withID: egoPeerID)
- if (stableInfo == nil || stableInfo == peer?.stableInfo) &&
- dynamicInfo == peer?.dynamicInfo {
- os_log("updateTrustIfNeeded: complete.", log: tplogDebug, type: .default)
- // No change to the dynamicInfo: update the MID list now that we've reached a steady state
+ self.moc.performAndWait {
+ let dynamicInfo: TPPeerDynamicInfo
+ var stableInfo: TPPeerStableInfo?
do {
- self.onqueueUpdateMachineIDListFromModel(dynamicInfo: dynamicInfo)
- try self.moc.save()
+ // FIXME We should be able to calculate the contents of dynamicInfo without the signingKeyPair,
+ // and then only load the key if it has changed and we need to sign a new one. This would also
+ // help make our detection of change immune from non-canonical serialization of dynamicInfo.
+ dynamicInfo = try self.model.calculateDynamicInfoForPeer(withID: egoPeerID,
+ addingPeerIDs: nil,
+ removingPeerIDs: nil,
+ preapprovedKeys: nil,
+ signing: signingKeyPair,
+ currentMachineIDs: self.onqueueCurrentMIDList())
+
+ stableInfo = try self.createNewStableInfoIfNeeded(stableChanges: stableChanges,
+ permanentInfo: currentSelfInModel.permanentInfo,
+ existingStableInfo: currentSelfInModel.stableInfo,
+ dynamicInfo: dynamicInfo,
+ signingKeyPair: signingKeyPair)
} catch {
- os_log("updateTrustIfNeeded: unable to remove untrusted MachineIDs: %@", log: tplogDebug, type: .default, error as CVarArg)
+ os_log("updateTrustIfNeeded: couldn't calculate dynamic info: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ reply(TrustedPeersHelperPeerState(peerID: egoPeerID,
+ isPreapproved: false,
+ status: self.model.statusOfPeer(withID: egoPeerID),
+ memberChanges: peerChanges,
+ unknownMachineIDs: false,
+ osVersion: nil),
+ nil,
+ error)
+ return
}
- reply(TrustedPeersHelperPeerState(peerID: egoPeerID,
- isPreapproved: false,
- status: self.model.statusOfPeer(withID: egoPeerID),
- memberChanges: changesPending,
- unknownMachineIDs: self.onqueueFullIDMSListWouldBeHelpful(),
- osVersion: peer?.stableInfo?.osVersion),
- nil)
- return
- }
- // Check if we change that should trigger a notification that should trigger TLKShare updates
- let haveChanges = changesPending || self.haveTrustMemberChanges(newDynamicInfo: dynamicInfo, oldDynamicInfo: peer?.dynamicInfo)
+ os_log("updateTrustIfNeeded: produced a stableInfo: %{public}@", log: tplogDebug, type: .default, String(describing: stableInfo))
+ os_log("updateTrustIfNeeded: produced a dynamicInfo: %{public}@", log: tplogDebug, type: .default, dynamicInfo)
+
+ let peer = self.model.peer(withID: egoPeerID)
+ if (stableInfo == nil || stableInfo == peer?.stableInfo) &&
+ dynamicInfo == peer?.dynamicInfo {
+ os_log("updateTrustIfNeeded: complete.", log: tplogDebug, type: .default)
+ // No change to the dynamicInfo: update the MID list now that we've reached a steady state
+ do {
+ self.onqueueUpdateMachineIDListFromModel(dynamicInfo: dynamicInfo)
+ try self.moc.save()
+ } catch {
+ os_log("updateTrustIfNeeded: unable to remove untrusted MachineIDs: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ }
- let signedDynamicInfo = SignedPeerDynamicInfo(dynamicInfo)
- os_log("updateTrustIfNeeded: attempting updateTrust for %@ with: %@", log: tplogDebug, type: .default, egoPeerID, dynamicInfo)
- var request = UpdateTrustRequest.with {
- $0.changeToken = self.containerMO.changeToken ?? ""
- $0.peerID = egoPeerID
- $0.dynamicInfoAndSig = signedDynamicInfo
- }
- if let stableInfo = stableInfo {
- request.stableInfoAndSig = SignedPeerStableInfo(stableInfo)
- }
- self.cuttlefish.updateTrust(request) { response, error in
- os_log("UpdateTrust(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
- guard let response = response, error == nil else {
- os_log("UpdateTrust failed: %@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
- reply(nil, error ?? ContainerError.cloudkitResponseMissing)
- return
- }
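+ // No trust update is needed; still recompute the current syncing policy so the caller receives one in the reply.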
+ let syncingPolicy: TPSyncingPolicy?
+ do {
+ if let peer = self.model.peer(withID: egoPeerID), let stableInfo = peer.stableInfo {
+ syncingPolicy = try self.syncingPolicyFor(modelID: peer.permanentInfo.modelID, stableInfo: stableInfo)
+ } else {
+ syncingPolicy = nil
+ }
+ } catch {
+ os_log("updateTrustIfNeeded: unable to compute a new syncing policy: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
+ syncingPolicy = nil
+ }
- do {
- try self.persist(changes: response.changes)
- } catch {
- os_log("updateTrust failed: %@", log: tplogDebug, String(describing: error))
- reply(nil, error)
+ reply(TrustedPeersHelperPeerState(peerID: egoPeerID,
+ isPreapproved: false,
+ status: self.model.statusOfPeer(withID: egoPeerID),
+ memberChanges: peerChanges,
+ unknownMachineIDs: self.onqueueFullIDMSListWouldBeHelpful(),
+ osVersion: peer?.stableInfo?.osVersion),
+ syncingPolicy,
+ nil)
return
}
+ // Check whether there are trust-member changes that should trigger a notification (and therefore TLKShare updates)
+ let havePeerChanges = peerChanges || self.haveTrustMemberChanges(newDynamicInfo: dynamicInfo, oldDynamicInfo: peer?.dynamicInfo)
- if response.changes.more {
- self.fetchChangesAndUpdateTrustIfNeeded(stableChanges: stableChanges,
- changesPending: haveChanges,
- reply: reply)
- } else {
- self.updateTrustIfNeeded(stableChanges: stableChanges,
- changesPending: haveChanges,
- reply: reply)
+ let signedDynamicInfo = SignedPeerDynamicInfo(dynamicInfo)
+ os_log("updateTrustIfNeeded: attempting updateTrust for %{public}@ with: %{public}@", log: tplogDebug, type: .default, egoPeerID, dynamicInfo)
+ var request = UpdateTrustRequest.with {
+ $0.changeToken = self.containerMO.changeToken ?? ""
+ $0.peerID = egoPeerID
+ $0.dynamicInfoAndSig = signedDynamicInfo
}
+ if let stableInfo = stableInfo {
+ request.stableInfoAndSig = SignedPeerStableInfo(stableInfo)
+ }
+
+ self.perform(updateTrust: request, stableChanges: stableChanges, peerChanges: havePeerChanges, reply: reply)
}
}
}
}
}
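+ // Shared helper: send an updateTrust request to Cuttlefish, persist the returned changes, and then re-enter the trust-update loop (fetching again first if the server reports more changes).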
+ private func perform(updateTrust request: UpdateTrustRequest,
+ stableChanges: StableChanges? = nil,
+ peerChanges: Bool = false,
+ reply: @escaping (TrustedPeersHelperPeerState?, TPSyncingPolicy?, Error?) -> Void) {
+ self.cuttlefish.updateTrust(request) { response, error in
+ guard let response = response, error == nil else {
+ os_log("UpdateTrust failed: %{public}@", log: tplogDebug, type: .default, (error as CVarArg?) ?? "no error")
+ reply(nil, nil, error ?? ContainerError.cloudkitResponseMissing)
+ return
+ }
+
+ do {
+ try self.persist(changes: response.changes)
+ } catch {
+ os_log("UpdateTrust failed: %{public}@", log: tplogDebug, String(describing: error))
+ reply(nil, nil, error)
+ return
+ }
+
+ if response.changes.more {
+ self.fetchChangesAndUpdateTrustIfNeeded(stableChanges: stableChanges,
+ peerChanges: peerChanges,
+ reply: reply)
+ } else {
+ self.updateTrustIfNeeded(stableChanges: stableChanges,
+ peerChanges: peerChanges,
+ reply: reply)
+ }
+ }
+ }
+
private func persist(changes: Changes) throws {
// This is some nonsense: I can't figure out how to tell swift to throw an exception across performAndWait.
// So, do it ourself
os_log("persist: Received %d peer differences, more: %d", log: tplogDebug, type: .default,
changes.differences.count,
changes.more)
- os_log("persist: New change token: %@", log: tplogDebug, type: .default, changes.changeToken)
+ os_log("persist: New change token: %{public}@", log: tplogDebug, type: .default, changes.changeToken)
do {
try self.onQueuePersist(changes: changes)
self.containerMO.changeToken = changes.changeToken
self.containerMO.moreChanges = changes.more
- if changes.differences.count > 0 {
+ if !changes.differences.isEmpty {
self.model.clearViableBottles()
+ os_log("escrow cache and viable bottles are no longer valid", log: tplogDebug, type: .default)
+ self.onQueueRemoveEscrowCache()
}
try changes.differences.forEach { peerDifference in
if let operation = peerDifference.operation {
switch operation {
- case .add(let peer):
- try self.addOrUpdate(peer: peer)
-
- case .update(let peer):
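+ // Adds and updates are handled identically here.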
+ case .add(let peer), .update(let peer):
try self.addOrUpdate(peer: peer)
// Update containerMO ego data if it has changed.
if peer.peerID == self.containerMO.egoPeerID {
let signingKey = changes.recoverySigningPubKey
let encryptionKey = changes.recoveryEncryptionPubKey
- if signingKey.count > 0 && encryptionKey.count > 0 {
+ if !signingKey.isEmpty && !encryptionKey.isEmpty {
self.addOrUpdate(signingKey: signingKey, encryptionKey: encryptionKey)
}
try self.moc.save()
peerRequest.predicate = NSPredicate(format: "container == %@", self.containerMO)
try self.moc.execute(NSBatchDeleteRequest(fetchRequest: peerRequest))
- let bottleRequest = NSFetchRequest<NSFetchRequestResult>(entityName: "Bottle")
- bottleRequest.predicate = NSPredicate(format: "container == %@", self.containerMO)
- try self.moc.execute(NSBatchDeleteRequest(fetchRequest: bottleRequest))
+ // If we have an ego peer ID, keep the bottle associated with it
+ if let peerID = self.containerMO.egoPeerID, let bottles = self.containerMO.bottles {
+ let nonPeerBottles = NSSet(array: bottles.filter {
+ switch $0 {
+ case let bottleMO as BottleMO:
+ return bottleMO.peerID != peerID
+ default:
+ return false
+ }
+ })
+ self.containerMO.removeFromBottles(nonPeerBottles as NSSet)
+ } else {
+ let bottleRequest = NSFetchRequest<NSFetchRequestResult>(entityName: "Bottle")
+ bottleRequest.predicate = NSPredicate(format: "container == %@", self.containerMO)
+ try self.moc.execute(NSBatchDeleteRequest(fetchRequest: bottleRequest))
+ self.containerMO.bottles = nil
+ }
self.containerMO.peers = nil
- self.containerMO.bottles = nil
self.containerMO.changeToken = nil
self.containerMO.moreChanges = false
self.model = Container.loadModel(from: self.containerMO)
try self.moc.save()
} catch {
- os_log("Local delete failed: %@", log: tplogDebug, type: .default, error as CVarArg)
+ os_log("Local delete failed: %{public}@", log: tplogDebug, type: .default, error as CVarArg)
throw error
}
// Must be on moc queue to call this.
private func addOrUpdate(signingKey: Data, encryptionKey: Data) {
self.model.setRecoveryKeys(
- TPRecoveryKeyPair(signingSPKI: signingKey, encryptionSPKI: encryptionKey))
+ TPRecoveryKeyPair(signingKeyData: signingKey, encryptionKeyData: encryptionKey))
+
+ self.containerMO.recoveryKeySigningSPKI = signingKey
+ self.containerMO.recoveryKeyEncryptionSPKI = encryptionKey
}
// Must be on moc queue to call this.
} else {
// Update:
// The assertion here is that every peer registered in model is also present in containerMO
- let peerMO = try self.fetchPeerMO(peerID: peer.peerID)!
+ guard let peerMO = try self.fetchPeerMO(peerID: peer.peerID) else {
+ throw ContainerError.peerRegisteredButNotStored(peer.peerID)
+ }
if let stableInfo = peer.stableInfoAndSig.toStableInfo() {
try self.model.update(stableInfo, forPeerWithID: peer.peerID)
peer.vouchers.forEach {
if let voucher = TPVoucher(infoWith: $0.voucher, sig: $0.sig) {
self.model.register(voucher)
- let voucherMO = VoucherMO(context: self.moc)
- voucherMO.voucherInfo = voucher.data
- voucherMO.voucherInfoSig = voucher.sig
- peerMO.addToVouchers(voucherMO)
+ // Save this voucher in the database only if an identical one isn't already stored for this peer
+ let existingVouchers = (peerMO.vouchers as? Set<VoucherMO>) ?? Set()
+ if existingVouchers.filter({ $0.voucherInfo == voucher.data && $0.voucherInfoSig == voucher.sig }).isEmpty {
+ let voucherMO = VoucherMO(context: self.moc)
+ voucherMO.voucherInfo = voucher.data
+ voucherMO.voucherInfoSig = voucher.sig
+ peerMO.addToVouchers(voucherMO)
+ }
}
}
}
guard let policyDoc = self.model.policy(withVersion: policyVersion) else {
throw ContainerError.unknownPolicyVersion(policyVersion)
}
- assert(policyVersion == policyDoc.policyVersion)
- if policyVersion == prevailingPolicyVersion {
- assert(policyDoc.policyHash == prevailingPolicyHash)
+ assert(policyVersion == policyDoc.version.versionNumber)
+ if policyVersion == prevailingPolicyVersion.versionNumber {
+ assert(policyDoc.version.policyHash == prevailingPolicyVersion.policyHash)
}
return policyDoc
}
// Must be on moc queue to call this.
private func createNewStableInfoIfNeeded(stableChanges: StableChanges?,
- egoPeerID: String,
+ permanentInfo: TPPeerPermanentInfo,
+ existingStableInfo: TPPeerStableInfo?,
dynamicInfo: TPPeerDynamicInfo,
signingKeyPair: _SFECKeyPair) throws -> TPPeerStableInfo? {
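// A nil value for `change` means the caller isn't requesting an update to that field,
// so the existing value is considered unchanged.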
func noChange<T: Equatable>(_ change: T?, _ existing: T?) -> Bool {
return (nil == change) || change == existing
}
- let existingStableInfo = self.model.peer(withID: egoPeerID)?.stableInfo
+
+ let policyOfPeers = try? self.model.policy(forPeerIDs: dynamicInfo.includedPeerIDs,
+ candidatePeerID: permanentInfo.peerID,
+ candidateStableInfo: existingStableInfo)
+
+ // Pick the best version of:
+ // 1. The policy version asked for by the client
+ // 2. The policy override set on this object (tests only)
+ // 3. The max of our existing policyVersion, the highest policy used by our trusted peers, and the compile-time prevailing policy version
+ let optimalPolicyVersionNumber = stableChanges?.policyVersion ??
+ self.policyVersionOverride?.versionNumber ??
+ max(existingStableInfo?.bestPolicyVersion().versionNumber ?? prevailingPolicyVersion.versionNumber,
+ policyOfPeers?.version.versionNumber ?? prevailingPolicyVersion.versionNumber,
+ prevailingPolicyVersion.versionNumber)
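+ // For example (illustrative numbers only): with no client request and no test override, an existing
+ // stableInfo on policy version 7, trusted peers whose shared policy is version 9, and a prevailing
+ // version of 8, the max() above resolves to 9.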
+
+ // Determine which recovery key we'd like to be using, given our current idea of who to trust
+ let optimalRecoveryKey = self.model.bestRecoveryKey(for: existingStableInfo, dynamicInfo: dynamicInfo)
+
+ let intendedSyncUserControllableViews = stableChanges?.setSyncUserControllableViews?.sanitizeForPlatform(permanentInfo: permanentInfo)
+
if noChange(stableChanges?.deviceName, existingStableInfo?.deviceName) &&
noChange(stableChanges?.serialNumber, existingStableInfo?.serialNumber) &&
noChange(stableChanges?.osVersion, existingStableInfo?.osVersion) &&
- noChange(stableChanges?.policyVersion, existingStableInfo?.policyVersion) &&
+ noChange(optimalPolicyVersionNumber, existingStableInfo?.bestPolicyVersion().versionNumber) &&
noChange(stableChanges?.policySecrets, existingStableInfo?.policySecrets) &&
- noChange(stableChanges?.recoverySigningPubKey, existingStableInfo?.recoverySigningPublicKey) &&
- noChange(stableChanges?.recoveryEncryptionPubKey, existingStableInfo?.recoveryEncryptionPublicKey) &&
- self.model.doesPeerRecoveryKeyMatchPeers(egoPeerID) {
+ noChange(optimalRecoveryKey?.signingKeyData, existingStableInfo?.recoverySigningPublicKey) &&
+ noChange(optimalRecoveryKey?.encryptionKeyData, existingStableInfo?.recoveryEncryptionPublicKey) &&
+ noChange(intendedSyncUserControllableViews, existingStableInfo?.syncUserControllableViews) {
return nil
}
- let policyHash: String?
- if let policyVersion = stableChanges?.policyVersion {
- let policyDoc = try self.getPolicyDoc(policyVersion)
- policyHash = policyDoc.policyHash
- } else {
- policyHash = nil
- }
- // Determine which recovery key we'd like to be using, given our current idea of who to trust
- let newRecoveryKeys = self.model.bestRecoveryKey(with: dynamicInfo)
+ // If a test has asked for a policy version from before we froze this policy, don't set a flexible version: it's trying to build a peer from before the policy was frozen
+ let optimalPolicyVersion = try self.getPolicyDoc(optimalPolicyVersionNumber).version
+ let useFrozenPolicyVersion = optimalPolicyVersion.versionNumber >= frozenPolicyVersion.versionNumber
+
+ if let intendedSyncUserControllableViews = intendedSyncUserControllableViews {
+ os_log("Intending to set user-controllable views to %{public}@", log: tplogTrace, type: .info, TPPBPeerStableInfo_UserControllableViewStatusAsString(intendedSyncUserControllableViews))
+ }
- return try self.model.createStableInfo(withPolicyVersion: stableChanges?.policyVersion ?? existingStableInfo?.policyVersion ?? prevailingPolicyVersion,
- policyHash: policyHash ?? existingStableInfo?.policyHash ?? prevailingPolicyHash,
+ return try self.model.createStableInfo(withFrozenPolicyVersion: useFrozenPolicyVersion ? frozenPolicyVersion : optimalPolicyVersion,
+ flexiblePolicyVersion: useFrozenPolicyVersion ? optimalPolicyVersion : nil,
policySecrets: stableChanges?.policySecrets ?? existingStableInfo?.policySecrets,
+ syncUserControllableViews: intendedSyncUserControllableViews ?? existingStableInfo?.syncUserControllableViews ?? .UNKNOWN,
deviceName: stableChanges?.deviceName ?? existingStableInfo?.deviceName ?? "",
serialNumber: stableChanges?.serialNumber ?? existingStableInfo?.serialNumber ?? "",
osVersion: stableChanges?.osVersion ?? existingStableInfo?.osVersion ?? "",
signing: signingKeyPair,
- recoverySigningPubKey: newRecoveryKeys?.signingSPKI ?? existingStableInfo?.recoverySigningPublicKey,
- recoveryEncryptionPubKey: newRecoveryKeys?.encryptionSPKI ?? existingStableInfo?.recoveryEncryptionPublicKey)
+ recoverySigningPubKey: optimalRecoveryKey?.signingKeyData,
+ recoveryEncryptionPubKey: optimalRecoveryKey?.encryptionKeyData)
}
private func assembleBottle(egoPeerID: String) throws -> Bottle {
if let count = bottleMOs?.count {
if count > 1 {
throw ContainerError.tooManyBottlesForPeer
+ // swiftlint:disable empty_count
} else if count == 0 {
+ // swiftlint:enable empty_count
throw ContainerError.noBottleForPeer
}
} else {
func reportHealth(request: ReportHealthRequest, reply: @escaping (Error?) -> Void) {
self.semaphore.wait()
let reply: (Error?) -> Void = {
- os_log("reportHealth complete %@", log: tplogTrace, type: .info, traceError($0))
+ os_log("reportHealth complete %{public}@", log: tplogTrace, type: .info, traceError($0))
self.semaphore.signal()
reply($0)
}
}
self.moc.performAndWait {
- self.cuttlefish.reportHealth(updatedRequest) { response, error in
- os_log("reportHealth(%@): %@, error: %@", log: tplogDebug, "\(String(describing: request))", "\(String(describing: response))", "\(String(describing: error))")
+ self.cuttlefish.reportHealth(updatedRequest) { _, error in
guard error == nil else {
reply(error)
return
func pushHealthInquiry(reply: @escaping (Error?) -> Void) {
self.semaphore.wait()
let reply: (Error?) -> Void = {
- os_log("reportHealth complete %@", log: tplogTrace, type: .info, traceError($0))
+ os_log("reportHealth complete %{public}@", log: tplogTrace, type: .info, traceError($0))
self.semaphore.signal()
reply($0)
}
self.moc.performAndWait {
- self.cuttlefish.pushHealthInquiry(PushHealthInquiryRequest()) { response, error in
- os_log("pushHealthInquiry(): %@, error: %@", log: tplogDebug, "\(String(describing: response))", "\(String(describing: error))")
+ self.cuttlefish.pushHealthInquiry(PushHealthInquiryRequest()) { _, error in
guard error == nil else {
reply(error)
return