From c069fc5168a8318bd73e8422a37777d89223493d Mon Sep 17 00:00:00 2001 From: Michael Reneer Date: Fri, 17 May 2024 12:44:05 -0700 Subject: [PATCH] Update the GitHub repository URL for TFF. PiperOrigin-RevId: 634858992 --- CITATION.cff | 2 +- CONTRIBUTING.md | 4 +- README.md | 15 +++-- RELEASE.md | 2 + WORKSPACE | 2 +- docs/_index.yaml | 4 +- docs/collaborations/notes/2022-02-16.md | 2 +- docs/collaborations/notes/2022-09.29.md | 2 +- docs/deployment.md | 4 +- docs/design/backend.md | 20 +++--- docs/design/compilation.md | 22 +++---- docs/design/context.md | 34 +++++----- docs/design/execution.md | 10 +-- docs/design/package_structure_analytics.dot | 10 +-- docs/design/package_structure_core.dot | 52 +++++++-------- docs/design/package_structure_learning.dot | 16 ++--- docs/design/package_structure_overview.dot | 10 +-- docs/design/package_structure_simulation.dot | 10 +-- docs/design/tracing.md | 18 ++--- docs/faq.md | 4 +- docs/federated_core.md | 2 +- docs/federated_learning.md | 2 +- docs/install.md | 4 +- docs/learning/federated_program_guide.md | 4 +- docs/program/federated_program.md | 6 +- docs/program/guide.md | 20 +++--- docs/tff_for_research.md | 36 +++++----- examples/personalization/README.md | 10 +-- examples/program/program.py | 2 +- examples/simple_fedavg/README.md | 66 +++++++++---------- examples/stateful_clients/README.md | 2 +- requirements.txt | 6 +- tensorflow_federated/data/README.md | 2 +- .../tools/python_package/setup.py | 12 ++-- 34 files changed, 212 insertions(+), 205 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index 7b1c569073..756cc93f89 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -6,4 +6,4 @@ authors: title: "TensorFlow Federated" version: 0.78.0 date-released: 2018-12-12 -url: "https://github.com/tensorflow/federated" +url: "https://github.com/google-parfait/tensorflow-federated" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8160518a9d..bf38ab9dae 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -37,7 +37,7 
@@ information on using pull requests. ## Collaborations If you are interested in collaborating to grow the TFF ecosystem, please see the -[collaborations page](https://github.com/tensorflow/federated/blob/main/docs/collaborations/README.md) +[collaborations page](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/collaborations/README.md) and/or join our [Discord server](https://discord.com/invite/5shux83qZ5) to engage in conversations with other developers building on or contributing to the TFF ecosystem. @@ -80,7 +80,7 @@ git diff --name-only \ Include a license at the top of new files. -* [Python license example](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/__init__.py#L1) +* [Python license example](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/__init__.py#L1) ### TensorFlow Guidelines. diff --git a/README.md b/README.md index 71f2a27c05..3a7fe2bae5 100644 --- a/README.md +++ b/README.md @@ -17,12 +17,12 @@ computations, such as aggregated analytics over decentralized data. TFF's interfaces are organized in two layers: -* [Federated Learning (FL) API](https://github.com/tensorflow/federated/blob/main/docs/federated_learning.md) +* [Federated Learning (FL) API](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/federated_learning.md) The `tff.learning` layer offers a set of high-level interfaces that allow developers to apply the included implementations of federated training and evaluation to their existing TensorFlow models. 
-* [Federated Core (FC) API](https://github.com/tensorflow/federated/blob/main/docs/federated_core.md) +* [Federated Core (FC) API](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/federated_core.md) At the core of the system is a set of lower-level interfaces for concisely expressing novel federated algorithms by combining TensorFlow with distributed communication operators within a strongly-typed functional @@ -37,14 +37,14 @@ and try it out yourself! ## Installation See the -[install](https://github.com/tensorflow/federated/blob/main/docs/install.md) +[install](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/install.md) documentation for instructions on how to install TensorFlow Federated as a package or build TensorFlow Federated from source. ## Getting Started See the -[get started](https://github.com/tensorflow/federated/blob/main/docs/get_started.md) +[get started](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/get_started.md) documentation for instructions on how to use TensorFlow Federated. ## Contributing @@ -80,13 +80,14 @@ There are a number of ways to contribute depending on what you're interested in: infrastructure. Please be sure to review the -[contribution](https://github.com/tensorflow/federated/blob/main/CONTRIBUTING.md#guidelines) +[contribution](https://github.com/google-parfait/tensorflow-federated/blob/main/CONTRIBUTING.md#guidelines) guidelines on how to contribute. ## Issues -Use [GitHub issues](https://github.com/tensorflow/federated/issues) for tracking -requests and bugs. +Use +[GitHub issues](https://github.com/google-parfait/tensorflow-federated/issues) +for tracking requests and bugs. ## Questions diff --git a/RELEASE.md b/RELEASE.md index e71076bed4..b4f18964bd 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -3,6 +3,8 @@ ## Breaking Changes * Updated `com_github_grpc_grpc` to version `1.50.0`. 
+* Moved the TFF repository from https://github.com/tensorflow/federated to + https://github.com/google-parfait/tensorflow-federated. # Release 0.78.0 diff --git a/WORKSPACE b/WORKSPACE index 298409b74e..f0d9849d46 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -88,7 +88,7 @@ http_archive( # ) # The version of TensorFlow should match the version in -# https://github.com/tensorflow/federated/blob/main/requirements.txt. +# https://github.com/google-parfait/tensorflow-federated/blob/main/requirements.txt. http_archive( name = "org_tensorflow", url = "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.14.0.tar.gz", diff --git a/docs/_index.yaml b/docs/_index.yaml index f7e91b7cf5..1b50feecde 100644 --- a/docs/_index.yaml +++ b/docs/_index.yaml @@ -127,10 +127,10 @@ landing_page: items: - heading: "TF Federated on GitHub" image_path: /resources/images/github-card-16x9.png - path: https://github.com/tensorflow/federated + path: https://github.com/google-parfait/tensorflow-federated buttons: - label: "View on GitHub" - path: https://github.com/tensorflow/federated + path: https://github.com/google-parfait/tensorflow-federated - heading: "Federated Learning: Collaborative Machine Learning without Centralized Training Data" image_path: /resources/images/google-research-card-16x9.png path: https://ai.googleblog.com/2017/04/federated-learning-collaborative.html diff --git a/docs/collaborations/notes/2022-02-16.md b/docs/collaborations/notes/2022-02-16.md index 2d594c96cc..9b43f27c6f 100644 --- a/docs/collaborations/notes/2022-02-16.md +++ b/docs/collaborations/notes/2022-02-16.md @@ -172,7 +172,7 @@ * [ostrowski] Communicating openly * What to make publicly available (on - [the GitHub landing page](https://github.com/tensorflow/federated/blob/main/docs/collaborations/README.md)) + [the GitHub landing page](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/collaborations/README.md)) * Summary of discussions and decisions from this and follow-up 
meetings to be made available within a few days after each meeting on th GitHub page * Links to artifacts (any plans, roadmaps, design docs, etc. to be diff --git a/docs/collaborations/notes/2022-09.29.md b/docs/collaborations/notes/2022-09.29.md index 357767e756..805fc1cc50 100644 --- a/docs/collaborations/notes/2022-09.29.md +++ b/docs/collaborations/notes/2022-09.29.md @@ -17,7 +17,7 @@ * Suspects: TFF setup cost, communication * Overlapping data ingestion and computation likely to help * Code to support this upcoming - * https://github.com/tensorflow/federated/blob/main/tensorflow\_federated/python/program/prefetching\_data\_source.py + * https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow\_federated/python/program/prefetching\_data\_source.py * Discussion continued on the Discord server * Versioning - also relevant to OpenMined (need to resolve 3-way) * To discuss at a future meeting diff --git a/docs/deployment.md b/docs/deployment.md index 179d5dbd76..bb3479ea48 100644 --- a/docs/deployment.md +++ b/docs/deployment.md @@ -14,14 +14,14 @@ There are two principal modes of deployment for TFF computations: * **Native backends**. We're going to refer to a backend as *native* if it is capable of interpreting the syntactic structure of TFF computations as defined in - [`computation.proto`](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/proto/v0/computation.proto). + [`computation.proto`](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/proto/v0/computation.proto). A native backend does not necessarily have to support all language constructs or intrinsics. 
Native backends must implement one of the standard TFF *executor* interfaces, such as [`tff.framework.Executor`](https://www.tensorflow.org/federated/api_docs/python/tff/framework/Executor) for consumption by Python code, or the language-independent version of it defined in - [`executor.proto`](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/proto/v0/executor.proto) + [`executor.proto`](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/proto/v0/executor.proto) exposed as a gRPC endpoint. Native backends that support the above interfaces can be used interactively diff --git a/docs/design/backend.md b/docs/design/backend.md index 55fc6d1453..960475fed0 100644 --- a/docs/design/backend.md +++ b/docs/design/backend.md @@ -9,7 +9,7 @@ an [AST](compilation.md#ast), meaning a backend constructs environments that evaluate an AST. The -[backends](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends) +[backends](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends) package contains backends which may extend the TFF compiler and/or the TFF runtime; these extensions can be found in the corresponding backend. @@ -27,7 +27,7 @@ low-level abstraction. ``` The **blue** nodes are provided by TFF -[core](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core). +[core](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core). The **green**, **red**, **yellow**, and **purple** nodes are provided by the [native](#native), [mapreduce](#mapreduce), and [reference](#reference) backends @@ -41,7 +41,7 @@ inheritance. 
## Native The -[native](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/native) +[native](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/native) backend composes of the TFF compiler and TFF runtime in order to compile and execute an AST in a way that is reasonably efficient and debuggable. @@ -54,7 +54,7 @@ intrinsics. ### Compiler The -[compiler.transform_to_native_form](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/native/compiler.py) +[compiler.transform_to_native_form](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/native/compiler.py) function compiles an AST into a [native form](#native-form). ### Runtime @@ -81,37 +81,37 @@ set_default_context.set_default_context(context) However, there are some common configurations: The -[execution_context.set_sync_local_cpp_execution_context](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/native/execution_context.py) +[execution_context.set_sync_local_cpp_execution_context](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/native/execution_context.py) function constructs an `ExecutionContext` with a native compiler and a [local execution stack](execution.md#local-execution-stack). ## MapReduce The -[mapreduce](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/mapreduce) +[mapreduce](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/mapreduce) backend contains the data structures and compiler required to construct a form that can be executed on MapReduce-like runtimes. 
### `MapReduceForm` A -[forms.MapReduceForm](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/mapreduce/forms.py) +[forms.MapReduceForm](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/mapreduce/forms.py) is a data structure defining the representation of logic that can be executed on MapReduce-like runtimes. This logic is organized as a collection of TensorFlow functions, see the -[forms](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/mapreduce/forms.py) +[forms](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/mapreduce/forms.py) module for more information about the nature of these functions. ### Compiler The -[compiler](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/mapreduce/compiler.py) +[compiler](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/mapreduce/compiler.py) module contains [Building Block](compilation.md#building-block) and [TensorFlow Computation](compilation.md#tensorflow-computation) transformations required to compile an AST to a [MapReduceForm](#canonicalform). The -[form_utils](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/mapreduce/form_utils.py) +[form_utils](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/mapreduce/form_utils.py) module contains the compiler for the MapReduce backend and constructs a [MapReduceForm](#canonicalform). 
diff --git a/docs/design/compilation.md b/docs/design/compilation.md index 4f10795f8f..703d78768f 100644 --- a/docs/design/compilation.md +++ b/docs/design/compilation.md @@ -3,7 +3,7 @@ [TOC] The -[compiler](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler) +[compiler](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler) package contains data structures defining the Python representation of the [AST](#ast), core [transformation](#transformation) functions, and [compiler](#compiler) related functionality. @@ -16,15 +16,15 @@ computation. ### Building Block A -[building_block.ComputationBuildingBlock](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) +[building_block.ComputationBuildingBlock](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) is the Python representation of the [AST](#ast). #### `CompiledComputation` A -[building_block.CompiledComputation](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) +[building_block.CompiledComputation](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) is a -[building_block.ComputationBuildingBlock](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) +[building_block.ComputationBuildingBlock](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) that represents a computation that will be delegated to an [external runtime](execution.md#external-runtime). 
Currently TFF only supports [TensorFlow computations](#tensorFlow-computation), but could be expanded to @@ -33,13 +33,13 @@ support [Computations](#computation) backed by other external runtimes. ### `Computation` A -[pb.Computation](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/proto/v0/computation.proto) +[pb.Computation](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/proto/v0/computation.proto) is the Proto or serialized representation of the [AST](#ast). #### TensorFlow Computation A -[pb.Computation](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/proto/v0/computation.proto) +[pb.Computation](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/proto/v0/computation.proto) that represents a [Computations](#computation) that will be delegated to the [TensorFlow](execution.md#tensorflow) runtime. @@ -66,25 +66,25 @@ about; as a result, composite transformations are hand-crafted and most are somewhat fragile. The -[tree_transformations](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/tree_transformations.py) +[tree_transformations](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/tree_transformations.py) module contains atomic [building block](#building-block) transformations. The -[transformations](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/transformations.py) +[transformations](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/transformations.py) module contains composite [building block](#building-block) transformations. 
The -[tensorflow_computation_transformations](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backend/tensorflow_computation_transformations.py) +[tensorflow_computation_transformations](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backend/tensorflow_computation_transformations.py) module contains atomic [TensorFlow computation](#tensorflow-computation) transformations. The -[compiled_computation_transformations](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backend/compiled_computation_transformations.py) +[compiled_computation_transformations](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backend/compiled_computation_transformations.py) module contains atomic and composite [Compiled Computation](#compiled-computation) transformations. The -[transformation_utils](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/transformation_utils.py) +[transformation_utils](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/transformation_utils.py) module contains functions, traversal logic, and data structures used by other transformation modules. 
diff --git a/docs/design/context.md b/docs/design/context.md index 08f59602b6..8cf48377b3 100644 --- a/docs/design/context.md +++ b/docs/design/context.md @@ -5,9 +5,9 @@ ## `Context` A -[context_base.SyncContext](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_base.py) +[context_base.SyncContext](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_base.py) or -[context_base.AsyncContext](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_base.py) +[context_base.AsyncContext](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_base.py) is an environment that can [construct](tracing.md), [compile](compilation.md), or [execute](execution.md) an [AST](compilation.md#ast). @@ -18,11 +18,11 @@ This API defines a **low-level abstraction** that should be used when an ### `ExecutionContext` An -[execution_context.ExecutionContext](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/execution_contexts/execution_context.py) +[execution_context.ExecutionContext](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/execution_contexts/execution_context.py) is -[context_base.SyncContext](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_base.py) +[context_base.SyncContext](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_base.py) or -[context_base.AsyncContext](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_base.py) 
+[context_base.AsyncContext](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_base.py) that compiles computations using a compilation function and executes computations using an [Executor](execution.md#executor). @@ -33,25 +33,25 @@ This API defines a **high-level abstraction** that should be used when an ### `FederatedComputationContext` A -[federated_computation_context.FederatedComputationContext](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/federated_context/federated_computation_context.py) +[federated_computation_context.FederatedComputationContext](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/federated_context/federated_computation_context.py) is a context that constructs federated computations. This context is used trace Python functions decorated with the -[federated_computation.federated_computation](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/federated_context/federated_computation.py) +[federated_computation.federated_computation](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/federated_context/federated_computation.py) decorator. ### `TensorFlowComputationContext` A -[tensorflow_computation_context.TensorFlowComputationContext](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backends/tensorflow_computation_context.py) +[tensorflow_computation_context.TensorFlowComputationContext](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backends/tensorflow_computation_context.py) is a context that constructs TensorFlow computations. 
This context is used to serialize Python functions decorated with the -[tensorflow_computation.tf_computation](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backends/tensorflow_computation.py) +[tensorflow_computation.tf_computation](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backends/tensorflow_computation.py) decorator. ## `ContextStack` A -[context_stack_base.ContextStack](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_stack_base.py) +[context_stack_base.ContextStack](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_stack_base.py) is a data structure for interacting with a stack of [Contexts](#context). You can set the context TFF will use to [construct](tracing.md), @@ -59,25 +59,25 @@ You can set the context TFF will use to [construct](tracing.md), [AST](compilation.md#ast) by: * Invoking - [set_default_context.set_default_context](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/set_default_context.py) + [set_default_context.set_default_context](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/set_default_context.py) to set the default context. This API is often used to install a context that will compile or execute a computation. 
* Invoking - [get_context_stack.get_context_stack](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/get_context_stack.py) + [get_context_stack.get_context_stack](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/get_context_stack.py) to get the current context stack and then invoking - [context_stack_base.ContextStack.install](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_stack_base.py) + [context_stack_base.ContextStack.install](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_stack_base.py) to temporarily install a context onto the top of the stack. For example, the - [federated_computation.federated_computation](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/federated_context/federated_computation.py) + [federated_computation.federated_computation](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/federated_context/federated_computation.py) and - [tensorflow_computation.tf_computation](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backends/tensorflow_computation.py) + [tensorflow_computation.tf_computation](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backends/tensorflow_computation.py) decorators push the corresponding contexts onto the current context stack while the decorated function is being traced. 
### `ContextStackImpl` A -[context_stack_impl.ContextStackImpl](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_stack_impl.py) +[context_stack_impl.ContextStackImpl](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_stack_impl.py) is a -[context_stack_base.ContextStack](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_stack_base.py) +[context_stack_base.ContextStack](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack/context_stack_base.py) that is implemented as a common thread-local stack. diff --git a/docs/design/execution.md b/docs/design/execution.md index c3162a9923..7cf02b72c1 100644 --- a/docs/design/execution.md +++ b/docs/design/execution.md @@ -3,7 +3,7 @@ [TOC] The -[executors](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/executors) +[executors](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/executors) package contains core [Executors](#executor) classes and [runtime](#runtime) related functionality. @@ -31,16 +31,16 @@ referred to as an [execution stack](#execution-stack). ## `Executor` An -[executor_base.Executor](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/executors/executor_base.py) +[executor_base.Executor](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/executors/executor_base.py) is an abstract interface that defines the API for executing an [AST](compilation.md#ast). 
The -[executors](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/executors) +[executors](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/executors) package contains a collection of concrete implementations of this interface. ## `ExecutorFactory` An -[executor_factory.ExecutorFactory](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/executors/executor_factory.py) +[executor_factory.ExecutorFactory](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/executors/executor_factory.py) is an abstract interface that defines the API for constructing an [Executor](#executor). These factories construct the executor lazily and manage the lifecycle of the executor; the motivation to lazily constructing executors @@ -49,5 +49,5 @@ is to infer the number of clients at execution time. ## Execution Stack An execution stack is a hierarchy of [Executors](#executor). The -[executor_stacks](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/executor_stacks) +[executor_stacks](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/executor_stacks) package contains logic for constructing and composing specific execution stacks. 
diff --git a/docs/design/package_structure_analytics.dot b/docs/design/package_structure_analytics.dot index 779440ce87..a20bf77eeb 100644 --- a/docs/design/package_structure_analytics.dot +++ b/docs/design/package_structure_analytics.dot @@ -4,11 +4,11 @@ digraph { edge [color="#616161"] subgraph cluster_analytics { - graph [label="Analytics", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/analytics" target="_parent"] - iblt [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/analytics/heavy_hitters/iblt" target="_parent"] - heavy_hitters [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/analytics/heavy_hitters" target="_parent"] - hierarchical_histogram [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/analytics/hierarchical_histogram" target="_parent"] - analytics [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/analytics" target="_parent"] + graph [label="Analytics", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/analytics" target="_parent"] + iblt [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/analytics/heavy_hitters/iblt" target="_parent"] + heavy_hitters [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/analytics/heavy_hitters" target="_parent"] + hierarchical_histogram [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/analytics/hierarchical_histogram" target="_parent"] + analytics [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/analytics" target="_parent"] } iblt -> heavy_hitters diff --git a/docs/design/package_structure_core.dot b/docs/design/package_structure_core.dot index efbe5c0990..eeca361029 100644 --- 
a/docs/design/package_structure_core.dot +++ b/docs/design/package_structure_core.dot @@ -4,11 +4,11 @@ digraph { edge [color="#616161"] subgraph cluster_backends { - graph [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends" target="_parent"] - mapreduce [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/mapreduce" target="_parent"] - native [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/native" target="_parent"] - backends_test [label="test", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/test" target="_parent"] - xla [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/backends/xla" target="_parent"] + graph [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends" target="_parent"] + mapreduce [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/mapreduce" target="_parent"] + native [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/native" target="_parent"] + backends_test [label="test", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/test" target="_parent"] + xla [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/backends/xla" target="_parent"] } // TODO: b/233800075 - Remove dependency from `native` to `mapreduce`. 
@@ -21,11 +21,11 @@ digraph { backends_test -> tensorflow_frontend [arrowhead="none", style="invisible"] subgraph cluster_environments { - graph [label="Environments", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments" target="_parent"] - jax_frontend [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments/jax_frontend" target="_parent"] - xla_backend [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments/xla_backend" target="_parent"] - tensorflow_frontend [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_frontend" target="_parent"] - tensorflow_backend [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backend" target="_parent"] + graph [label="Environments", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments" target="_parent"] + jax_frontend [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments/jax_frontend" target="_parent"] + xla_backend [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments/xla_backend" target="_parent"] + tensorflow_frontend [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_frontend" target="_parent"] + tensorflow_backend [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/environments/tensorflow_backend" target="_parent"] } jax_frontend -> xla_backend @@ -38,9 +38,9 @@ digraph { tensorflow_backend -> templates [arrowhead="none", style="invisible"] subgraph cluster_core_1 { - framework 
[href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/framework" target="_parent"] - test [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/test" target="_parent"] - templates [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/templates" target="_parent"] + framework [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/framework" target="_parent"] + test [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/test" target="_parent"] + templates [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/templates" target="_parent"] } framework -> execution_contexts [arrowhead="none", style="invisible"] @@ -48,16 +48,16 @@ digraph { templates -> execution_contexts [arrowhead="none", style="invisible"] subgraph cluster_core_2 { - graph [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl" target="_parent"] - execution_contexts [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/execution_contexts" target="_parent"] - executor_stacks [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/executor_stacks" target="_parent"] - executors [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/executors" target="_parent"] - federated_context [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/federated_context" target="_parent"] - computation [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/computation" target="_parent"] - compiler [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler" target="_parent"] - utils 
[href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/utils" target="_parent"] - types [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/types" target="_parent"] - context_stack [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/context_stack" target="_parent"] + graph [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl" target="_parent"] + execution_contexts [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/execution_contexts" target="_parent"] + executor_stacks [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/executor_stacks" target="_parent"] + executors [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/executors" target="_parent"] + federated_context [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/federated_context" target="_parent"] + computation [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/computation" target="_parent"] + compiler [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler" target="_parent"] + utils [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/utils" target="_parent"] + types [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/types" target="_parent"] + context_stack [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/context_stack" target="_parent"] } execution_contexts -> executor_stacks @@ -90,9 +90,9 @@ digraph { 
types -> proto [arrowhead="none", style="invisible"] subgraph cluster_core_3 { - tensorflow_libs [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/tensorflow_libs" target="_parent"] - common_libs [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/common_libs" target="_parent"] - proto [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/proto" target="_parent"] + tensorflow_libs [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/tensorflow_libs" target="_parent"] + common_libs [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/common_libs" target="_parent"] + proto [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/proto" target="_parent"] } tensorflow_libs -> common_libs diff --git a/docs/design/package_structure_learning.dot b/docs/design/package_structure_learning.dot index 42481c5a1f..b0035b9774 100644 --- a/docs/design/package_structure_learning.dot +++ b/docs/design/package_structure_learning.dot @@ -4,14 +4,14 @@ digraph { edge [color="#616161"] subgraph cluster_learning { - graph [label="Learning", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning" target="_parent"] - algorithms [label="algorithms", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/algorithms" target="_parent"] - programs [label="programs", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/programs" target="_parent"] - templates [label="templates", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/templates" target="_parent"] - optimizers [label="optimizers", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/optimizers" target="_parent"] - models 
[label="models", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/models" target="_parent"] - metrics [label="metrics", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/metrics" target="_parent"] - learning [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning" target="_parent"] + graph [label="Learning", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning" target="_parent"] + algorithms [label="algorithms", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/algorithms" target="_parent"] + programs [label="programs", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/programs" target="_parent"] + templates [label="templates", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/templates" target="_parent"] + optimizers [label="optimizers", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/optimizers" target="_parent"] + models [label="models", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/models" target="_parent"] + metrics [label="metrics", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/metrics" target="_parent"] + learning [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning" target="_parent"] } programs -> templates diff --git a/docs/design/package_structure_overview.dot b/docs/design/package_structure_overview.dot index 9e6144708c..13676a7c59 100644 --- a/docs/design/package_structure_overview.dot +++ b/docs/design/package_structure_overview.dot @@ -5,8 +5,8 @@ digraph { subgraph 
cluster_users { research [href="https://github.com/google-research/federated/blob/master" target="_parent", fillcolor="#E2F3EB", color="#57BB8A"] - examples [href="https://github.com/tensorflow/federated/blob/main/examples" target="_parent", fillcolor="#E2F3EB", color="#57BB8A"] - tests [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/tests" target="_parent", fillcolor="#E2F3EB", color="#57BB8A"] + examples [href="https://github.com/google-parfait/tensorflow-federated/blob/main/examples" target="_parent", fillcolor="#E2F3EB", color="#57BB8A"] + tests [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/tests" target="_parent", fillcolor="#E2F3EB", color="#57BB8A"] } research -> simulation [arrowhead="none", style="invisible"] @@ -14,12 +14,12 @@ digraph { tests -> simulation [arrowhead="none", style="invisible"] subgraph cluster_tff { - graph [label="TFF", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated" target="_parent"] + graph [label="TFF", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated" target="_parent"] simulation [href="#simulation" target="_parent"] learning [href="#learning" target="_parent"] analytics [href="#analytics" target="_parent"] - aggregators [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/aggregators" target="_parent"] - program [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/program" target="_parent"] + aggregators [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/aggregators" target="_parent"] + program [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/program" target="_parent"] core [href="#core" target="_parent"] } diff --git a/docs/design/package_structure_simulation.dot b/docs/design/package_structure_simulation.dot 
index 43da8dda96..8786b6e687 100644 --- a/docs/design/package_structure_simulation.dot +++ b/docs/design/package_structure_simulation.dot @@ -4,11 +4,11 @@ digraph { edge [color="#616161"] subgraph cluster_simulation { - graph [label="Simulation", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/simulation" target="_parent"] - baselines [label="baselines", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/simulation/baselines" target="_parent"] - datasets [label="datasets", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/simulation/datasets" target="_parent"] - models [label="models", href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/simulation/models" target="_parent"] - simulation [href="https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/simulation" target="_parent"] + graph [label="Simulation", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/simulation" target="_parent"] + baselines [label="baselines", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/simulation/baselines" target="_parent"] + datasets [label="datasets", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/simulation/datasets" target="_parent"] + models [label="models", href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/simulation/models" target="_parent"] + simulation [href="https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/simulation" target="_parent"] } baselines -> datasets diff --git a/docs/design/tracing.md b/docs/design/tracing.md index fa77497742..9a1827db8a 100644 --- a/docs/design/tracing.md +++ b/docs/design/tracing.md @@ -16,23 +16,23 @@ At a high level, there are three components to 
tracing a Federated computation. Internally, a TFF computation only ever has zero or one argument. The arguments provided to the -[federated_computation.federated_computation](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/federated_context/federated_computation.py) +[federated_computation.federated_computation](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/federated_context/federated_computation.py) decorator describes the type signature of the arguments to the TFF computation. TFF uses this information to determine how to pack the arguments of the Python function into a single -[structure.Struct](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/common_libs/structure.py). +[structure.Struct](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/common_libs/structure.py). Note: Using `Struct` as a single data structure to represent both Python `args` and `kwargs` is the reason that `Struct` accepts both named and unnamed fields. See -[function_utils.wrap_as_zero_or_one_arg_callable](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/computation/function_utils.py) +[function_utils.wrap_as_zero_or_one_arg_callable](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/computation/function_utils.py) for more information. ### Tracing the function When tracing a `federated_computation`, the user's function is called using -[value_impl.Value](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/federated_context/value_impl.py) +[value_impl.Value](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/federated_context/value_impl.py) as a stand-in replacement for each argument. 
`Value` attempts to emulate the behavior of the original argument type by implementing common Python dunder methods (e.g. `__getattr__`). @@ -40,9 +40,9 @@ methods (e.g. `__getattr__`). In more detail, when there is exactly one argument, tracing is accomplished by: 1. Constructing a - [value_impl.Value](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/federated_context/value_impl.py) + [value_impl.Value](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/federated_context/value_impl.py) backed by a - [building_blocks.Reference](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) + [building_blocks.Reference](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) with appropriate type signature to represent the argument. 2. Invoking the function on the `Value`. This causes the Python runtime to @@ -61,7 +61,7 @@ Here the function’s parameter is a tuple and in the body of the function the 0 element is selected. This invokes Python’s `__getitem__` method, which is overridden on `Value`. In the simplest case, the implementation of `Value.__getitem__` constructs a -[building_blocks.Selection](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) +[building_blocks.Selection](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) to represent the invocation of `__getitem__` and returns a `Value` backed by this new `Selection`. @@ -72,9 +72,9 @@ dunder methods to be invoked. 
### Constructing the AST The result of tracing the function is packaged into a -[building_blocks.Lambda](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) +[building_blocks.Lambda](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) whose `parameter_name` and `parameter_type` map to the -[building_block.Reference](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) +[building_block.Reference](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/core/impl/compiler/building_blocks.py) created to represent the packed arguments. The resulting `Lambda` is then returned as a Python object that fully represents the user’s Python function. diff --git a/docs/faq.md b/docs/faq.md index b78ddcf14d..6c985cfec0 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -45,8 +45,8 @@ calling `sess.run` on the output tensor repeatedly within the same session. ## How can I contribute? -See the [README](https://github.com/tensorflow/federated/blob/main/README.md), -[contributing](https://github.com/tensorflow/federated/blob/main/CONTRIBUTING.md) +See the [README](https://github.com/google-parfait/tensorflow-federated/blob/main/README.md), +[contributing](https://github.com/google-parfait/tensorflow-federated/blob/main/CONTRIBUTING.md) guidelines, and [collaborations](collaborations/README.md). ## What is the relationship between FedJAX and TensorFlow Federated? 
diff --git a/docs/federated_core.md b/docs/federated_core.md index b1e9dd50d2..21e2eadc53 100644 --- a/docs/federated_core.md +++ b/docs/federated_core.md @@ -80,7 +80,7 @@ blocks such as `tff.federated_sum`, `tff.federated_reduce`, or TFF uses an internal language to represent federated computations, the syntax of which is defined by the serializable representation in -[computation.proto](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/proto/v0/computation.proto). +[computation.proto](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/proto/v0/computation.proto). Users of FC API generally won't need to interact with this language directly, though. Rather, we provide a Python API (the `tff` namespace) that wraps around it as a way to define computations. diff --git a/docs/federated_learning.md b/docs/federated_learning.md index 5bcdf7ab98..e632eeb630 100644 --- a/docs/federated_learning.md +++ b/docs/federated_learning.md @@ -178,7 +178,7 @@ You can find examples of how to define your own custom `tff.learning.models.VariableModel` in the second part of our [image classification](tutorials/federated_learning_for_image_classification.ipynb) tutorial, as well as in the example models we use for testing in -[`model_examples.py`](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/models/model_examples.py). +[`model_examples.py`](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/models/model_examples.py). ### Converters for Keras diff --git a/docs/install.md b/docs/install.md index 4c0272cf1e..aa35f6933e 100644 --- a/docs/install.md +++ b/docs/install.md @@ -72,8 +72,8 @@ build tool used to compile Tensorflow Federated. ### 3. Clone the Tensorflow Federated repository.
-git clone https://github.com/tensorflow/federated.git
-cd "federated"
+git clone https://github.com/google-parfait/tensorflow-federated.git
+cd "tensorflow-federated"
 
### 4. Create a virtual environment. diff --git a/docs/learning/federated_program_guide.md b/docs/learning/federated_program_guide.md index 64b9183e44..50fa1b4546 100644 --- a/docs/learning/federated_program_guide.md +++ b/docs/learning/federated_program_guide.md @@ -1,11 +1,11 @@ # Learning Federated Program Developer Guide This documentation is for anyone who is interested in authoring -[federated program logic](https://github.com/tensorflow/federated/blob/main/docs/program/federated_program.md#program-logic) +[federated program logic](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/federated_program.md#program-logic) in [`tff.learning`](https://www.tensorflow.org/federated/api_docs/python/tff/learning). It assumes knowledge of `tff.learning` and the -[Federated Program Developer Guide](https://github.com/tensorflow/federated/blob/main/docs/program/guide.md). +[Federated Program Developer Guide](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/guide.md). [TOC] diff --git a/docs/program/federated_program.md b/docs/program/federated_program.md index bee338db7c..3387990713 100644 --- a/docs/program/federated_program.md +++ b/docs/program/federated_program.md @@ -7,8 +7,8 @@ especially its type system. 
For more information about federated program, see: * [API Documentation](https://www.tensorflow.org/federated/api_docs/python/tff/program) -* [Examples](https://github.com/tensorflow/federated/blob/main/examples/program) -* [Developer Guide](https://github.com/tensorflow/federated/blob/main/docs/program/guide.md) +* [Examples](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/program) +* [Developer Guide](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/guide.md) [TOC] @@ -144,7 +144,7 @@ decorators to create a [`tff.framework.ConcreteComputation`](https://www.tensorflow.org/federated/api_docs/python/tff/framework/ConcreteComputation): See -[life of a computation](https://github.com/tensorflow/federated/blob/main/docs/design/life_of_a_computation.md) +[life of a computation](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/design/life_of_a_computation.md) for more information. ### Program Logic diff --git a/docs/program/guide.md b/docs/program/guide.md index f50c10e07b..8289958709 100644 --- a/docs/program/guide.md +++ b/docs/program/guide.md @@ -1,22 +1,22 @@ # Federated Program Developer Guide This documentation is for anyone who is interested in authoring -[federated program logic](https://github.com/tensorflow/federated/blob/main/docs/program/federated_program.md#program-logic) +[federated program logic](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/federated_program.md#program-logic) or a -[federated program](https://github.com/tensorflow/federated/blob/main/docs/program/federated_program.md#program). +[federated program](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/federated_program.md#program). It assumes knowledge of TensorFlow Federated, especially its type system, and -[federated programs](https://github.com/tensorflow/federated/blob/main/docs/program/federated_program.md). 
+[federated programs](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/federated_program.md). [TOC] ## Program Logic This section defines guidelines for how -[program logic](https://github.com/tensorflow/federated/blob/main/docs/program/federated_program.md#program-logic) +[program logic](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/federated_program.md#program-logic) should be authored. See the example -[program_logic.py](https://github.com/tensorflow/federated/blob/main/examples/program/program_logic.py) +[program_logic.py](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/program/program_logic.py) for more information. ### Document Type Signatures @@ -192,7 +192,7 @@ Note: It is ok to release all the values if that is what is required. **Do** define the program logic as an [asynchronous function](https://docs.python.org/3/reference/compound_stmts.html#coroutine-function-definition). The -[components](https://github.com/tensorflow/federated/blob/main/docs/program/federated_program.md#components) +[components](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/federated_program.md#components) of TFF's federated program library use [asyncio](https://docs.python.org/3/library/asyncio.html) to execute Python concurrently and defining the program logic as an asynchronous function makes it @@ -213,23 +213,23 @@ def program_logic(...) -> None: ### Tests **Do** provide unit tests for the program logic (e.g. -[program_logic_test.py](https://github.com/tensorflow/federated/blob/main/examples/program/program_logic_test.py)). +[program_logic_test.py](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/program/program_logic_test.py)). 
## Program This section defines guidelines for how a -[program](https://github.com/tensorflow/federated/blob/main/docs/program/federated_program.md#program) +[program](https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/federated_program.md#program) should be authored. See the example -[program.py](https://github.com/tensorflow/federated/blob/main/examples/program/program.py) +[program.py](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/program/program.py) for more information. ### Document the Program **Do** document the details of the program to the customer in the docstring of the module (e.g. -[program.py](https://github.com/tensorflow/federated/blob/main/examples/program/program.py)): +[program.py](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/program/program.py)): * How to manually run the program. * What platform, computations, and data sources are used in the program. diff --git a/docs/tff_for_research.md b/docs/tff_for_research.md index b6281b3e6d..0026dc45fd 100644 --- a/docs/tff_for_research.md +++ b/docs/tff_for_research.md @@ -21,7 +21,7 @@ types of logic. encapsulate logic that runs in a single location (e.g., on clients or on a server). This code is typically written and tested without any `tff.*` references, and can be re-used outside of TFF. For example, the - [client training loop in Federated Averaging](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L184-L222) + [client training loop in Federated Averaging](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L184-L222) is implemented at this level. 1. TensorFlow Federated orchestration logic, which binds together the @@ -29,12 +29,12 @@ types of logic. 
`tff.tensorflow.computation`s and then orchestrating them using abstractions like `tff.federated_broadcast` and `tff.federated_mean` inside a `tff.federated_computation`. See, for example, this - [orchestration for Federated Averaging](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py#L112-L140). + [orchestration for Federated Averaging](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py#L112-L140). 1. An outer driver script that simulates the control logic of a production FL system, selecting simulated clients from a dataset and then executing federated computations defined in 2. on those clients. For example, - [a Federated EMNIST experiment driver](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py). + [a Federated EMNIST experiment driver](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py). ## Federated learning datasets @@ -114,25 +114,25 @@ TFF, depending on the desired level of customization. A minimal stand-alone implementation of the [Federated Averaging](https://arxiv.org/abs/1602.05629) algorithm is provided -[here](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg). +[here](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg). 
The code includes -[TF functions](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py) +[TF functions](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py) for local computation, -[TFF computations](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py) +[TFF computations](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py) for orchestration, and a -[driver script](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py) +[driver script](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py) on the EMNIST dataset as an example. These files can easily be adapted for customized applications and algorithmic changes following detailed instructions in the -[README](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/README.md). +[README](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/README.md). A more general implementation of Federated Averaging can be found -[here](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/algorithms/fed_avg.py). +[here](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/algorithms/fed_avg.py). This implementation allows for more sophisticated optimization techniques, including the use of different optimizers on both the server and client. Other federated learning algorithms, including federated k-means clustering, can be found -[here](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/algorithms/). +[here](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/algorithms/). 
### Model update compression @@ -177,21 +177,21 @@ systems and differential privacy based defenses considered in *[Can You Really Backdoor Federated Learning?](https://arxiv.org/abs/1911.07963)*. This is done by building an iterative process with potentially malicious clients (see -[`build_federated_averaging_process_attacked`](https://github.com/tensorflow/federated/blob/6477a3dba6e7d852191bfd733f651fad84b82eab/federated_research/targeted_attack/attacked_fedavg.py#L412)). +[`build_federated_averaging_process_attacked`](https://github.com/google-parfait/tensorflow-federated/blob/6477a3dba6e7d852191bfd733f651fad84b82eab/federated_research/targeted_attack/attacked_fedavg.py#L412)). The -[targeted_attack](https://github.com/tensorflow/federated/tree/6477a3dba6e7d852191bfd733f651fad84b82eab/federated_research/targeted_attack) +[targeted_attack](https://github.com/google-parfait/tensorflow-federated/tree/6477a3dba6e7d852191bfd733f651fad84b82eab/federated_research/targeted_attack) directory contains more details. * New attacking algorithms can be implemented by writing a client update function which is a Tensorflow function, see - [`ClientProjectBoost`](https://github.com/tensorflow/federated/blob/6477a3dba6e7d852191bfd733f651fad84b82eab/federated_research/targeted_attack/attacked_fedavg.py#L460) + [`ClientProjectBoost`](https://github.com/google-parfait/tensorflow-federated/blob/6477a3dba6e7d852191bfd733f651fad84b82eab/federated_research/targeted_attack/attacked_fedavg.py#L460) for an example. 
* New defenses can be implemented by customizing - ['tff.utils.StatefulAggregateFn'](https://github.com/tensorflow/federated/blob/6477a3dba6e7d852191bfd733f651fad84b82eab/tensorflow_federated/python/core/utils/computation_utils.py#L103) + ['tff.utils.StatefulAggregateFn'](https://github.com/google-parfait/tensorflow-federated/blob/6477a3dba6e7d852191bfd733f651fad84b82eab/tensorflow_federated/python/core/utils/computation_utils.py#L103) which aggregates client outputs to get a global update. For an example script for simulation, see -[`emnist_with_targeted_attack.py`](https://github.com/tensorflow/federated/blob/6477a3dba6e7d852191bfd733f651fad84b82eab/federated_research/targeted_attack/emnist_with_targeted_attack.py). +[`emnist_with_targeted_attack.py`](https://github.com/google-parfait/tensorflow-federated/blob/6477a3dba6e7d852191bfd733f651fad84b82eab/federated_research/targeted_attack/emnist_with_targeted_attack.py). ### Generative Adversarial Networks @@ -204,7 +204,7 @@ their own optimization step. TFF can be used for research on federated training of GANs. For example, the DP-FedAvg-GAN algorithm presented in [recent work](https://arxiv.org/abs/1911.06679) is -[implemented in TFF](https://github.com/tensorflow/federated/tree/main/federated_research/gans). +[implemented in TFF](https://github.com/google-parfait/tensorflow-federated/tree/main/federated_research/gans). This work demonstrates the effectiveness of combining federated learning, generative models, and [differential privacy](#differential_privacy). @@ -218,13 +218,13 @@ One approach is to let each client fine-tune a single global model (trained using federated learning) with their local data. This approach has connections to meta-learning, see, e.g., [this paper](https://arxiv.org/abs/1909.12488). An example of this approach is given in -[`emnist_p13n_main.py`](https://github.com/tensorflow/federated/blob/main/examples/personalization/emnist_p13n_main.py). 
+[`emnist_p13n_main.py`](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/personalization/emnist_p13n_main.py). To explore and compare different personalization strategies, you can: * Define a personalization strategy by implementing a `tf.function` that starts from an initial model, trains and evaluates a personalized model using each client's local datasets. An example is given by - [`build_personalize_fn`](https://github.com/tensorflow/federated/blob/main/examples/personalization/p13n_utils.py). + [`build_personalize_fn`](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/personalization/p13n_utils.py). * Define an `OrderedDict` that maps strategy names to the corresponding personalization strategies, and use it as the `personalize_fn_dict` argument diff --git a/examples/personalization/README.md b/examples/personalization/README.md index 5353badef4..0d1167e14d 100644 --- a/examples/personalization/README.md +++ b/examples/personalization/README.md @@ -39,7 +39,7 @@ an unbatched `tf.data.Dataset` for test, and an extra `context` object, trains a personalized model, and returns the evaluation metrics. Users can define whatever personalization strategies they like. An example of fine-tuning based personalization strategy is given by `build_personalize_fn` in -[`p13n_utils`](https://github.com/tensorflow/federated/blob/main/examples/personalization/p13n_utils.py). +[`p13n_utils`](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/personalization/p13n_utils.py). The [`tff.learning.build_personalization_eval_computation`](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_personalization_eval_computation) @@ -49,7 +49,7 @@ time, starting from the same global model. Specifically, users define a and then pass it to the API as the `personalize_fn_dict` argument. 
In -[our experiment](https://github.com/tensorflow/federated/blob/main/examples/personalization/emnist_p13n_main.py), +[our experiment](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/personalization/emnist_p13n_main.py), we define and evaluate two fine-tuning based personalization strategies: one uses SGD and the other uses Adam optimizer. @@ -60,7 +60,7 @@ The API has an argument `max_num_samples` with a default value 100. Metrics from at most `max_num_samples` clients (the clients are sampled without replacement) will be collected and returned. In -[our experiment](https://github.com/tensorflow/federated/blob/main/examples/personalization/emnist_p13n_main.py), +[our experiment](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/personalization/emnist_p13n_main.py), we set this value to be larger than the number of clients in the federated dataset, which means that metrics from all clients will be returned. @@ -70,7 +70,7 @@ strategy names to the evaluation metrics of the corresponding personalization strategies. Each returned metric contains a list of scalars (each scalar comes from one sampled client). Metric values at the same position, e.g., `metric_1[i]`, `metric_2[i]`, ..., correspond to the same client. In -[our experiment](https://github.com/tensorflow/federated/blob/main/examples/personalization/emnist_p13n_main.py), +[our experiment](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/personalization/emnist_p13n_main.py), the baseline evaluation function is given by `evaluate_fn` in -[`p13n_utils`](https://github.com/tensorflow/federated/blob/main/examples/personalization/p13n_utils.py), +[`p13n_utils`](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/personalization/p13n_utils.py), which is the same evaluation function used by the personalization strategies. 
diff --git a/examples/program/program.py b/examples/program/program.py index 18986b839d..ed4c0d3457 100644 --- a/examples/program/program.py +++ b/examples/program/program.py @@ -33,7 +33,7 @@ * This example is the author of the program and the program logic Please read -https://github.com/tensorflow/federated/blob/main/docs/program/federated_program.md +https://github.com/google-parfait/tensorflow-federated/blob/main/docs/program/federated_program.md for more information. Usage: diff --git a/examples/simple_fedavg/README.md b/examples/simple_fedavg/README.md index 1227f1bcc7..daaeadcb2f 100644 --- a/examples/simple_fedavg/README.md +++ b/examples/simple_fedavg/README.md @@ -4,16 +4,16 @@ This is intended to be a flexible and minimal implementation of Federated Averaging, and the code is designed to be modular and reusable. This implementation of the federated averaging algorithm only uses key TFF functions and does not depend on advanced features in `tff.learning`. See -[fed_avg_.py](https://github.com/tensorflow/federated/blob/main/tensorflow_federated/python/learning/algorithms/fed_avg.py) +[fed_avg.py](https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/python/learning/algorithms/fed_avg.py) for a more full-featured implementation. ## Instructions A minimal implementation of the [Federated Averaging](https://arxiv.org/abs/1602.05629) algorithm is provided -[here](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg), +[here](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg), along with an example -[federated EMNIST experiment](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py). +[federated EMNIST experiment](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py). The implementation demonstrates the three main types of logic of a typical federated learning simulation. 
@@ -32,59 +32,59 @@ federated learning simulation. This EMNIST example can easily be adapted for experimental changes: * In the driver file - [emnist_fedavg_main](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py), + [emnist_fedavg_main](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py), we can change the - [dataset](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L49-L79), + [dataset](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L49-L79), the - [neural network architecture](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L82-L122), + [neural network architecture](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L82-L122), the - [server_optimizer](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L125-L126), + [server_optimizer](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L125-L126), and the - [client_optimizer](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L129-L130) + [client_optimizer](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L129-L130) for customized applications. 
Note that we need a - [model wrapper](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L151-152), + [model wrapper](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L151-152), and build an - [iterative process](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L154-L155) + [iterative process](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L154-L155) with TFF. We define a stand-alone model wrapper for keras models in - [simple_fedavg_tf](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L39-L81), + [simple_fedavg_tf](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L39-L81), which can be substituted with `tff.learning.models.VariableModel` by calling `tff.learning.models.from_keras_model`. Note that the inner - [keras_model](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L174) + [keras_model](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L174) of `tff.learning.models.VariableModel` may not be directly accessible for evaluation. * In the TF function file - [simple_fedavg_tf](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py), + [simple_fedavg_tf](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py), we have more control over the local computations performed in optimization process. 
In each round, on the server side, we will update the - [ServerState](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L102-L113) + [ServerState](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L102-L113) in - [server_update](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L131-L141) + [server_update](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L131-L141) function; we then build a - [BroadcastMessage](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L116-L128) + [BroadcastMessage](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L116-L128) with the - [build_server_broadcast_message](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L165-L181) + [build_server_broadcast_message](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L165-L181) function to prepare for broadcasting from server to clients; on the client side, we perform local updates with the - [client_update](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L184-L222) + [client_update](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L184-L222) function and return - [ClientOutput](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L84-L99) + [ClientOutput](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L84-L99) to be sent back to server. 
Note that server_optimizer defined in - [emnist_fedavg_main](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L125-L126) + [emnist_fedavg_main](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L125-L126) is used in - [server_update](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L131-L141) + [server_update](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L131-L141) function; client_optimizer defined in - [emnist_fedavg_main](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L129-L130) + [emnist_fedavg_main](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py#L129-L130) is used in - [client_update](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L184-L222). + [client_update](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py#L184-L222). These functions are used as local computation building blocks in the overall TFF computation, which handles the broadcasting and aggregation between server and clients. * In the TFF file - [simple_fedavg_tff](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py), + [simple_fedavg_tff](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py), we have control over the orchestration strategy. We take the - [weighted average](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py#L132-L133) + [weighted average](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py#L132-L133) of client updates to update the model kept in server state. 
More detailed instruction on the usage of TFF functions `federated_broadcast`, `federated_map`, and `federated_mean` can be found in the @@ -92,33 +92,33 @@ This EMNIST example can easily be adapted for experimental changes: We expect researchers working on applications and models only need to change the driver file -[emnist_fedavg_main](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py), +[emnist_fedavg_main](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py), researchers working on optimization may implement most of the ideas by writing pure TF code in the TF file -[simple_fedavg_tf](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py), +[simple_fedavg_tf](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py), while researchers who need more control over the orchestration strategy may get familiar with TFF code in -[simple_fedavg_tff](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py). +[simple_fedavg_tff](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py). We encourage readers to consider the following exercises for using this set of code for your research: 1. Try a more complicated server optimizer such as ADAM. You only need to change - [emnist_fedavg_main](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py). + [emnist_fedavg_main](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py). 1. Implement a model that uses L2 regularization. 
You will need to change the model definition in - [emnist_fedavg_main](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py) + [emnist_fedavg_main](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/emnist_fedavg_main.py) and add Keras regularization losses in the `KerasModelWrapper` class in - [simple_fedavg_tf](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py). + [simple_fedavg_tf](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py). 1. Implement a decaying learning rate schedule on the clients based on the global round, using the `round_num` broadcasted to the clients in - [simple_fedavg_tf](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py). + [simple_fedavg_tf](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tf.py). 1. Implement a more complicated aggregation procedure that drops the client updates with the largest and smallest l2 norms. You will need to change - [simple_fedavg_tff](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py). + [simple_fedavg_tff](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg/simple_fedavg_tff.py). ## Citation diff --git a/examples/stateful_clients/README.md b/examples/stateful_clients/README.md index a01045252e..f2eeb4edab 100644 --- a/examples/stateful_clients/README.md +++ b/examples/stateful_clients/README.md @@ -9,7 +9,7 @@ protection. This project is based on the standalone implementation of Federated Averaging algorithm in -[`simple_fedavg`](https://github.com/tensorflow/federated/blob/main/examples/simple_fedavg). +[`simple_fedavg`](https://github.com/google-parfait/tensorflow-federated/blob/main/examples/simple_fedavg). 
We introduce a counter on each client, which tracks the total number of iterations for model training on the clients. For example, if client A has been sampled m times at round n, and each time local model training has been run with diff --git a/requirements.txt b/requirements.txt index 86ea4b8702..dd5d7f2b92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,11 +2,11 @@ # # If you add a *new* dependency and it is required by the TensorFlow Federated # package, also add the dependency to -# https://github.com/tensorflow/federated/blob/main/tensorflow_federated/tools/development/setup.py. +# https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/tools/development/setup.py. # # If you update the version of an *existing* dependency and it is required by # the TensorFlow Federated package, also update the version of the dependency in -# https://github.com/tensorflow/federated/blob/main/tensorflow_federated/tools/development/setup.py. +# https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/tools/development/setup.py. # # * For packages that have a stable release, we use a version that is # compatible with that release (e.g. `~=x.y`). See @@ -45,7 +45,7 @@ tqdm~=4.64 typing-extensions>=4.5.0,==4.5.* # The version of this dependency should match the version in -# https://github.com/tensorflow/federated/blob/main/WORKSPACE. +# https://github.com/google-parfait/tensorflow-federated/blob/main/WORKSPACE. 
tensorflow>=2.14.0,==2.14.* # TODO: b/315515548 - Required because current dependencies are pulling in later diff --git a/tensorflow_federated/data/README.md b/tensorflow_federated/data/README.md index 89734eea93..3f23834a96 100644 --- a/tensorflow_federated/data/README.md +++ b/tensorflow_federated/data/README.md @@ -2,6 +2,6 @@ This directory contains package data for the Python package and is installed into the package when it is created using -https://github.com/tensorflow/federated/blob/main/tensorflow_federated/tools/development/setup.py. See +https://github.com/google-parfait/tensorflow-federated/blob/main/tensorflow_federated/tools/development/setup.py. See https://docs.python.org/3/distutils/setupscript.html#installing-package-data for more information. diff --git a/tensorflow_federated/tools/python_package/setup.py b/tensorflow_federated/tools/python_package/setup.py index a42d9d2a58..6d609c652a 100644 --- a/tensorflow_federated/tools/python_package/setup.py +++ b/tensorflow_federated/tools/python_package/setup.py @@ -55,7 +55,7 @@ PROJECT_NAME = 'tensorflow_federated' # The version of a dependency should match the version and follow the guidelines -# in https://github.com/tensorflow/federated/blob/main/requirements.txt. +# in https://github.com/google-parfait/tensorflow-federated/blob/main/requirements.txt. 
REQUIRED_PACKAGES = [ 'absl-py>=1.0,==1.*', 'attrs~=23.1', @@ -129,9 +129,13 @@ def get_package_name(requirement: str) -> str: ], keywords='tensorflow federated machine learning', project_urls={ - 'Issues': 'https://github.com/tensorflow/federated/issues', - 'Releases': 'https://github.com/tensorflow/federated/releases', - 'Source': 'https://github.com/tensorflow/federated', + 'Issues': ( + 'https://github.com/google-parfait/tensorflow-federated/issues' + ), + 'Releases': ( + 'https://github.com/google-parfait/tensorflow-federated/releases' + ), + 'Source': 'https://github.com/google-parfait/tensorflow-federated', 'Documentation': 'https://www.tensorflow.org/federated', }, packages=setuptools.find_packages(exclude=['tools']),