fix: User can't be edited by project admin (DEV-1373) #2232

Merged
merged 22 commits into from Oct 5, 2022
Changes from 21 commits

46 changes: 23 additions & 23 deletions Makefile
@@ -72,74 +72,74 @@ docker-publish: docker-publish-dsp-api-image docker-publish-sipi-image ## publis
#################################

.PHONY: print-env-file
print-env-file: ## prints the env file used by knora-stack
print-env-file: ## prints the env file used by dsp-stack
@cat .env

.PHONY: env-file
env-file: ## write the env file used by knora-stack.
env-file: ## write the env file used by dsp-stack.
@echo DOCKERHOST=$(DOCKERHOST) > .env
@echo KNORA_DB_REPOSITORY_NAME=$(KNORA_DB_REPOSITORY_NAME) >> .env
@echo LOCAL_HOME=$(CURRENT_DIR) >> .env

#################################
## Knora Stack Targets
## DSP Stack Targets
#################################

.PHONY: stack-up
stack-up: docker-build env-file ## starts the knora-stack: fuseki, sipi, api.
stack-up: docker-build env-file ## starts the dsp-stack: fuseki, sipi, api.
@docker compose -f docker-compose.yml up -d db
$(CURRENT_DIR)/webapi/scripts/wait-for-db.sh
@docker compose -f docker-compose.yml up -d
$(CURRENT_DIR)/webapi/scripts/wait-for-knora.sh
$(CURRENT_DIR)/webapi/scripts/wait-for-api.sh

.PHONY: stack-up-fast
stack-up-fast: docker-build-knora-api-image env-file ## starts the knora-stack by skipping rebuilding most of the images (only api image is rebuilt).
stack-up-fast: docker-build-knora-api-image env-file ## starts the dsp-stack by skipping rebuilding most of the images (only api image is rebuilt).
docker-compose -f docker-compose.yml up -d

.PHONY: stack-up-ci
stack-up-ci: KNORA_DB_REPOSITORY_NAME := knora-test-unit
stack-up-ci: docker-build env-file print-env-file ## starts the knora-stack using 'knora-test-unit' repository: fuseki, sipi, api.
stack-up-ci: docker-build env-file print-env-file ## starts the dsp-stack using 'knora-test-unit' repository: fuseki, sipi, api.
docker-compose -f docker-compose.yml up -d

.PHONY: stack-restart
stack-restart: ## re-starts the knora-stack: fuseki, sipi, api.
stack-restart: ## re-starts the dsp-stack: fuseki, sipi, api.
@docker compose -f docker-compose.yml down
@docker compose -f docker-compose.yml up -d db
$(CURRENT_DIR)/webapi/scripts/wait-for-db.sh
@docker compose -f docker-compose.yml up -d
$(CURRENT_DIR)/webapi/scripts/wait-for-knora.sh
$(CURRENT_DIR)/webapi/scripts/wait-for-api.sh

.PHONY: stack-restart-api
stack-restart-api: ## re-starts the api. Usually used after loading data into fuseki.
docker-compose -f docker-compose.yml restart api
@$(CURRENT_DIR)/webapi/scripts/wait-for-knora.sh
@$(CURRENT_DIR)/webapi/scripts/wait-for-api.sh

.PHONY: stack-logs
stack-logs: ## prints out and follows the logs of the running knora-stack.
stack-logs: ## prints out and follows the logs of the running dsp-stack.
@docker compose -f docker-compose.yml logs -f

.PHONY: stack-logs-db
stack-logs-db: ## prints out and follows the logs of the 'db' container running in knora-stack.
stack-logs-db: ## prints out and follows the logs of the 'db' container running in dsp-stack.
@docker compose -f docker-compose.yml logs -f db

.PHONY: stack-logs-db-no-follow
stack-logs-db-no-follow: ## prints out the logs of the 'db' container running in knora-stack.
stack-logs-db-no-follow: ## prints out the logs of the 'db' container running in dsp-stack.
@docker-compose -f docker-compose.yml logs db

.PHONY: stack-logs-sipi
stack-logs-sipi: ## prints out and follows the logs of the 'sipi' container running in knora-stack.
stack-logs-sipi: ## prints out and follows the logs of the 'sipi' container running in dsp-stack.
@docker compose -f docker-compose.yml logs -f sipi

.PHONY: stack-logs-sipi-no-follow
stack-logs-sipi-no-follow: ## prints out the logs of the 'sipi' container running in knora-stack.
stack-logs-sipi-no-follow: ## prints out the logs of the 'sipi' container running in dsp-stack.
@docker compose -f docker-compose.yml logs sipi

.PHONY: stack-logs-api
stack-logs-api: ## prints out and follows the logs of the 'api' container running in knora-stack.
stack-logs-api: ## prints out and follows the logs of the 'api' container running in dsp-stack.
@docker compose -f docker-compose.yml logs -f api

.PHONY: stack-logs-api-no-follow
stack-logs-api-no-follow: ## prints out the logs of the 'api' container running in knora-stack.
stack-logs-api-no-follow: ## prints out the logs of the 'api' container running in dsp-stack.
@docker compose -f docker-compose.yml logs api

.PHONY: stack-health
@@ -151,11 +151,11 @@ stack-status:
@docker compose -f docker-compose.yml ps

.PHONY: stack-down
stack-down: ## stops the knora-stack.
stack-down: ## stops the dsp-stack.
@docker compose -f docker-compose.yml down

.PHONY: stack-down-delete-volumes
stack-down-delete-volumes: ## stops the knora-stack and deletes any created volumes (deletes the database!).
stack-down-delete-volumes: ## stops the dsp-stack and deletes any created volumes (deletes the database!).
@docker compose -f docker-compose.yml down --volumes

.PHONY: stack-config
@@ -164,11 +164,11 @@ stack-config: env-file

## stack without api
.PHONY: stack-without-api
stack-without-api: stack-up ## starts the knora-stack without knora-api: fuseki and sipi only.
stack-without-api: stack-up ## starts the dsp-stack without dsp-api: fuseki and sipi only.
@docker compose -f docker-compose.yml stop api

.PHONY: stack-without-api-and-sipi
stack-without-api-and-sipi: stack-up ## starts the knora-stack without knora-api and sipi: fuseki only.
stack-without-api-and-sipi: stack-up ## starts the dsp-stack without dsp-api and sipi: fuseki only.
@docker compose -f docker-compose.yml stop api
@docker compose -f docker-compose.yml stop sipi

@@ -348,11 +348,11 @@ clean-sipi-projects: ## deletes all files uploaded within a project
@rm -rf sipi/images/originals/[0-9A-F][0-9A-F][0-9A-F][0-9A-F]

.PHONY: check
check: ## Run code formating check
check: ## Run code formatting check
@sbt "check"

.PHONY: fmt
fmt: ## Run code formating fix
fmt: ## Run code formatting fix
@sbt "fmt"


@@ -37,11 +37,11 @@ if [[ -z "${TIMEOUT}" ]]; then
TIMEOUT=360
fi

poll-knora() {
check-health() {
STATUS=$(curl -s -o /dev/null -w '%{http_code}' http://${HOST}/health)

if [ "${STATUS}" -eq 200 ]; then
echo "Knora started"
echo "==> DSP-API started"
return 0
else
return 1
@@ -50,9 +50,9 @@ poll-knora() {

attempt_counter=0

until poll-knora; do
until check-health; do
if [ ${attempt_counter} -eq ${TIMEOUT} ]; then
echo "Timed out waiting for Knora to start"
echo "==> Timed out waiting for DSP-API to start"
exit 1
fi

@@ -755,13 +755,13 @@ class UsersResponderADM(responderData: ResponderData) extends Responder(responde
}

// create the update request
updateUseResult <- updateUserADM(
userIri = userIri,
userUpdatePayload = UserChangeRequestADM(projects = Some(updatedProjectMembershipIris)),
requestingUser = requestingUser,
apiRequestID = apiRequestID
)
} yield updateUseResult
updateUserResult <- updateUserADM(
userIri = userIri,
userUpdatePayload = UserChangeRequestADM(projects = Some(updatedProjectMembershipIris)),
requestingUser = requestingUser,
apiRequestID = apiRequestID
)
} yield updateUserResult

for {
// run the task with an IRI lock
@@ -1467,7 +1467,7 @@ class UsersResponderADM(responderData: ResponderData) extends Responder(responde
/* Verify that the user was updated */
maybeUpdatedUserADM <- getSingleUserADM(
identifier = UserIdentifierADM(maybeIri = Some(userIri)),
requestingUser = requestingUser,
requestingUser = KnoraSystemInstances.Users.SystemUser,
userInformationType = UserInformationTypeADM.Full,
skipCache = true
)
@@ -1516,12 +1516,18 @@ class UsersResponderADM(responderData: ResponderData) extends Responder(responde
}

_ = if (userUpdatePayload.projects.isDefined) {

if (updatedUserADM.projects.map(_.id).sorted != userUpdatePayload.projects.get.sorted) {
throw UpdateNotPerformedException(
"User's 'project' memberships were not updated. Please report this as a possible bug."
)
}
for {
projects <- userProjectMembershipsGetADM(
userIri = userIri,
requestingUser = requestingUser
)
_ =
if (projects.map(_.id).sorted != userUpdatePayload.projects.get.sorted) {
throw UpdateNotPerformedException(
"User's 'project' memberships were not updated. Please report this as a possible bug."
)
}
} yield UserProjectMembershipsGetResponseADM(projects)
}

_ = if (userUpdatePayload.systemAdmin.isDefined) {
67 changes: 27 additions & 40 deletions webapi/src/main/scala/org/knora/webapi/routing/HealthRoute.scala
@@ -27,48 +27,35 @@ trait HealthCheck {
for {
_ <- ZIO.logInfo("get application state")
state <- state.get
result <- createResult(state)
result <- setHealthState(state)
_ <- ZIO.logInfo("set health state")
response <- createResponse(result)
_ <- ZIO.logInfo("getting application state done")
} yield response

private def createResult(state: AppState): UIO[HealthCheckResult] =
ZIO
.attempt(
state match {
case AppState.Stopped => unhealthy("Stopped. Please retry later.")
case AppState.StartingUp => unhealthy("Starting up. Please retry later.")
case AppState.WaitingForTriplestore =>
unhealthy("Waiting for triplestore. Please retry later.")
case AppState.TriplestoreReady =>
unhealthy("Triplestore ready. Please retry later.")
case AppState.UpdatingRepository =>
unhealthy("Updating repository. Please retry later.")
case AppState.RepositoryUpToDate =>
unhealthy("Repository up to date. Please retry later.")
case AppState.CreatingCaches => unhealthy("Creating caches. Please retry later.")
case AppState.CachesReady => unhealthy("Caches ready. Please retry later.")
case AppState.UpdatingSearchIndex =>
unhealthy("Updating search index. Please retry later.")
case AppState.SearchIndexReady =>
unhealthy("Search index ready. Please retry later.")
case AppState.LoadingOntologies =>
unhealthy("Loading ontologies. Please retry later.")
case AppState.OntologiesReady => unhealthy("Ontologies ready. Please retry later.")
case AppState.WaitingForIIIFService =>
unhealthy("Waiting for IIIF service. Please retry later.")
case AppState.IIIFServiceReady =>
unhealthy("IIIF service ready. Please retry later.")
case AppState.WaitingForCacheService =>
unhealthy("Waiting for cache service. Please retry later.")
case AppState.CacheServiceReady =>
unhealthy("Cache service ready. Please retry later.")
case AppState.MaintenanceMode =>
unhealthy("Application is in maintenance mode. Please retry later.")
case AppState.Running => healthy
}
)
.orDie
private def setHealthState(state: AppState): UIO[HealthCheckResult] =
ZIO.succeed(
state match {
case AppState.Stopped => unhealthy("Stopped. Please retry later.")
case AppState.StartingUp => unhealthy("Starting up. Please retry later.")
case AppState.WaitingForTriplestore => unhealthy("Waiting for triplestore. Please retry later.")
case AppState.TriplestoreReady => unhealthy("Triplestore ready. Please retry later.")
case AppState.UpdatingRepository => unhealthy("Updating repository. Please retry later.")
case AppState.RepositoryUpToDate => unhealthy("Repository up to date. Please retry later.")
case AppState.CreatingCaches => unhealthy("Creating caches. Please retry later.")
case AppState.CachesReady => unhealthy("Caches ready. Please retry later.")
case AppState.UpdatingSearchIndex => unhealthy("Updating search index. Please retry later.")
case AppState.SearchIndexReady => unhealthy("Search index ready. Please retry later.")
case AppState.LoadingOntologies => unhealthy("Loading ontologies. Please retry later.")
case AppState.OntologiesReady => unhealthy("Ontologies ready. Please retry later.")
case AppState.WaitingForIIIFService => unhealthy("Waiting for IIIF service. Please retry later.")
case AppState.IIIFServiceReady => unhealthy("IIIF service ready. Please retry later.")
case AppState.WaitingForCacheService => unhealthy("Waiting for cache service. Please retry later.")
case AppState.CacheServiceReady => unhealthy("Cache service ready. Please retry later.")
case AppState.MaintenanceMode => unhealthy("Application is in maintenance mode. Please retry later.")
case AppState.Running => healthy
}
)

private def createResponse(result: HealthCheckResult): UIO[HttpResponse] =
ZIO
@@ -94,12 +94,12 @@

private case class HealthCheckResult(name: String, severity: String, status: Boolean, message: String)

private def unhealthy(str: String) =
private def unhealthy(message: String) =
HealthCheckResult(
name = "AppState",
severity = "non fatal",
status = false,
message = str
message = message
)

private val healthy =