It seems to be a problem with where Python is searching for the library and not finding it.
Still, I'm very new at this, so it may be something else.
This is the error (I've separated out the middle part, where I think it shows the problem):
ftuser#a5a1d3ed08d3:/freqtrade$ freqtrade backtesting --strategy canal
2022-08-26 03:51:37,394 - freqtrade.configuration.load_config - INFO - Using config: user_data/config.json ...
2022-08-26 03:51:37,483 - freqtrade.loggers - INFO - Verbosity set to 0
2022-08-26 03:51:37,484 - freqtrade.configuration.configuration - INFO - Using max_open_trades: 1 ...
2022-08-26 03:51:37,716 - freqtrade.configuration.configuration - INFO - Using user-data directory: /freqtrade/user_data ...
2022-08-26 03:51:37,718 - freqtrade.configuration.configuration - INFO - Using data directory: /freqtrade/user_data/data/binance ...
2022-08-26 03:51:37,719 - freqtrade.configuration.configuration - INFO - Parameter --cache=day detected ...
2022-08-26 03:51:37,719 - freqtrade.configuration.check_exchange - INFO - Checking exchange...
2022-08-26 03:51:37,741 - freqtrade.configuration.check_exchange - INFO - Exchange "binance" is officially supported by the Freqtrade development team.
2022-08-26 03:51:37,741 - freqtrade.configuration.configuration - INFO - Using pairlist from configuration.
2022-08-26 03:51:37,741 - freqtrade.configuration.config_validation - INFO - Validating configuration ...
2022-08-26 03:51:37,746 - freqtrade.commands.optimize_commands - INFO - Starting freqtrade in Backtesting mode
2022-08-26 03:51:37,746 - freqtrade.exchange.exchange - INFO - Instance is running with dry_run enabled
2022-08-26 03:51:37,746 - freqtrade.exchange.exchange - INFO - Using CCXT 1.92.20
2022-08-26 03:51:37,746 - freqtrade.exchange.exchange - INFO - Applying additional ccxt config: {'options': {'defaultType': 'future'}}
2022-08-26 03:51:37,766 - freqtrade.exchange.exchange - INFO - Applying additional ccxt config: {'options': {'defaultType': 'future'}}
2022-08-26 03:51:37,782 - freqtrade.exchange.exchange - INFO - Using Exchange "Binance"
2022-08-26 03:51:39,052 - freqtrade.resolvers.exchange_resolver - INFO - Using resolved exchange 'Binance'...
2022-08-26 03:51:39,097 - freqtrade.resolvers.iresolver - WARNING - Could not import /freqtrade/user_data/strategies/canal.py due to 'cannot import name 'SSLchannels' from 'technical.indicators' (/home/ftuser/.local/lib/python3.10/site-packages/technical/indicators/__init__.py)'
2022-08-26 03:51:39,182 - freqtrade - ERROR - Impossible to load Strategy 'canal'. This class does not exist or contains Python code errors.
2022-08-26 03:51:39,182 - freqtrade.exchange.exchange - INFO - Closing async ccxt session.
This is the code in VS Code:
import numpy as np  # noqa
import pandas as pd  # noqa
from pandas import DataFrame
from freqtrade.strategy import (BooleanParameter, CategoricalParameter, DecimalParameter,
                                IStrategy, IntParameter)

# --------------------------------
# Add your lib to import here
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
from technical.indicators import SSLchannels


# This class is a sample. Feel free to customize it.
class canal(IStrategy):
    INTERFACE_VERSION = 3

    # Can this strategy go short?
    can_short: bool = False

    # Minimal ROI designed for the strategy.
    # This attribute will be overridden if the config file contains "minimal_roi".
    minimal_roi = {
        "60": 0.01,
        "30": 0.02,
        "0": 0.04
    }

    # Optimal stoploss designed for the strategy.
    # This attribute will be overridden if the config file contains "stoploss".
    stoploss = -0.10

    # Trailing stoploss
    trailing_stop = False
    # trailing_only_offset_is_reached = False
    # trailing_stop_positive = 0.01
    # trailing_stop_positive_offset = 0.0  # Disabled / not configured

    # Optimal timeframe for the strategy.
    timeframe = '5m'

    # Run "populate_indicators()" only for new candle.
    process_only_new_candles = True

    # These values can be overridden in the config.
    use_exit_signal = True
    exit_profit_only = False
    ignore_roi_if_entry_signal = False

    buy_rsi = IntParameter(low=1, high=50, default=30, space='buy', optimize=True, load=True)
    sell_rsi = IntParameter(low=50, high=100, default=70, space='sell', optimize=True, load=True)
    short_rsi = IntParameter(low=51, high=100, default=70, space='sell', optimize=True, load=True)
    exit_short_rsi = IntParameter(low=1, high=50, default=30, space='buy', optimize=True, load=True)

    # Number of candles the strategy requires before producing valid signals
    startup_candle_count: int = 30

    # Optional order type mapping.
    order_types = {
        'entry': 'limit',
        'exit': 'limit',
        'stoploss': 'market',
        'stoploss_on_exchange': False
    }

    # Optional order time in force.
    order_time_in_force = {
        'entry': 'gtc',
        'exit': 'gtc'
    }

    plot_config = {
        'main_plot': {
            'tema': {},
            'sar': {'color': 'white'},
        },
        'subplots': {
            "MACD": {
                'macd': {'color': 'blue'},
                'macdsignal': {'color': 'orange'},
            },
            "RSI": {
                'rsi': {'color': 'red'},
            }
        }
    }

    def informative_pairs(self):
        """
        Define additional, informative pair/interval combinations to be cached from the exchange.
        These pair/interval combinations are non-tradeable, unless they are part
        of the whitelist as well.
        For more information, please consult the documentation
        :return: List of tuples in the format (pair, interval)
            Sample: return [("ETH/USDT", "5m"),
                            ("BTC/USDT", "15m"),
                            ]
        """
        return []

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # RSI
        dataframe['rsi'] = ta.RSI(dataframe)
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe.loc[
            (
                # Signal: RSI crosses above 30
                (qtpylib.crossed_above(dataframe['rsi'], self.buy_rsi.value)) &
                (dataframe['volume'] > 0)  # Make sure Volume is not 0
            ),
            'enter_long'] = 1

        dataframe.loc[
            (
                # Signal: RSI crosses above 70
                (qtpylib.crossed_above(dataframe['rsi'], self.short_rsi.value)) &
                (dataframe['volume'] > 0)  # Make sure Volume is not 0
            ),
            'enter_short'] = 1

        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe.loc[
            (
                # Signal: RSI crosses above 70
                (qtpylib.crossed_above(dataframe['rsi'], self.sell_rsi.value)) &
                (dataframe['volume'] > 0)  # Make sure Volume is not 0
            ),
            'exit_long'] = 1

        dataframe.loc[
            (
                # Signal: RSI crosses above 30
                (qtpylib.crossed_above(dataframe['rsi'], self.exit_short_rsi.value)) &
                # Guard: tema below BB middle
                (dataframe['volume'] > 0)  # Make sure Volume is not 0
            ),
            'exit_short'] = 1

        return dataframe
I left the RSI indicator in so that I could comment out:
#from technical.indicators import SSLchannels
and test that the rest of the code is OK, and it is: the backtest runs fine.
Here's how I have the folders on my PC.
I also tried selecting Python 3.8 and 3.10 in VS Code just to try; both work fine if I take the technical import out, and both show the error if I put it back in.
Any help would be appreciated.
Thanks!
I would think that you either need to pip install the technical package or use Docker. I prefer using Docker to execute freqtrade commands, as the images already have all the dependencies installed.
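If the package is installed but the import still fails, it may just be a naming or environment mismatch. A quick, hedged check you can run in the same environment freqtrade uses (this only inspects what's installed; the 'ssl' filter is simply to surface the exact spelling to import):

import technical.indicators as ti

# Confirm which installation Python is actually importing from.
print(ti.__file__)
# List exported names containing "ssl" to verify the exact capitalization.
print([name for name in dir(ti) if 'ssl' in name.lower()])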
I was building a documentation site for my Python project using mkdocstrings.
For generating the code reference files, I followed these instructions: https://mkdocstrings.github.io/recipes/
I get these errors:
INFO - Building documentation...
INFO - Cleaning site directory
INFO - The following pages exist in the docs directory, but are not included in the "nav" configuration:
  - reference\SUMMARY.md
  - reference\__init__.md
  ... ...
  - reference\tests\manual_tests.md
ERROR - mkdocstrings: No module named ' '
ERROR - Error reading page 'reference/__init__.md': Could not collect ' '
This is my file structure:
This is my docs folder:
I have the same gen_ref_pages.py file as shown on that page:
from pathlib import Path

import mkdocs_gen_files

nav = mkdocs_gen_files.Nav()

for path in sorted(Path("src").rglob("*.py")):
    module_path = path.relative_to("src").with_suffix("")
    doc_path = path.relative_to("src").with_suffix(".md")
    full_doc_path = Path("reference", doc_path)

    parts = tuple(module_path.parts)

    if parts[-1] == "__init__":
        parts = parts[:-1]
    elif parts[-1] == "__main__":
        continue

    nav[parts] = doc_path.as_posix()

    with mkdocs_gen_files.open(full_doc_path, "w") as fd:
        ident = ".".join(parts)
        fd.write(f"::: {ident}")

    mkdocs_gen_files.set_edit_path(full_doc_path, path)

with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file:
    nav_file.writelines(nav.build_literate_nav())
This is my mkdocs.yml:
site_name: CA Prediction Docs

theme:
  name: "material"
  palette:
    primary: deep purple
  logo: assets/logo.png
  favicon: assets/favicon.png
  features:
    - navigation.instant
    - navigation.tabs
    - navigation.expand
    - navigation.top
    # - navigation.sections
    - search.highlight
    - navigation.footer
  icon:
    repo: fontawesome/brands/git-alt

copyright: Copyright © 2022 - 2023 Ezequiel González

extra:
  social:
    - icon: fontawesome/brands/github
      link: https://github.com/ezegonmac
    - icon: fontawesome/brands/linkedin
      link: https://www.linkedin.com/in/ezequiel-gonzalez-macho-329583223/

repo_url: https://github.com/ezegonmac/TFG-CellularAutomata
repo_name: ezegonmac/TFG-CellularAutomata

plugins:
  - search
  - gen-files:
      scripts:
        - docs/gen_ref_pages.py
  - mkdocstrings

nav:
  - Introduction: index.md
  - Getting Started: getting-started.md
  - API Reference: reference.md
  # - Reference: reference/
  - Explanation: explanation.md
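One hedged observation, based on the linked recipe rather than on this exact setup: the recipe wires the generated pages into the navigation with the literate-nav plugin, which reads the generated reference/SUMMARY.md, together with a nav entry pointing at the reference/ directory (much like the commented-out `- Reference: reference/` line above). Without those, mkdocs reports the generated pages as existing in the docs directory but not included in the "nav" configuration, which matches the first warning shown.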
I'm currently setting up Jaeger SPM (service performance metrics) on my application. Below are the code and setup that I'm using; it currently fails with Failed to export traces, error code: StatusCode.UNIMPLEMENTED.
What am I missing in my configuration?
otel-collector-config.yml
receivers:
  jaeger:
    protocols:
      grpc:
  otlp:
    protocols:
      grpc:
  prometheus:
    config:

exporters:
  logging:
    loglevel: debug
  prometheus:
    endpoint: '0.0.0.0:8889'
    resource_to_telemetry_conversion:
      enabled: true
  jaeger:
    endpoint: 'jaeger:14250'
    tls:
      insecure: true
  otlp:
    endpoint: otel_collector:4317
    tls:
      insecure: true

processors:
  # https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md
  batch:
  # https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/memorylimiterprocessor/README.md
  memory_limiter:
    check_interval: 5s
    limit_mib: 819
    spike_limit_mib: 256
  spanmetrics:
    metrics_exporter: prometheus
    # latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 100ms, 250ms]
    dimensions_cache_size: 1500
    # The aggregation temporality of the generated metrics.
    # Default: "AGGREGATION_TEMPORALITY_CUMULATIVE"
    aggregation_temporality: 'AGGREGATION_TEMPORALITY_CUMULATIVE'
    # Additional list of dimensions on top of:
    # - service.name
    # - operation
    # - span.kind
    # - status.code
    dimensions:
      # If the span is missing http.method, the processor will insert
      # the http.method dimension with value 'GET'.
      # For example, in the following scenario, http.method is not present in a span and so will be added as a dimension to the metric with value "GET":
      # - calls_total{http_method="GET",operation="/Address",service_name="shippingservice",span_kind="SPAN_KIND_SERVER",status_code="STATUS_CODE_UNSET"} 1
      - name: http.method
        default: GET
      # If a default is not provided, the http.status_code dimension will be omitted
      # if the span does not contain http.status_code.
      # For example, consider a scenario with two spans, one span having http.status_code=200 and another missing http.status_code. Two metrics would result with this configuration, one with the http_status_code omitted and the other included:
      # - calls_total{http_status_code="200",operation="/Address",service_name="shippingservice",span_kind="SPAN_KIND_SERVER",status_code="STATUS_CODE_UNSET"} 1
      # - calls_total{operation="/Address",service_name="shippingservice",span_kind="SPAN_KIND_SERVER",status_code="STATUS_CODE_UNSET"} 1
      - name: http.status_code
        default: 200

extensions:
  health_check:
  memory_ballast:
  pprof:
    endpoint: :1888
  zpages:
    # http://localhost:55679/debug/tracez
    endpoint: :55679

service:
  extensions: [memory_ballast, health_check, zpages, pprof]
  telemetry:
    metrics:
      address: :8888
    logs:
      level: debug
  pipelines:
    traces:
      receivers: [otlp]
      # receivers: [jaeger] # This is creating a problem
      processors: [memory_limiter, spanmetrics, batch]
      exporters: [logging, otlp]
    metrics:
      receivers: [otlp]
      processors: [memory_limiter, batch]
      exporters: [prometheus]
    logs:
      receivers: [otlp]
      processors: [memory_limiter, batch]
      exporters: [logging]
With this, I'm able to see the latency buckets and calls_total in Prometheus; however, I cannot see the graphs in Jaeger.
main.py (simple example)
import time

import httpx
from opentelemetry import metrics, trace
from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (
    OTLPMetricExporter,
)
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
    OTLPSpanExporter,
)
from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace.status import Status, StatusCode

# Service name is required for most backends
resource = Resource(attributes={SERVICE_NAME: "jaeger-monitor-test"})

reader = PeriodicExportingMetricReader(
    OTLPMetricExporter(endpoint="localhost:4317", insecure=True)
    # OTLPMetricExporter(insecure=True)
)
provider = MeterProvider(resource=resource, metric_readers=[reader])
metrics.set_meter_provider(provider)

# Service name is required for most backends
resource = Resource(attributes={SERVICE_NAME: "jaeger-monitor-test"})
provider = TracerProvider(resource=resource)
processor = BatchSpanProcessor(OTLPSpanExporter(endpoint="localhost:14250", insecure=True))
# NOTE: this rebinds `processor`, so only the exporter below (with the default
# endpoint, localhost:4317) is actually registered.
processor = BatchSpanProcessor(OTLPSpanExporter(insecure=True))
provider.add_span_processor(processor)
trace.set_tracer_provider(provider)

meter = metrics.get_meter(__name__)
tracer = trace.get_tracer(__name__)

HTTPXClientInstrumentor().instrument()


def send_request() -> None:
    with tracer.start_as_current_span('send_request') as span:
        with httpx.Client() as client:
            request = client.get("https://www.google.com")
            print(request.status_code)
        time.sleep(2)
        span.set_status(Status(StatusCode.OK))  # Hardcoded


if __name__ == "__main__":
    while True:
        send_request()
Right now, if I use otlp as the receiver, it works, but without any graphs in SPM.
If I change it to receivers: [jaeger], it fails with:
Failed to export traces, error code: StatusCode.UNIMPLEMENTED
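A hedged observation rather than a verified fix: the Python OTLPSpanExporter only speaks OTLP/gRPC, while Jaeger's gRPC port 14250 (and the collector's jaeger receiver) expect Jaeger's own protocol, so pointing an OTLP exporter at them is a plausible cause of StatusCode.UNIMPLEMENTED. Under that assumption, the span exporter should target the collector's OTLP receiver (4317 in the config above); a minimal sketch:

from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter

# Point the exporter at the collector's OTLP gRPC receiver (assumed reachable
# on localhost:4317), not at Jaeger's own gRPC port 14250.
resource = Resource(attributes={SERVICE_NAME: "jaeger-monitor-test"})
provider = TracerProvider(resource=resource)
provider.add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter(endpoint="localhost:4317", insecure=True))
)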
I want to add data inside the 'tags' key in this YAML script
# Generated by Chef, local modifications will be overwritten
---
env: nonprod
api_key: 5d9k8h43124g40j9ocmnb619h762d458
hostname: ''
bind_host: localhost
additional_endpoints: {}
tags:
- application_name:testin123
- cloud_supportteam:eagles
- technical_applicationid:0000
- application:default
- lifecycle:default
- function:default-api-key
dogstatsd_non_local_traffic: false
histogram_aggregates:
- max
- median
- avg
- count
which should be like this,
tags:
- application_name:testing123
- cloud_supportteam:eagles
- technical_applicationid:0000
- application:default
- lifecycle:default
- function:default-api-key
- managed_by:Teams
So far I have created this script, which appends the data at the end of the file; that doesn't seem to be the solution:
import yaml

data = {
    'tags': {
        '- managed_by': 'Teams'
    }
}

with open('test.yml', 'a') as outfile:
    yaml.dump(data, outfile, indent=2)
I figured it out like this, and it is working:
import yaml
from yaml.loader import SafeLoader

with open('test.yaml', 'r') as f:
    data = dict(yaml.load(f, Loader=SafeLoader))

data['tags'].append('managed_by:teams')
print(data['tags'])

with open('test.yaml', 'w') as write:
    yaml.dump(data, write, sort_keys=False, default_flow_style=False)
and the output was like this,
['application_name:testin123', 'cloud_supportteam:eagles', 'technical_applicationid:0000', 'application:default', 'lifecycle:default', 'function:default-api-key', 'managed_by:teams']
and the test.yaml file was updated,
tags:
- application_name:testing123
- cloud_supportteam:eagles
- technical_applicationid:0000
- application:default
- lifecycle:default
- function:default-api-key
- managed_by:teams
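One caveat worth flagging: PyYAML's load/dump round trip drops comments (including the '# Generated by Chef' header) and the leading '---' marker, and may reformat other values. If those need to survive, the third-party ruamel.yaml package does comment-preserving round trips; a minimal sketch, assuming ruamel.yaml is installed (pip install ruamel.yaml):

from ruamel.yaml import YAML

yaml = YAML()  # round-trip mode by default: keeps comments and key order
with open('test.yaml') as f:
    data = yaml.load(f)

data['tags'].append('managed_by:teams')

with open('test.yaml', 'w') as f:
    yaml.dump(data, f)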
I'm using Django REST Framework v3.9's built-in interactive documentation.
I have a URL that needs query parameters for GET,
such as:
../jobs/?order_choice=0&city=1&region=0
But I don't know how to document it in the interactive documentation.
I use the method: section in the docstring to add parameters,
such as:
class JobListView(APIView):
    """
    get:
    - order_choices
    - city
    - region
    - job_type
    """
but it prints on one line:
- order_choices - city - region - job_type
These are my parameters:
params_data = {
    'city': request.query_params.get('city', None),
    'region': request.query_params.get('region', None),
    'job_type': request.query_params.get('job_type', None),
    'status': 1,
}
I want to know how to document it correctly.
Try something like below ...
class PackageViewSet(viewsets.ModelViewSet):
    """
    **Query Parameters**

    `page` - get data of a particular page.
    `page_size` - change total objects in a page (default=20).

    **Filter Parameters**

    `status` - `1/2`

    **Search Parameters**

    `name` - `search by package name`

    **Ordering Parameters**

    `name`
    `created_at`
    `updated_at`

    **Default Ordering**

    `-created_at`

    `Method Allowed`

    `GET -` `Lists all the Packages of a facility/company.`
    `POST -` `Creates Package for a facility.`
    `PUT -` `Updates a Package.`
    `DELETE -` `deletes a Package.`

    `POST/Create, PUT/Update`

        {
            "name": "package one",
            "description": "package one",
            "status": 1 // 1- Active, 2-Inactive
        }
    """
I need a Python script to analyze the contents of a log file. The log files (named like: log.txt.2014-01-01) are made up as follows:
....<different structure>
2013-05-09 19:09:20,112 [1] DEBUG Management.Handle - Action: Amount=005,00; Date=25.04.2013 19:25:04
2013-05-09 19:09:20,112 [1] DEBUG Management.Handle - Action: Amount=005,00; Date=25.04.2013 19:27:05
2013-05-09 19:09:20,112 [1] DEBUG Management.Handle - Action: Amount=005,00; Date=25.04.2013 19:28:05
...<different structure>
I need to sum the Amount and print the total.
This is a job for regular expressions:
import re
from io import StringIO

def extractAmount(file_like):
    amountRe = re.compile(r'^.* Management\.Handle - Action: Amount=(\d+),(\d+);')
    for line in file_like:
        result = amountRe.match(line)
        if result:
            matches = result.groups()
            yield float(matches[0]) + (float(matches[1]) / 100.0)

data = StringIO("""....<different structure>
2013-05-09 19:09:20,112 [1] DEBUG Management.Handle - Action: Amount=005,00; Date=25.04.2013 19:25:04
2013-05-09 19:09:20,112 [1] DEBUG Management.Handle - Action: Amount=005,00; Date=25.04.2013 19:27:05
2013-05-09 19:09:20,112 [1] DEBUG Management.Handle - Action: Amount=005,00; Date=25.04.2013 19:28:05
...<different structure>""")

print(sum(extractAmount(data)))
In the example I've used a StringIO object to load the data, but this approach should work with any iterable that yields strings (such as a file object from open).
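Since the real logs are rotated files named like log.txt.2014-01-01, here is a usage sketch on top of the extractAmount generator above; the directory and glob pattern are assumptions to adapt:

from pathlib import Path

# Sum the amounts across every rotated log file in the current directory
# (adjust the path/pattern to wherever the logs actually live).
total = 0.0
for logfile in sorted(Path('.').glob('log.txt.*')):
    with logfile.open() as f:
        total += sum(extractAmount(f))
print(total)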
import re

x = <your_test_string>
z = [float(re.sub(r",", ".", i))
     for i in re.findall(r"(?<=DEBUG Management\.Handle - Action: Amount=)([^;]+)", x)]
print(sum(z))
You can try this. You can see it run step by step here:
http://www.pythontutor.com/visualize.html#code=import+re%0Ax%3D%22%22%22....%3Cdifferent+structure%3E%0A%0A2013-05-09+19%3A09%3A20,112+%5B1%5D+DEBUG+Management.Handle+-+Action%3A+Amount%3D005,00%3B+Date%3D25.04.2013+19%3A25%3A04%0A%0A2013-05-09+19%3A09%3A20,112+%5B1%5D+DEBUG+Management.Handle+-+Action%3A+Amount%3D005,00%3B+Date%3D25.04.2013+19%3A27%3A05%0A%0A2013-05-09+19%3A09%3A20,112+%5B1%5D+DEBUG+Management.Handle+-+Action%3A+Amount%3D005,00%3B+Date%3D25.04.2013+19%3A28%3A05%0A%0A...%3Cdifferent+structure%3E%22%22%22%0Az%3D+%5Bfloat(re.sub(r%22,%22,%22.%22,i))+for+i+in+re.findall(r%22(%3F%3C%3DDEBUG+Management%5C.Handle+-+Action%3A+Amount%3D)(%5B%5E%3B%5D%2B)%22,x)%5D%0Aprint+sum(z)&mode=display&origin=opt-frontend.js&cumulative=false&heapPrimitives=false&drawParentPointers=false&textReferences=false&showOnlyOutputs=false&py=2&rawInputLstJSON=%5B%5D&curInstr=7